path: root/net
author    Anton Altaparmakov <aia21@cantab.net>    2006-02-24 04:06:36 -0500
committer Anton Altaparmakov <aia21@cantab.net>    2006-02-24 04:06:36 -0500
commit    fab8d6ddf6dee2608869005d45fe97f70e4f5bdd (patch)
tree      fecf566e03a87b2a44c7f3363ddb5c0d4bebdca7 /net
parent    64419d93a5906600af5817ad0cae3c6ecf7fb389 (diff)
parent    f52ee1410d563cd409b08822492273a5bc235821 (diff)
Merge branch 'master' of /home/src/linux-2.6/
Diffstat (limited to 'net')
-rw-r--r-- net/802/p8023.c | 2
-rw-r--r-- net/802/psnap.c | 2
-rw-r--r-- net/Kconfig | 7
-rw-r--r-- net/atm/signaling.c | 3
-rw-r--r-- net/bluetooth/hci_sock.c | 10
-rw-r--r-- net/bluetooth/rfcomm/core.c | 13
-rw-r--r-- net/bridge/br_if.c | 105
-rw-r--r-- net/bridge/br_input.c | 19
-rw-r--r-- net/bridge/br_netfilter.c | 58
-rw-r--r-- net/bridge/br_private.h | 5
-rw-r--r-- net/bridge/br_stp_bpdu.c | 30
-rw-r--r-- net/bridge/br_stp_if.c | 4
-rw-r--r-- net/bridge/br_sysfs_if.c | 50
-rw-r--r-- net/bridge/netfilter/ebt_ulog.c | 10
-rw-r--r-- net/bridge/netfilter/ebtables.c | 7
-rw-r--r-- net/core/datagram.c | 81
-rw-r--r-- net/core/dev.c | 7
-rw-r--r-- net/core/filter.c | 6
-rw-r--r-- net/core/rtnetlink.c | 2
-rw-r--r-- net/core/skbuff.c | 18
-rw-r--r-- net/core/utils.c | 4
-rw-r--r-- net/dccp/ccids/lib/tfrc_equation.c | 1
-rw-r--r-- net/dccp/ipv4.c | 3
-rw-r--r-- net/dccp/ipv6.c | 1
-rw-r--r-- net/ethernet/eth.c | 12
-rw-r--r-- net/ieee80211/ieee80211_rx.c | 26
-rw-r--r-- net/ieee80211/ieee80211_wx.c | 12
-rw-r--r-- net/ipv4/devinet.c | 2
-rw-r--r-- net/ipv4/fib_semantics.c | 2
-rw-r--r-- net/ipv4/icmp.c | 7
-rw-r--r-- net/ipv4/igmp.c | 4
-rw-r--r-- net/ipv4/ip_gre.c | 3
-rw-r--r-- net/ipv4/ip_output.c | 16
-rw-r--r-- net/ipv4/ipip.c | 3
-rw-r--r-- net/ipv4/multipath_wrandom.c | 8
-rw-r--r-- net/ipv4/netfilter.c | 41
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 7
-rw-r--r-- net/ipv4/netfilter/ip_conntrack_netlink.c | 3
-rw-r--r-- net/ipv4/netfilter/ip_conntrack_tftp.c | 1
-rw-r--r-- net/ipv4/netfilter/ip_nat_core.c | 18
-rw-r--r-- net/ipv4/netfilter/ip_nat_standalone.c | 22
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 7
-rw-r--r-- net/ipv4/netfilter/ipt_ULOG.c | 26
-rw-r--r-- net/ipv4/netfilter/ipt_policy.c | 11
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 5
-rw-r--r-- net/ipv4/proc.c | 2
-rw-r--r-- net/ipv4/route.c | 2
-rw-r--r-- net/ipv4/tcp_htcp.c | 1
-rw-r--r-- net/ipv4/tcp_input.c | 3
-rw-r--r-- net/ipv4/tcp_ipv4.c | 3
-rw-r--r-- net/ipv4/xfrm4_output.c | 13
-rw-r--r-- net/ipv4/xfrm4_policy.c | 6
-rw-r--r-- net/ipv6/addrconf.c | 9
-rw-r--r-- net/ipv6/af_inet6.c | 6
-rw-r--r-- net/ipv6/icmp.c | 6
-rw-r--r-- net/ipv6/ip6_tunnel.c | 2
-rw-r--r-- net/ipv6/mcast.c | 56
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 7
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r-- net/ipv6/netfilter/ip6t_policy.c | 7
-rw-r--r-- net/ipv6/proc.c | 2
-rw-r--r-- net/ipv6/raw.c | 6
-rw-r--r-- net/ipv6/tcp_ipv6.c | 1
-rw-r--r-- net/ipv6/xfrm6_policy.c | 1
-rw-r--r-- net/irda/irda_device.c | 4
-rw-r--r-- net/irda/irnet/irnet_irda.c | 2
-rw-r--r-- net/key/af_key.c | 4
-rw-r--r-- net/netfilter/Kconfig | 10
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 7
-rw-r--r-- net/netfilter/nf_conntrack_ftp.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r-- net/netfilter/nf_conntrack_proto_tcp.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_proto_udp.c | 4
-rw-r--r-- net/netfilter/nfnetlink_log.c | 20
-rw-r--r-- net/netfilter/nfnetlink_queue.c | 3
-rw-r--r-- net/netlink/af_netlink.c | 7
-rw-r--r-- net/netlink/genetlink.c | 11
-rw-r--r-- net/packet/af_packet.c | 16
-rw-r--r-- net/sctp/input.c | 75
-rw-r--r-- net/sctp/inqueue.c | 4
-rw-r--r-- net/sctp/output.c | 2
-rw-r--r-- net/sctp/outqueue.c | 12
-rw-r--r-- net/sctp/proc.c | 32
-rw-r--r-- net/sctp/sm_make_chunk.c | 16
-rw-r--r-- net/sctp/sm_sideeffect.c | 4
-rw-r--r-- net/sctp/sm_statefuns.c | 10
-rw-r--r-- net/sctp/socket.c | 8
-rw-r--r-- net/sctp/sysctl.c | 7
-rw-r--r-- net/sctp/transport.c | 2
-rw-r--r-- net/socket.c | 2
-rw-r--r-- net/sunrpc/auth.c | 25
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 40
-rw-r--r-- net/sunrpc/auth_unix.c | 6
-rw-r--r-- net/sunrpc/rpc_pipe.c | 102
-rw-r--r-- net/sunrpc/sched.c | 9
-rw-r--r-- net/xfrm/xfrm_policy.c | 14
-rw-r--r-- net/xfrm/xfrm_state.c | 8
-rw-r--r-- net/xfrm/xfrm_user.c | 2
98 files changed, 819 insertions, 477 deletions
diff --git a/net/802/p8023.c b/net/802/p8023.c
index d23e906456eb..53cf05709283 100644
--- a/net/802/p8023.c
+++ b/net/802/p8023.c
@@ -59,3 +59,5 @@ void destroy_8023_client(struct datalink_proto *dl)
 
 EXPORT_SYMBOL(destroy_8023_client);
 EXPORT_SYMBOL(make_8023_client);
+
+MODULE_LICENSE("GPL");
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 4d638944d933..34e42968b477 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -59,8 +59,10 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
     proto = find_snap_client(skb->h.raw);
     if (proto) {
         /* Pass the frame on. */
+        u8 *hdr = skb->data;
         skb->h.raw += 5;
         skb_pull(skb, 5);
+        skb_postpull_rcsum(skb, hdr, 5);
         rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
     } else {
         skb->sk = NULL;
diff --git a/net/Kconfig b/net/Kconfig
index bc603d9aea56..5126f58d9c44 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -27,6 +27,13 @@ if NET
 
 menu "Networking options"
 
+config NETDEBUG
+    bool "Network packet debugging"
+    help
+      You can say Y here if you want to get additional messages useful in
+      debugging bad packets, but can overwhelm logs under denial of service
+      attacks.
+
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/xfrm/Kconfig"
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index e7211a7f382c..93ad59a28ef5 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -56,7 +56,8 @@ static void sigd_put_skb(struct sk_buff *skb)
     remove_wait_queue(&sigd_sleep,&wait);
 #else
     if (!sigd) {
-        printk(KERN_WARNING "atmsvc: no signaling demon\n");
+        if (net_ratelimit())
+            printk(KERN_WARNING "atmsvc: no signaling demon\n");
         kfree_skb(skb);
         return;
     }
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index bdb6458c6bd5..97bdec73d17e 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -143,13 +143,15 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 static int hci_sock_release(struct socket *sock)
 {
     struct sock *sk = sock->sk;
-    struct hci_dev *hdev = hci_pi(sk)->hdev;
+    struct hci_dev *hdev;
 
     BT_DBG("sock %p sk %p", sock, sk);
 
     if (!sk)
         return 0;
 
+    hdev = hci_pi(sk)->hdev;
+
     bt_sock_unlink(&hci_sk_list, sk);
 
     if (hdev) {
@@ -311,14 +313,18 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
 {
     struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
     struct sock *sk = sock->sk;
+    struct hci_dev *hdev = hci_pi(sk)->hdev;
 
     BT_DBG("sock %p sk %p", sock, sk);
 
+    if (!hdev)
+        return -EBADFD;
+
     lock_sock(sk);
 
     *addr_len = sizeof(*haddr);
     haddr->hci_family = AF_BLUETOOTH;
-    haddr->hci_dev = hci_pi(sk)->hdev->id;
+    haddr->hci_dev = hdev->id;
 
     release_sock(sk);
     return 0;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 0d89d6434136..5b4253c61f62 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -46,13 +46,15 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/rfcomm.h>
 
-#define VERSION "1.6"
-
 #ifndef CONFIG_BT_RFCOMM_DEBUG
 #undef BT_DBG
 #define BT_DBG(D...)
 #endif
 
+#define VERSION "1.7"
+
+static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
+
 static struct task_struct *rfcomm_thread;
 
 static DECLARE_MUTEX(rfcomm_sem);
@@ -623,7 +625,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
     /* Set L2CAP options */
     sk = sock->sk;
     lock_sock(sk);
-    l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
+    l2cap_pi(sk)->imtu = l2cap_mtu;
     release_sock(sk);
 
     s = rfcomm_session_add(sock, BT_BOUND);
@@ -1868,7 +1870,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
     /* Set L2CAP options */
     sk = sock->sk;
     lock_sock(sk);
-    l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
+    l2cap_pi(sk)->imtu = l2cap_mtu;
     release_sock(sk);
 
     /* Start listening on the socket */
@@ -2070,6 +2072,9 @@ static void __exit rfcomm_exit(void)
 module_init(rfcomm_init);
 module_exit(rfcomm_exit);
 
+module_param(l2cap_mtu, uint, 0644);
+MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
+
 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION);
 MODULE_VERSION(VERSION);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ba442883e877..7fa3a5a9971f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -79,9 +79,14 @@ static int port_cost(struct net_device *dev)
  */
 static void port_carrier_check(void *arg)
 {
-    struct net_bridge_port *p = arg;
+    struct net_device *dev = arg;
+    struct net_bridge_port *p;
 
     rtnl_lock();
+    p = dev->br_port;
+    if (!p)
+        goto done;
+
     if (netif_carrier_ok(p->dev)) {
         u32 cost = port_cost(p->dev);
 
@@ -97,9 +102,24 @@ static void port_carrier_check(void *arg)
         br_stp_disable_port(p);
         spin_unlock_bh(&p->br->lock);
     }
+done:
     rtnl_unlock();
 }
 
+static void release_nbp(struct kobject *kobj)
+{
+    struct net_bridge_port *p
+        = container_of(kobj, struct net_bridge_port, kobj);
+    kfree(p);
+}
+
+static struct kobj_type brport_ktype = {
+#ifdef CONFIG_SYSFS
+    .sysfs_ops = &brport_sysfs_ops,
+#endif
+    .release = release_nbp,
+};
+
 static void destroy_nbp(struct net_bridge_port *p)
 {
     struct net_device *dev = p->dev;
@@ -108,7 +128,7 @@ static void destroy_nbp(struct net_bridge_port *p)
     p->dev = NULL;
     dev_put(dev);
 
-    br_sysfs_freeif(p);
+    kobject_put(&p->kobj);
 }
 
 static void destroy_nbp_rcu(struct rcu_head *head)
@@ -118,17 +138,25 @@ static void destroy_nbp_rcu(struct rcu_head *head)
     destroy_nbp(p);
 }
 
-/* called with RTNL */
+/* Delete port(interface) from bridge is done in two steps.
+ * via RCU. First step, marks device as down. That deletes
+ * all the timers and stops new packets from flowing through.
+ *
+ * Final cleanup doesn't occur until after all CPU's finished
+ * processing packets.
+ *
+ * Protected from multiple admin operations by RTNL mutex
+ */
 static void del_nbp(struct net_bridge_port *p)
 {
     struct net_bridge *br = p->br;
     struct net_device *dev = p->dev;
 
-    dev->br_port = NULL;
+    sysfs_remove_link(&br->ifobj, dev->name);
+
     dev_set_promiscuity(dev, -1);
 
     cancel_delayed_work(&p->carrier_check);
-    flush_scheduled_work();
 
     spin_lock_bh(&br->lock);
     br_stp_disable_port(p);
@@ -138,10 +166,10 @@ static void del_nbp(struct net_bridge_port *p)
 
     list_del_rcu(&p->list);
 
-    del_timer_sync(&p->message_age_timer);
-    del_timer_sync(&p->forward_delay_timer);
-    del_timer_sync(&p->hold_timer);
+    rcu_assign_pointer(dev->br_port, NULL);
+
+    kobject_del(&p->kobj);
 
     call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
@@ -151,7 +179,6 @@ static void del_br(struct net_bridge *br)
     struct net_bridge_port *p, *n;
 
     list_for_each_entry_safe(p, n, &br->port_list, list) {
-        br_sysfs_removeif(p);
         del_nbp(p);
     }
 
@@ -245,13 +272,17 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
     p->dev = dev;
     p->path_cost = port_cost(dev);
     p->priority = 0x8000 >> BR_PORT_BITS;
-    dev->br_port = p;
     p->port_no = index;
     br_init_port(p);
     p->state = BR_STATE_DISABLED;
-    INIT_WORK(&p->carrier_check, port_carrier_check, p);
+    INIT_WORK(&p->carrier_check, port_carrier_check, dev);
     kobject_init(&p->kobj);
 
+    kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
+    p->kobj.ktype = &brport_ktype;
+    p->kobj.parent = &(dev->class_dev.kobj);
+    p->kobj.kset = NULL;
+
     return p;
 }
 
@@ -379,30 +410,43 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
     if (dev->br_port != NULL)
         return -EBUSY;
 
-    if (IS_ERR(p = new_nbp(br, dev)))
+    p = new_nbp(br, dev);
+    if (IS_ERR(p))
         return PTR_ERR(p);
 
-    if ((err = br_fdb_insert(br, p, dev->dev_addr)))
-        destroy_nbp(p);
-
-    else if ((err = br_sysfs_addif(p)))
-        del_nbp(p);
-    else {
-        dev_set_promiscuity(dev, 1);
+    err = kobject_add(&p->kobj);
+    if (err)
+        goto err0;
 
-        list_add_rcu(&p->list, &br->port_list);
+    err = br_fdb_insert(br, p, dev->dev_addr);
+    if (err)
+        goto err1;
 
-        spin_lock_bh(&br->lock);
-        br_stp_recalculate_bridge_id(br);
-        br_features_recompute(br);
-        if ((br->dev->flags & IFF_UP)
-            && (dev->flags & IFF_UP) && netif_carrier_ok(dev))
-            br_stp_enable_port(p);
-        spin_unlock_bh(&br->lock);
+    err = br_sysfs_addif(p);
+    if (err)
+        goto err2;
 
-        dev_set_mtu(br->dev, br_min_mtu(br));
-    }
+    rcu_assign_pointer(dev->br_port, p);
+    dev_set_promiscuity(dev, 1);
+
+    list_add_rcu(&p->list, &br->port_list);
+
+    spin_lock_bh(&br->lock);
+    br_stp_recalculate_bridge_id(br);
+    br_features_recompute(br);
+    schedule_delayed_work(&p->carrier_check, BR_PORT_DEBOUNCE);
+    spin_unlock_bh(&br->lock);
 
+    dev_set_mtu(br->dev, br_min_mtu(br));
+    kobject_uevent(&p->kobj, KOBJ_ADD);
+
+    return 0;
+err2:
+    br_fdb_delete_by_port(br, p);
+err1:
+    kobject_del(&p->kobj);
+err0:
+    kobject_put(&p->kobj);
     return err;
 }
 
@@ -414,7 +458,6 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
     if (!p || p->br != br)
         return -EINVAL;
 
-    br_sysfs_removeif(p);
     del_nbp(p);
 
     spin_lock_bh(&br->lock);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index e3a73cead6b6..4eef83755315 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -45,18 +45,20 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
 int br_handle_frame_finish(struct sk_buff *skb)
 {
     const unsigned char *dest = eth_hdr(skb)->h_dest;
-    struct net_bridge_port *p = skb->dev->br_port;
-    struct net_bridge *br = p->br;
+    struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+    struct net_bridge *br;
     struct net_bridge_fdb_entry *dst;
     int passedup = 0;
 
+    if (!p || p->state == BR_STATE_DISABLED)
+        goto drop;
+
     /* insert into forwarding database after filtering to avoid spoofing */
-    br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+    br = p->br;
+    br_fdb_update(br, p, eth_hdr(skb)->h_source);
 
-    if (p->state == BR_STATE_LEARNING) {
-        kfree_skb(skb);
-        goto out;
-    }
+    if (p->state == BR_STATE_LEARNING)
+        goto drop;
 
     if (br->dev->flags & IFF_PROMISC) {
         struct sk_buff *skb2;
@@ -93,6 +95,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
 out:
     return 0;
+drop:
+    kfree_skb(skb);
+    goto out;
 }
 
 /*
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 7cac3fb9f809..e060aad8624d 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -51,9 +51,6 @@
 #define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr)
 #define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr)
 
-#define has_bridge_parent(device) ((device)->br_port != NULL)
-#define bridge_parent(device) ((device)->br_port->br->dev)
-
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables = 1;
@@ -93,11 +90,18 @@ static struct rtable __fake_rtable = {
             .dev = &__fake_net_device,
             .path = &__fake_rtable.u.dst,
             .metrics = {[RTAX_MTU - 1] = 1500},
+            .flags = DST_NOXFRM,
         }
     },
     .rt_flags = 0,
 };
 
+static inline struct net_device *bridge_parent(const struct net_device *dev)
+{
+    struct net_bridge_port *port = rcu_dereference(dev->br_port);
+
+    return port ? port->br->dev : NULL;
+}
 
 /* PF_BRIDGE/PRE_ROUTING *********************************************/
 /* Undo the changes made for ip6tables PREROUTING and continue the
@@ -189,11 +193,15 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
     skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 
     skb->dev = bridge_parent(skb->dev);
-    if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
-        skb_pull(skb, VLAN_HLEN);
-        skb->nh.raw += VLAN_HLEN;
+    if (!skb->dev)
+        kfree_skb(skb);
+    else {
+        if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+            skb_pull(skb, VLAN_HLEN);
+            skb->nh.raw += VLAN_HLEN;
+        }
+        skb->dst->output(skb);
     }
-    skb->dst->output(skb);
     return 0;
 }
 
@@ -270,7 +278,7 @@ bridged_dnat:
 }
 
 /* Some common code for IPv4/IPv6 */
-static void setup_pre_routing(struct sk_buff *skb)
+static struct net_device *setup_pre_routing(struct sk_buff *skb)
 {
     struct nf_bridge_info *nf_bridge = skb->nf_bridge;
 
@@ -282,6 +290,8 @@ static void setup_pre_routing(struct sk_buff *skb)
     nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
     nf_bridge->physindev = skb->dev;
     skb->dev = bridge_parent(skb->dev);
+
+    return skb->dev;
 }
 
 /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
@@ -376,7 +386,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
         nf_bridge_put(skb->nf_bridge);
     if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
         return NF_DROP;
-    setup_pre_routing(skb);
+    if (!setup_pre_routing(skb))
+        return NF_DROP;
 
     NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
         br_nf_pre_routing_finish_ipv6);
@@ -465,7 +476,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
         nf_bridge_put(skb->nf_bridge);
     if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
         return NF_DROP;
-    setup_pre_routing(skb);
+    if (!setup_pre_routing(skb))
+        return NF_DROP;
     store_orig_dstaddr(skb);
 
     NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
@@ -539,11 +551,16 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
     struct sk_buff *skb = *pskb;
     struct nf_bridge_info *nf_bridge;
     struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+    struct net_device *parent;
     int pf;
 
     if (!skb->nf_bridge)
         return NF_ACCEPT;
 
+    parent = bridge_parent(out);
+    if (!parent)
+        return NF_DROP;
+
     if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
         pf = PF_INET;
     else
@@ -564,8 +581,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
     nf_bridge->mask |= BRNF_BRIDGED;
     nf_bridge->physoutdev = skb->dev;
 
-    NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in),
-        bridge_parent(out), br_nf_forward_finish);
+    NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in), parent,
+        br_nf_forward_finish);
 
     return NF_STOLEN;
 }
@@ -688,6 +705,8 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
         goto out;
     }
     realoutdev = bridge_parent(skb->dev);
+    if (!realoutdev)
+        return NF_DROP;
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
     /* iptables should match -o br0.x */
@@ -701,9 +720,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
         /* IP forwarded traffic has a physindev, locally
          * generated traffic hasn't. */
         if (realindev != NULL) {
-            if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) &&
-                has_bridge_parent(realindev))
-                realindev = bridge_parent(realindev);
+            if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) ) {
+                struct net_device *parent = bridge_parent(realindev);
+                if (parent)
+                    realindev = parent;
+            }
 
             NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev,
                        realoutdev, br_nf_local_out_finish,
@@ -743,6 +764,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
     if (!nf_bridge)
         return NF_ACCEPT;
 
+    if (!realoutdev)
+        return NF_DROP;
+
     if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
         pf = PF_INET;
     else
@@ -782,8 +806,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 print_error:
     if (skb->dev != NULL) {
         printk("[%s]", skb->dev->name);
-        if (has_bridge_parent(skb->dev))
-            printk("[%s]", bridge_parent(skb->dev)->name);
+        if (realoutdev)
+            printk("[%s]", realoutdev->name);
     }
     printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw,
            skb->data);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c5bd631ffcd5..8f10e09f251b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -232,9 +232,8 @@ extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
+extern struct sysfs_ops brport_sysfs_ops;
 extern int br_sysfs_addif(struct net_bridge_port *p);
-extern void br_sysfs_removeif(struct net_bridge_port *p);
-extern void br_sysfs_freeif(struct net_bridge_port *p);
 
 /* br_sysfs_br.c */
 extern int br_sysfs_addbr(struct net_device *dev);
@@ -243,8 +242,6 @@ extern void br_sysfs_delbr(struct net_device *dev);
 #else
 
 #define br_sysfs_addif(p) (0)
-#define br_sysfs_removeif(p) do { } while(0)
-#define br_sysfs_freeif(p) kfree(p)
 #define br_sysfs_addbr(dev) (0)
 #define br_sysfs_delbr(dev) do { } while(0)
 #endif /* CONFIG_SYSFS */
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index d071f1c9ad0b..296f6a487c52 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -133,29 +133,35 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 
 static const unsigned char header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
 
-/* NO locks */
+/* NO locks, but rcu_read_lock (preempt_disabled) */
 int br_stp_handle_bpdu(struct sk_buff *skb)
 {
-    struct net_bridge_port *p = skb->dev->br_port;
-    struct net_bridge *br = p->br;
+    struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+    struct net_bridge *br;
     unsigned char *buf;
 
+    if (!p)
+        goto err;
+
+    br = p->br;
+    spin_lock(&br->lock);
+
+    if (p->state == BR_STATE_DISABLED || !(br->dev->flags & IFF_UP))
+        goto out;
+
     /* insert into forwarding database after filtering to avoid spoofing */
-    br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+    br_fdb_update(br, p, eth_hdr(skb)->h_source);
+
+    if (!br->stp_enabled)
+        goto out;
 
     /* need at least the 802 and STP headers */
     if (!pskb_may_pull(skb, sizeof(header)+1) ||
         memcmp(skb->data, header, sizeof(header)))
-        goto err;
+        goto out;
 
     buf = skb_pull(skb, sizeof(header));
 
-    spin_lock_bh(&br->lock);
-    if (p->state == BR_STATE_DISABLED
-        || !(br->dev->flags & IFF_UP)
-        || !br->stp_enabled)
-        goto out;
-
     if (buf[0] == BPDU_TYPE_CONFIG) {
         struct br_config_bpdu bpdu;
 
@@ -201,7 +207,7 @@ int br_stp_handle_bpdu(struct sk_buff *skb)
             br_received_tcn_bpdu(p);
         }
  out:
-    spin_unlock_bh(&br->lock);
+    spin_unlock(&br->lock);
  err:
     kfree_skb(skb);
     return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index cc047f7fb6ef..35cf3a074087 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -67,7 +67,7 @@ void br_stp_disable_bridge(struct net_bridge *br)
 {
     struct net_bridge_port *p;
 
-    spin_lock(&br->lock);
+    spin_lock_bh(&br->lock);
     list_for_each_entry(p, &br->port_list, list) {
         if (p->state != BR_STATE_DISABLED)
             br_stp_disable_port(p);
@@ -76,7 +76,7 @@ void br_stp_disable_bridge(struct net_bridge *br)
 
     br->topology_change = 0;
     br->topology_change_detected = 0;
-    spin_unlock(&br->lock);
+    spin_unlock_bh(&br->lock);
 
     del_timer_sync(&br->hello_timer);
     del_timer_sync(&br->topology_change_timer);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 0ac0355d16dd..c51c9e42aeb3 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -195,23 +195,11 @@ static ssize_t brport_store(struct kobject * kobj,
     return ret;
 }
 
-/* called from kobject_put when port ref count goes to zero. */
-static void brport_release(struct kobject *kobj)
-{
-    kfree(container_of(kobj, struct net_bridge_port, kobj));
-}
-
-static struct sysfs_ops brport_sysfs_ops = {
+struct sysfs_ops brport_sysfs_ops = {
     .show = brport_show,
     .store = brport_store,
 };
 
-static struct kobj_type brport_ktype = {
-    .sysfs_ops = &brport_sysfs_ops,
-    .release = brport_release,
-};
-
-
 /*
  * Add sysfs entries to ethernet device added to a bridge.
  * Creates a brport subdirectory with bridge attributes.
@@ -223,17 +211,6 @@ int br_sysfs_addif(struct net_bridge_port *p)
     struct brport_attribute **a;
     int err;
 
-    ASSERT_RTNL();
-
-    kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
-    p->kobj.ktype = &brport_ktype;
-    p->kobj.parent = &(p->dev->class_dev.kobj);
-    p->kobj.kset = NULL;
-
-    err = kobject_add(&p->kobj);
-    if(err)
-        goto out1;
-
     err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj,
                 SYSFS_BRIDGE_PORT_LINK);
     if (err)
@@ -245,28 +222,7 @@ int br_sysfs_addif(struct net_bridge_port *p)
         goto out2;
     }
 
-    err = sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
-    if (err)
-        goto out2;
-
-    kobject_uevent(&p->kobj, KOBJ_ADD);
-    return 0;
- out2:
-    kobject_del(&p->kobj);
- out1:
+    err= sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
+out2:
     return err;
 }
-
-void br_sysfs_removeif(struct net_bridge_port *p)
-{
-    pr_debug("br_sysfs_removeif\n");
-    sysfs_remove_link(&p->br->ifobj, p->dev->name);
-    kobject_uevent(&p->kobj, KOBJ_REMOVE);
-    kobject_del(&p->kobj);
-}
-
-void br_sysfs_freeif(struct net_bridge_port *p)
-{
-    pr_debug("br_sysfs_freeif\n");
-    kobject_put(&p->kobj);
-}
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce617b3dbbb8..802baf755ef4 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -46,7 +46,7 @@
 #define PRINTR(format, args...) do { if (net_ratelimit()) \
     printk(format , ## args); } while (0)
 
-static unsigned int nlbufsiz = 4096;
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
 module_param(nlbufsiz, uint, 0600);
 MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
            "(defaults to 4096)");
@@ -98,12 +98,14 @@ static void ulog_timer(unsigned long data)
 static struct sk_buff *ulog_alloc_skb(unsigned int size)
 {
     struct sk_buff *skb;
+    unsigned int n;
 
-    skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
+    n = max(size, nlbufsiz);
+    skb = alloc_skb(n, GFP_ATOMIC);
     if (!skb) {
         PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
-               "of size %ub!\n", nlbufsiz);
-        if (size < nlbufsiz) {
+               "of size %ub!\n", n);
+        if (n > size) {
             /* try to allocate only as much as we need for
              * current packet */
             skb = alloc_skb(size, GFP_ATOMIC);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 00729b3604f8..cbd4020cc84d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -934,6 +934,13 @@ static int do_replace(void __user *user, unsigned int len)
         BUGPRINT("Entries_size never zero\n");
         return -EINVAL;
     }
+    /* overflow check */
+    if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
+            SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+        return -ENOMEM;
+    if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
+        return -ENOMEM;
+
     countersize = COUNTER_OFFSET(tmp.nentries) *
                     (highest_possible_processor_id()+1);
     newinfo = (struct ebt_table_info *)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f8d322e1ea92..b8ce6bf81188 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,49 +247,74 @@ EXPORT_SYMBOL(skb_kill_datagram);
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
                 struct iovec *to, int len)
 {
-    int i, err, fraglen, end = 0;
-    struct sk_buff *next = skb_shinfo(skb)->frag_list;
+    int start = skb_headlen(skb);
+    int i, copy = start - offset;
 
-    if (!len)
-        return 0;
+    /* Copy header. */
+    if (copy > 0) {
+        if (copy > len)
+            copy = len;
+        if (memcpy_toiovec(to, skb->data + offset, copy))
+            goto fault;
+        if ((len -= copy) == 0)
+            return 0;
+        offset += copy;
+    }
 
-next_skb:
-    fraglen = skb_headlen(skb);
-    i = -1;
+    /* Copy paged appendix. Hmm... why does this look so complicated? */
+    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+        int end;
 
-    while (1) {
-        int start = end;
+        BUG_TRAP(start <= offset + len);
 
-        if ((end += fraglen) > offset) {
-            int copy = end - offset, o = offset - start;
+        end = start + skb_shinfo(skb)->frags[i].size;
+        if ((copy = end - offset) > 0) {
+            int err;
+            u8 *vaddr;
+            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+            struct page *page = frag->page;
 
             if (copy > len)
                 copy = len;
-            if (i == -1)
-                err = memcpy_toiovec(to, skb->data + o, copy);
-            else {
-                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                struct page *page = frag->page;
-                void *p = kmap(page) + frag->page_offset + o;
-                err = memcpy_toiovec(to, p, copy);
-                kunmap(page);
-            }
+            vaddr = kmap(page);
+            err = memcpy_toiovec(to, vaddr + frag->page_offset +
+                         offset - start, copy);
+            kunmap(page);
             if (err)
                 goto fault;
             if (!(len -= copy))
                 return 0;
             offset += copy;
         }
-        if (++i >= skb_shinfo(skb)->nr_frags)
-            break;
-        fraglen = skb_shinfo(skb)->frags[i].size;
+        start = end;
     }
-    if (next) {
-        skb = next;
-        BUG_ON(skb_shinfo(skb)->frag_list);
-        next = skb->next;
-        goto next_skb;
+
+    if (skb_shinfo(skb)->frag_list) {
+        struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+        for (; list; list = list->next) {
+            int end;
+
+            BUG_TRAP(start <= offset + len);
+
+            end = start + list->len;
+            if ((copy = end - offset) > 0) {
+                if (copy > len)
+                    copy = len;
+                if (skb_copy_datagram_iovec(list,
+                                offset - start,
+                                to, copy))
+                    goto fault;
+                if ((len -= copy) == 0)
+                    return 0;
+                offset += copy;
+            }
+            start = end;
+        }
     }
+    if (!len)
+        return 0;
+
 fault:
     return -EFAULT;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index fd070a098f20..2afb0de95329 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2543,13 +2543,14 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
     case SIOCBONDENSLAVE:
     case SIOCBONDRELEASE:
     case SIOCBONDSETHWADDR:
-    case SIOCBONDSLAVEINFOQUERY:
-    case SIOCBONDINFOQUERY:
     case SIOCBONDCHANGEACTIVE:
     case SIOCBRADDIF:
     case SIOCBRDELIF:
         if (!capable(CAP_NET_ADMIN))
             return -EPERM;
+        /* fall through */
+    case SIOCBONDSLAVEINFOQUERY:
+    case SIOCBONDINFOQUERY:
         dev_load(ifr.ifr_name);
         rtnl_lock();
         ret = dev_ifsioc(&ifr, cmd);
@@ -3236,7 +3237,7 @@ static int __init net_dev_init(void)
      * Initialise the packet receive queues.
      */
 
-    for (i = 0; i < NR_CPUS; i++) {
+    for_each_cpu(i) {
         struct softnet_data *queue;
 
         queue = &per_cpu(softnet_data, i);
diff --git a/net/core/filter.c b/net/core/filter.c
index 9540946a48f3..93fbd01d2259 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -64,7 +64,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
 }
 
 /**
- * sk_run_filter - run a filter on a socket
+ * sk_run_filter - run a filter on a socket
 * @skb: buffer to run the filter on
 * @filter: filter to apply
 * @flen: length of filter
@@ -78,8 +78,8 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 {
     struct sock_filter *fentry;  /* We walk down these */
     void *ptr;
-    u32 A = 0;      /* Accumulator */
-    u32 X = 0;      /* Index Register */
+    u32 A = 0;      /* Accumulator */
+    u32 X = 0;      /* Index Register */
     u32 mem[BPF_MEMWORDS];  /* Scratch Memory Store */
     u32 tmp;
     int k;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8700379685e0..eca2976abb25 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -455,7 +455,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
     if (!skb)
         return;
 
-    if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) {
+    if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change, 0) < 0) {
         kfree_skb(skb);
         return;
     }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d0732e9c8560..2144952d1c6c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -135,13 +135,15 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                 int fclone)
 {
+    kmem_cache_t *cache;
     struct skb_shared_info *shinfo;
     struct sk_buff *skb;
     u8 *data;
 
+    cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+
     /* Get the HEAD */
-    skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache,
-                   gfp_mask & ~__GFP_DMA);
+    skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
     if (!skb)
         goto out;
 
@@ -180,7 +182,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 out:
     return skb;
 nodata:
-    kmem_cache_free(skbuff_head_cache, skb);
+    kmem_cache_free(cache, skb);
     skb = NULL;
     goto out;
 }
@@ -409,6 +411,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
     C(pkt_type);
     C(ip_summed);
     C(priority);
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+    C(ipvs_property);
+#endif
     C(protocol);
     n->destructor = NULL;
 #ifdef CONFIG_NETFILTER
@@ -420,13 +425,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
     C(nfct_reasm);
     nf_conntrack_get_reasm(skb->nfct_reasm);
 #endif
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-    C(ipvs_property);
-#endif
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-    C(nfct_reasm);
-    nf_conntrack_get_reasm(skb->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
     C(nf_bridge);
     nf_bridge_get(skb->nf_bridge);
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fcf8673..fdc4f38bc46c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
     int i;
 
-    for (i = 0; i < NR_CPUS; i++) {
+    for_each_cpu(i) {
         struct nrnd_state *state = &per_cpu(net_rand_state,i);
         __net_srandom(state, i+jiffies);
     }
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
     unsigned long seed[NR_CPUS];
 
     get_random_bytes(seed, sizeof(seed));
-    for (i = 0; i < NR_CPUS; i++) {
+    for_each_cpu(i) {
         struct nrnd_state *state = &per_cpu(net_rand_state,i);
         __net_srandom(state, seed[i]);
     }
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index d2b5933b4510..add3cae65e2d 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -15,7 +15,6 @@
 #include <linux/config.h>
 #include <linux/module.h>
 
-#include <asm/bug.h>
 #include <asm/div64.h>
 
 #include "tfrc.h"
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00f983226672..dc0487b5bace 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -119,7 +119,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
     if (err != 0)
         goto failure;
 
-    err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+    err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
+                sk);
     if (err != 0)
         goto failure;
 
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index df074259f9c3..80c4d048869e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -468,6 +468,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 done:
     if (opt && opt != np->opt)
         sock_kfree_s(sk, opt, opt->tot_len);
+    dst_release(dst);
     return err;
 }
 
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 9890fd97e538..c971f14712ec 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -95,6 +95,12 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
         saddr = dev->dev_addr;
     memcpy(eth->h_source,saddr,dev->addr_len);
 
+    if(daddr)
+    {
+        memcpy(eth->h_dest,daddr,dev->addr_len);
+        return ETH_HLEN;
+    }
+
     /*
      * Anyway, the loopback-device should never use this function...
      */
@@ -105,12 +111,6 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
         return ETH_HLEN;
     }
 
-    if(daddr)
-    {
-        memcpy(eth->h_dest,daddr,dev->addr_len);
-        return ETH_HLEN;
-    }
-
     return -ETH_HLEN;
 }
 
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 7a121802faa9..960aa78cdb97 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -350,6 +350,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
     u8 src[ETH_ALEN];
     struct ieee80211_crypt_data *crypt = NULL;
     int keyidx = 0;
+    int can_be_decrypted = 0;
 
     hdr = (struct ieee80211_hdr_4addr *)skb->data;
     stats = &ieee->stats;
@@ -410,12 +411,23 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
         return 1;
     }
 
-    if (is_multicast_ether_addr(hdr->addr1)
-        ? ieee->host_mc_decrypt : ieee->host_decrypt) {
+    can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) ||
+                is_broadcast_ether_addr(hdr->addr2)) ?
+        ieee->host_mc_decrypt : ieee->host_decrypt;
+
+    if (can_be_decrypted) {
         int idx = 0;
-        if (skb->len >= hdrlen + 3)
+        if (skb->len >= hdrlen + 3) {
+            /* Top two-bits of byte 3 are the key index */
             idx = skb->data[hdrlen + 3] >> 6;
+        }
+
+        /* ieee->crypt[] is WEP_KEY (4) in length. Given that idx
+         * is only allowed 2-bits of storage, no value of idx can
+         * be provided via above code that would result in idx
+         * being out of range */
         crypt = ieee->crypt[idx];
+
 #ifdef NOT_YET
         sta = NULL;
 
@@ -553,7 +565,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
     /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
 
-    if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+    if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
         (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
         goto rx_dropped;
 
@@ -617,7 +629,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
     /* skb: hdr + (possible reassembled) full MSDU payload; possibly still
      * encrypted/authenticated */
-    if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+    if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
         ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
         goto rx_dropped;
 
@@ -1439,7 +1451,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
         break;
 
     case IEEE80211_STYPE_PROBE_REQ:
-        IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+        IEEE80211_DEBUG_MGMT("received auth (%d)\n",
                      WLAN_FC_GET_STYPE(le16_to_cpu
                            (header->frame_ctl)));
 
@@ -1473,7 +1485,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
         break;
     case IEEE80211_STYPE_AUTH:
 
-        IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+        IEEE80211_DEBUG_MGMT("received auth (%d)\n",
                      WLAN_FC_GET_STYPE(le16_to_cpu
                            (header->frame_ctl)));
 
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 23e1630f50b7..f87c6b89f845 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -232,15 +232,18 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
     return start;
 }
 
+#define SCAN_ITEM_SIZE 128
+
 int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
               struct iw_request_info *info,
               union iwreq_data *wrqu, char *extra)
 {
     struct ieee80211_network *network;
     unsigned long flags;
+    int err = 0;
 
     char *ev = extra;
-    char *stop = ev + IW_SCAN_MAX_DATA;
+    char *stop = ev + wrqu->data.length;
     int i = 0;
 
     IEEE80211_DEBUG_WX("Getting scan\n");
@@ -249,6 +252,11 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
 
     list_for_each_entry(network, &ieee->network_list, list) {
         i++;
+        if (stop - ev < SCAN_ITEM_SIZE) {
+            err = -E2BIG;
+            break;
+        }
+
         if (ieee->scan_age == 0 ||
             time_after(network->last_scanned + ieee->scan_age, jiffies))
             ev = ipw2100_translate_scan(ieee, ev, stop, network);
@@ -270,7 +278,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
 
     IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
 
-    return 0;
+    return err;
 }
 
 int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 95b9d81ac488..3ffa60dadc0c 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1135,7 +1135,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
 
     if (!skb)
         netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, ENOBUFS);
-    else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
+    else if (inet_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) {
         kfree_skb(skb);
         netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, EINVAL);
     } else {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ef4724de7350..0f4145babb14 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1045,7 +1045,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
     }
 
     nl->nlmsg_flags = NLM_F_REQUEST;
-    nl->nlmsg_pid = current->pid;
+    nl->nlmsg_pid = 0;
     nl->nlmsg_seq = 0;
     nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
     if (cmd == SIOCDELRT) {
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 105039eb7629..e7bbff4340bb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -192,7 +192,7 @@ int sysctl_icmp_echo_ignore_all;
 int sysctl_icmp_echo_ignore_broadcasts = 1;
 
 /* Control parameter - ignore bogus broadcast responses? */
-int sysctl_icmp_ignore_bogus_error_responses;
+int sysctl_icmp_ignore_bogus_error_responses = 1;
 
 /*
  * Configurable global rate limit.
@@ -385,7 +385,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
     u32 daddr;
 
     if (ip_options_echo(&icmp_param->replyopts, skb))
-        goto out;
+        return;
 
     if (icmp_xmit_lock())
         return;
@@ -416,7 +416,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
     ip_rt_put(rt);
 out_unlock:
     icmp_xmit_unlock();
-out:;
 }
 
 
@@ -525,7 +524,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
                             iph->tos;
 
     if (ip_options_echo(&icmp_param.replyopts, skb_in))
-        goto ende;
+        goto out_unlock;
 
 
     /*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d8ce7133cd8f..64ce52bf0485 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -970,7 +970,7 @@ int igmp_rcv(struct sk_buff *skb)
     case IGMP_MTRACE_RESP:
         break;
     default:
-        NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
+        break;
     }
 
 drop:
@@ -1578,7 +1578,7 @@ static int sf_setstate(struct ip_mc_list *pmc)
         new_in = psf->sf_count[MCAST_INCLUDE] != 0;
         if (new_in) {
             if (!psf->sf_oldin) {
-                struct ip_sf_list *prev = 0;
+                struct ip_sf_list *prev = NULL;
 
                 for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) {
                     if (dpsf->sf_inaddr == psf->sf_inaddr)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index abe23923e4e7..9981dcd68f11 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -830,7 +830,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
830 skb->h.raw = skb->nh.raw; 830 skb->h.raw = skb->nh.raw;
831 skb->nh.raw = skb_push(skb, gre_hlen); 831 skb->nh.raw = skb_push(skb, gre_hlen);
832 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 832 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
833 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); 833 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
834 IPSKB_REROUTED);
834 dst_release(skb->dst); 835 dst_release(skb->dst);
835 skb->dst = &rt->u.dst; 836 skb->dst = &rt->u.dst;
836 837
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3324fbfe528a..57d290d89ec2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -207,8 +207,10 @@ static inline int ip_finish_output(struct sk_buff *skb)
207{ 207{
208#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 208#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
209 /* Policy lookup after SNAT yielded a new policy */ 209 /* Policy lookup after SNAT yielded a new policy */
210 if (skb->dst->xfrm != NULL) 210 if (skb->dst->xfrm != NULL) {
211 return xfrm4_output_finish(skb); 211 IPCB(skb)->flags |= IPSKB_REROUTED;
212 return dst_output(skb);
213 }
212#endif 214#endif
213 if (skb->len > dst_mtu(skb->dst) && 215 if (skb->len > dst_mtu(skb->dst) &&
214 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) 216 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
@@ -271,8 +273,9 @@ int ip_mc_output(struct sk_buff *skb)
271 newskb->dev, ip_dev_loopback_xmit); 273 newskb->dev, ip_dev_loopback_xmit);
272 } 274 }
273 275
274 return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev, 276 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
275 ip_finish_output); 277 ip_finish_output,
278 !(IPCB(skb)->flags & IPSKB_REROUTED));
276} 279}
277 280
278int ip_output(struct sk_buff *skb) 281int ip_output(struct sk_buff *skb)
@@ -284,8 +287,9 @@ int ip_output(struct sk_buff *skb)
284 skb->dev = dev; 287 skb->dev = dev;
285 skb->protocol = htons(ETH_P_IP); 288 skb->protocol = htons(ETH_P_IP);
286 289
287 return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, 290 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
288 ip_finish_output); 291 ip_finish_output,
292 !(IPCB(skb)->flags & IPSKB_REROUTED));
289} 293}
290 294
291int ip_queue_xmit(struct sk_buff *skb, int ipfragok) 295int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
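Note: the ip_output.c hunks above switch the POST_ROUTING invocation from NF_HOOK to NF_HOOK_COND, so that packets flagged IPSKB_REROUTED by ip_finish_output (because an SNAT-triggered policy lookup already sent them back through dst_output) skip the hook on their second pass. Below is a minimal userspace sketch of that conditional-hook pattern; the flag value and helper names are stand-ins for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define IPSKB_REROUTED 0x10            /* stand-in flag value */

struct pkt { unsigned int flags; };

static int finish(struct pkt *p)
{
    printf("finish: transmit (flags=%#x)\n", p->flags);
    return 0;
}

/* Run the POST_ROUTING filters, then hand off to the continuation. */
static int hook_chain(struct pkt *p, int (*okfn)(struct pkt *))
{
    puts("hook chain: POST_ROUTING filters run");
    return okfn(p);
}

/* NF_HOOK_COND-style wrapper: the filters run only when cond is true. */
static int hook_cond(struct pkt *p, int (*okfn)(struct pkt *), bool cond)
{
    return cond ? hook_chain(p, okfn) : okfn(p);
}

int main(void)
{
    struct pkt fresh    = { .flags = 0 };
    struct pkt rerouted = { .flags = IPSKB_REROUTED };

    hook_cond(&fresh,    finish, !(fresh.flags    & IPSKB_REROUTED));
    hook_cond(&rerouted, finish, !(rerouted.flags & IPSKB_REROUTED));
    return 0;
}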
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e5cbe72c6b80..03d13742a4b8 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -622,7 +622,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
622 skb->h.raw = skb->nh.raw; 622 skb->h.raw = skb->nh.raw;
623 skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); 623 skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
624 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 624 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
625 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); 625 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
626 IPSKB_REROUTED);
626 dst_release(skb->dst); 627 dst_release(skb->dst);
627 skb->dst = &rt->u.dst; 628 skb->dst = &rt->u.dst;
628 629
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
index d34a9fa608e0..342d0b9098f5 100644
--- a/net/ipv4/multipath_wrandom.c
+++ b/net/ipv4/multipath_wrandom.c
@@ -228,7 +228,7 @@ static void wrandom_set_nhinfo(__u32 network,
228 struct multipath_dest *d, *target_dest = NULL; 228 struct multipath_dest *d, *target_dest = NULL;
229 229
230 /* store the weight information for a certain route */ 230 /* store the weight information for a certain route */
231 spin_lock(&state[state_idx].lock); 231 spin_lock_bh(&state[state_idx].lock);
232 232
233 /* find state entry for gateway or add one if necessary */ 233 /* find state entry for gateway or add one if necessary */
234 list_for_each_entry_rcu(r, &state[state_idx].head, list) { 234 list_for_each_entry_rcu(r, &state[state_idx].head, list) {
@@ -276,7 +276,7 @@ static void wrandom_set_nhinfo(__u32 network,
276 * we are finished 276 * we are finished
277 */ 277 */
278 278
279 spin_unlock(&state[state_idx].lock); 279 spin_unlock_bh(&state[state_idx].lock);
280} 280}
281 281
282static void __multipath_free(struct rcu_head *head) 282static void __multipath_free(struct rcu_head *head)
@@ -302,7 +302,7 @@ static void wrandom_flush(void)
302 for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) { 302 for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) {
303 struct multipath_route *r; 303 struct multipath_route *r;
304 304
305 spin_lock(&state[i].lock); 305 spin_lock_bh(&state[i].lock);
306 list_for_each_entry_rcu(r, &state[i].head, list) { 306 list_for_each_entry_rcu(r, &state[i].head, list) {
307 struct multipath_dest *d; 307 struct multipath_dest *d;
308 list_for_each_entry_rcu(d, &r->dests, list) { 308 list_for_each_entry_rcu(d, &r->dests, list) {
@@ -315,7 +315,7 @@ static void wrandom_flush(void)
315 __multipath_free); 315 __multipath_free);
316 } 316 }
317 317
318 spin_unlock(&state[i].lock); 318 spin_unlock_bh(&state[i].lock);
319 } 319 }
320} 320}
321 321
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 52a3d7c57907..ed42cdc57cd9 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -78,6 +78,47 @@ int ip_route_me_harder(struct sk_buff **pskb)
78} 78}
79EXPORT_SYMBOL(ip_route_me_harder); 79EXPORT_SYMBOL(ip_route_me_harder);
80 80
81#ifdef CONFIG_XFRM
82int ip_xfrm_me_harder(struct sk_buff **pskb)
83{
84 struct flowi fl;
85 unsigned int hh_len;
86 struct dst_entry *dst;
87
88 if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED)
89 return 0;
90 if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0)
91 return -1;
92
93 dst = (*pskb)->dst;
94 if (dst->xfrm)
95 dst = ((struct xfrm_dst *)dst)->route;
96 dst_hold(dst);
97
98 if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0)
99 return -1;
100
101 dst_release((*pskb)->dst);
102 (*pskb)->dst = dst;
103
104 /* Change in oif may mean change in hh_len. */
105 hh_len = (*pskb)->dst->dev->hard_header_len;
106 if (skb_headroom(*pskb) < hh_len) {
107 struct sk_buff *nskb;
108
109 nskb = skb_realloc_headroom(*pskb, hh_len);
110 if (!nskb)
111 return -1;
112 if ((*pskb)->sk)
113 skb_set_owner_w(nskb, (*pskb)->sk);
114 kfree_skb(*pskb);
115 *pskb = nskb;
116 }
117 return 0;
118}
119EXPORT_SYMBOL(ip_xfrm_me_harder);
120#endif
121
81void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); 122void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
82EXPORT_SYMBOL(ip_nat_decode_session); 123EXPORT_SYMBOL(ip_nat_decode_session);
83 124
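Note: the new ip_xfrm_me_harder() above re-runs the xfrm lookup after NAT has rewritten addresses and, because the output route (and therefore the output device) may have changed, re-checks that the skb still has enough link-layer headroom, reallocating when it does not. A minimal userspace sketch of that grow-the-headroom step follows, under an assumed buffer layout and names; this is not the kernel skb API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
    unsigned char *head, *data;   /* data - head == available headroom */
    size_t len;                   /* payload length at data */
};

/* If the device needs hh_len bytes of headroom and the buffer has less,
 * allocate a bigger buffer and copy the payload behind the new headroom. */
static int ensure_headroom(struct buf *b, size_t hh_len)
{
    if ((size_t)(b->data - b->head) >= hh_len)
        return 0;

    unsigned char *nhead = malloc(hh_len + b->len);
    if (!nhead)
        return -1;
    memcpy(nhead + hh_len, b->data, b->len);
    free(b->head);
    b->head = nhead;
    b->data = nhead + hh_len;
    return 0;
}

int main(void)
{
    struct buf b = { .len = 4 };
    b.head = malloc(2 + b.len);        /* only 2 bytes of headroom */
    b.data = b.head + 2;
    memcpy(b.data, "abcd", b.len);

    printf("grow headroom to 16: %s\n",
           ensure_headroom(&b, 16) == 0 ? "ok" : "failed");
    free(b.head);
    return 0;
}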
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index afe3d8f8177d..dd1048be8a01 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -807,6 +807,13 @@ static int do_replace(void __user *user, unsigned int len)
807 if (len != sizeof(tmp) + tmp.size) 807 if (len != sizeof(tmp) + tmp.size)
808 return -ENOPROTOOPT; 808 return -ENOPROTOOPT;
809 809
810 /* overflow check */
811 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
812 SMP_CACHE_BYTES)
813 return -ENOMEM;
814 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
815 return -ENOMEM;
816
810 newinfo = xt_alloc_table_info(tmp.size); 817 newinfo = xt_alloc_table_info(tmp.size);
811 if (!newinfo) 818 if (!newinfo)
812 return -ENOMEM; 819 return -ENOMEM;
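Note: the overflow check inserted into do_replace() above (and repeated in ip_tables.c and ip6_tables.c below) rejects a user-supplied table size before it is fed into the per-CPU allocation, where size * NR_CPUS plus header and cache padding could otherwise wrap a signed int. A small standalone sketch of the same arithmetic, with stand-in values for NR_CPUS, SMP_CACHE_BYTES and sizeof(struct xt_table_info):

#include <limits.h>
#include <stdio.h>

#define NR_CPUS          8
#define SMP_CACHE_BYTES  64
#define TABLE_INFO_SIZE  128    /* stand-in for sizeof(struct xt_table_info) */

/* Mirrors the new check: reject sizes for which the later per-CPU
 * allocation (roughly NR_CPUS * (size + padding) + header) could overflow. */
static int size_would_overflow(unsigned int size)
{
    return size >= (INT_MAX - TABLE_INFO_SIZE) / NR_CPUS - SMP_CACHE_BYTES;
}

int main(void)
{
    printf("size 4096        -> %s\n",
           size_would_overflow(4096) ? "reject" : "accept");
    printf("size INT_MAX / 4 -> %s\n",
           size_would_overflow(INT_MAX / 4) ? "reject" : "accept");
    return 0;
}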
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index c9ebbe0d2d9c..e0b5926c76f9 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1216,7 +1216,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1216 1216
1217 b = skb->tail; 1217 b = skb->tail;
1218 1218
1219 type |= NFNL_SUBSYS_CTNETLINK << 8; 1219 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1220 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); 1220 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
1221 nfmsg = NLMSG_DATA(nlh); 1221 nfmsg = NLMSG_DATA(nlh);
1222 1222
@@ -1567,6 +1567,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
1567}; 1567};
1568 1568
1569MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 1569MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
1570MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
1570 1571
1571static int __init ctnetlink_init(void) 1572static int __init ctnetlink_init(void)
1572{ 1573{
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index d3c5a371f993..4ba4463cec28 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -71,6 +71,7 @@ static int tftp_help(struct sk_buff **pskb,
71 71
72 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; 72 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
73 exp->mask.src.ip = 0xffffffff; 73 exp->mask.src.ip = 0xffffffff;
74 exp->mask.src.u.udp.port = 0;
74 exp->mask.dst.ip = 0xffffffff; 75 exp->mask.dst.ip = 0xffffffff;
75 exp->mask.dst.u.udp.port = 0xffff; 76 exp->mask.dst.u.udp.port = 0xffff;
76 exp->mask.dst.protonum = 0xff; 77 exp->mask.dst.protonum = 0xff;
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index c1a61462507f..1741d555ad0d 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -434,6 +434,7 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
434 } *inside; 434 } *inside;
435 struct ip_conntrack_tuple inner, target; 435 struct ip_conntrack_tuple inner, target;
436 int hdrlen = (*pskb)->nh.iph->ihl * 4; 436 int hdrlen = (*pskb)->nh.iph->ihl * 4;
437 unsigned long statusbit;
437 438
438 if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) 439 if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
439 return 0; 440 return 0;
@@ -495,17 +496,16 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
495 496
496 /* Change outer to look the reply to an incoming packet 497 /* Change outer to look the reply to an incoming packet
497 * (proto 0 means don't invert per-proto part). */ 498 * (proto 0 means don't invert per-proto part). */
499 if (manip == IP_NAT_MANIP_SRC)
500 statusbit = IPS_SRC_NAT;
501 else
502 statusbit = IPS_DST_NAT;
498 503
499 /* Obviously, we need to NAT destination IP, but source IP 504 /* Invert if this is reply dir. */
500 should be NAT'ed only if it is from a NAT'd host. 505 if (dir == IP_CT_DIR_REPLY)
506 statusbit ^= IPS_NAT_MASK;
501 507
502 Explanation: some people use NAT for anonymizing. Also, 508 if (ct->status & statusbit) {
503 CERT recommends dropping all packets from private IP
504 addresses (although ICMP errors from internal links with
505 such addresses are not too uncommon, as Alan Cox points
506 out) */
507 if (manip != IP_NAT_MANIP_SRC
508 || ((*pskb)->nh.iph->saddr == ct->tuplehash[dir].tuple.src.ip)) {
509 invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 509 invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
510 if (!manip_pkt(0, pskb, 0, &target, manip)) 510 if (!manip_pkt(0, pskb, 0, &target, manip))
511 return 0; 511 return 0;
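Note: the rewritten block in ip_nat_icmp_reply_translation() above replaces the old source-address heuristic with an explicit status-bit test: pick IPS_SRC_NAT or IPS_DST_NAT from the manip type, flip it with IPS_NAT_MASK when handling the reply direction, and only rewrite the outer header when the connection actually carries that bit. A minimal sketch of that selection logic, with stand-in bit values rather than the real conntrack status flags:

#include <stdio.h>

#define IPS_SRC_NAT  0x1                       /* stand-in values */
#define IPS_DST_NAT  0x2
#define IPS_NAT_MASK (IPS_SRC_NAT | IPS_DST_NAT)

enum manip { MANIP_SRC, MANIP_DST };
enum dir   { DIR_ORIGINAL, DIR_REPLY };

static int should_translate(unsigned long ct_status, enum manip m, enum dir d)
{
    unsigned long statusbit = (m == MANIP_SRC) ? IPS_SRC_NAT : IPS_DST_NAT;

    if (d == DIR_REPLY)
        statusbit ^= IPS_NAT_MASK;             /* invert for the reply direction */

    return (ct_status & statusbit) != 0;
}

int main(void)
{
    unsigned long status = IPS_SRC_NAT;        /* connection was source-NATed */

    printf("orig dir,  SRC manip: %d\n",
           should_translate(status, MANIP_SRC, DIR_ORIGINAL));
    printf("reply dir, DST manip: %d\n",
           should_translate(status, MANIP_DST, DIR_REPLY));
    return 0;
}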
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index ad438fb185b8..ab1f88fa21ec 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -200,20 +200,14 @@ ip_nat_in(unsigned int hooknum,
200 const struct net_device *out, 200 const struct net_device *out,
201 int (*okfn)(struct sk_buff *)) 201 int (*okfn)(struct sk_buff *))
202{ 202{
203 struct ip_conntrack *ct;
204 enum ip_conntrack_info ctinfo;
205 unsigned int ret; 203 unsigned int ret;
204 u_int32_t daddr = (*pskb)->nh.iph->daddr;
206 205
207 ret = ip_nat_fn(hooknum, pskb, in, out, okfn); 206 ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
208 if (ret != NF_DROP && ret != NF_STOLEN 207 if (ret != NF_DROP && ret != NF_STOLEN
209 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { 208 && daddr != (*pskb)->nh.iph->daddr) {
210 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 209 dst_release((*pskb)->dst);
211 210 (*pskb)->dst = NULL;
212 if (ct->tuplehash[dir].tuple.src.ip !=
213 ct->tuplehash[!dir].tuple.dst.ip) {
214 dst_release((*pskb)->dst);
215 (*pskb)->dst = NULL;
216 }
217 } 211 }
218 return ret; 212 return ret;
219} 213}
@@ -235,19 +229,19 @@ ip_nat_out(unsigned int hooknum,
235 return NF_ACCEPT; 229 return NF_ACCEPT;
236 230
237 ret = ip_nat_fn(hooknum, pskb, in, out, okfn); 231 ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
232#ifdef CONFIG_XFRM
238 if (ret != NF_DROP && ret != NF_STOLEN 233 if (ret != NF_DROP && ret != NF_STOLEN
239 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { 234 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
240 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 235 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
241 236
242 if (ct->tuplehash[dir].tuple.src.ip != 237 if (ct->tuplehash[dir].tuple.src.ip !=
243 ct->tuplehash[!dir].tuple.dst.ip 238 ct->tuplehash[!dir].tuple.dst.ip
244#ifdef CONFIG_XFRM
245 || ct->tuplehash[dir].tuple.src.u.all != 239 || ct->tuplehash[dir].tuple.src.u.all !=
246 ct->tuplehash[!dir].tuple.dst.u.all 240 ct->tuplehash[!dir].tuple.dst.u.all
247#endif
248 ) 241 )
249 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; 242 return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP;
250 } 243 }
244#endif
251 return ret; 245 return ret;
252} 246}
253 247
@@ -276,7 +270,7 @@ ip_nat_local_fn(unsigned int hooknum,
276 ct->tuplehash[!dir].tuple.src.ip 270 ct->tuplehash[!dir].tuple.src.ip
277#ifdef CONFIG_XFRM 271#ifdef CONFIG_XFRM
278 || ct->tuplehash[dir].tuple.dst.u.all != 272 || ct->tuplehash[dir].tuple.dst.u.all !=
279 ct->tuplehash[dir].tuple.src.u.all 273 ct->tuplehash[!dir].tuple.src.u.all
280#endif 274#endif
281 ) 275 )
282 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; 276 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2371b2062c2d..16f47c675fef 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -921,6 +921,13 @@ do_replace(void __user *user, unsigned int len)
921 if (len != sizeof(tmp) + tmp.size) 921 if (len != sizeof(tmp) + tmp.size)
922 return -ENOPROTOOPT; 922 return -ENOPROTOOPT;
923 923
924 /* overflow check */
925 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
926 SMP_CACHE_BYTES)
927 return -ENOMEM;
928 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
929 return -ENOMEM;
930
924 newinfo = xt_alloc_table_info(tmp.size); 931 newinfo = xt_alloc_table_info(tmp.size);
925 if (!newinfo) 932 if (!newinfo)
926 return -ENOMEM; 933 return -ENOMEM;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 641dbc477650..180a9ea57b69 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -35,6 +35,10 @@
35 * each nlgroup you are using, so the total kernel memory usage increases 35 * each nlgroup you are using, so the total kernel memory usage increases
36 * by that factor. 36 * by that factor.
37 * 37 *
38 * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
39 * nlbufsiz is used with alloc_skb, which adds another
40 * sizeof(struct skb_shared_info). Use NLMSG_GOODSIZE instead.
41 *
38 * flushtimeout: 42 * flushtimeout:
39 * Specify, after how many hundredths of a second the queue should be 43 * Specify, after how many hundredths of a second the queue should be
40 * flushed even if it is not full yet. 44 * flushed even if it is not full yet.
@@ -76,7 +80,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
76 80
77#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) 81#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
78 82
79static unsigned int nlbufsiz = 4096; 83static unsigned int nlbufsiz = NLMSG_GOODSIZE;
80module_param(nlbufsiz, uint, 0400); 84module_param(nlbufsiz, uint, 0400);
81MODULE_PARM_DESC(nlbufsiz, "netlink buffer size"); 85MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
82 86
@@ -143,22 +147,26 @@ static void ulog_timer(unsigned long data)
143static struct sk_buff *ulog_alloc_skb(unsigned int size) 147static struct sk_buff *ulog_alloc_skb(unsigned int size)
144{ 148{
145 struct sk_buff *skb; 149 struct sk_buff *skb;
150 unsigned int n;
146 151
147 /* alloc skb which should be big enough for a whole 152 /* alloc skb which should be big enough for a whole
148 * multipart message. WARNING: has to be <= 131000 153 * multipart message. WARNING: has to be <= 131000
149 * due to slab allocator restrictions */ 154 * due to slab allocator restrictions */
150 155
151 skb = alloc_skb(nlbufsiz, GFP_ATOMIC); 156 n = max(size, nlbufsiz);
157 skb = alloc_skb(n, GFP_ATOMIC);
152 if (!skb) { 158 if (!skb) {
153 PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", 159 PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
154 nlbufsiz);
155 160
156 /* try to allocate only as much as we need for 161 if (n > size) {
157 * current packet */ 162 /* try to allocate only as much as we need for
163 * current packet */
158 164
159 skb = alloc_skb(size, GFP_ATOMIC); 165 skb = alloc_skb(size, GFP_ATOMIC);
160 if (!skb) 166 if (!skb)
161 PRINTR("ipt_ULOG: can't even allocate %ub\n", size); 167 PRINTR("ipt_ULOG: can't even allocate %ub\n",
168 size);
169 }
162 } 170 }
163 171
164 return skb; 172 return skb;
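Note: ulog_alloc_skb() above now asks for max(size, nlbufsiz) up front and falls back to a packet-sized allocation only when the first request was actually larger than needed, instead of retrying unconditionally; nfulnl_alloc_skb() in nfnetlink_log.c below gets the same treatment. A minimal userspace sketch of that two-step allocation, with malloc standing in for alloc_skb and a stand-in nlbufsiz:

#include <stdio.h>
#include <stdlib.h>

static unsigned int nlbufsiz = 4096;    /* stand-in for the module parameter */

static void *alloc_buffer(unsigned int size)
{
    unsigned int n = size > nlbufsiz ? size : nlbufsiz;
    void *p = malloc(n);

    if (!p && n > size) {
        /* full-sized buffer failed; retry with just enough for this packet */
        p = malloc(size);
    }
    return p;
}

int main(void)
{
    void *p = alloc_buffer(512);

    printf("allocation %s\n", p ? "succeeded" : "failed");
    free(p);
    return 0;
}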
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c
index 18ca8258a1c5..5a7a265280f9 100644
--- a/net/ipv4/netfilter/ipt_policy.c
+++ b/net/ipv4/netfilter/ipt_policy.c
@@ -26,10 +26,13 @@ MODULE_LICENSE("GPL");
26static inline int 26static inline int
27match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e) 27match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e)
28{ 28{
29#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) 29#define MATCH_ADDR(x,y,z) (!e->match.x || \
30 ((e->x.a4.s_addr == (e->y.a4.s_addr & (z))) \
31 ^ e->invert.x))
32#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
30 33
31 return MATCH(saddr, x->props.saddr.a4 & e->smask) && 34 return MATCH_ADDR(saddr, smask, x->props.saddr.a4) &&
32 MATCH(daddr, x->id.daddr.a4 & e->dmask) && 35 MATCH_ADDR(daddr, dmask, x->id.daddr.a4) &&
33 MATCH(proto, x->id.proto) && 36 MATCH(proto, x->id.proto) &&
34 MATCH(mode, x->props.mode) && 37 MATCH(mode, x->props.mode) &&
35 MATCH(spi, x->id.spi) && 38 MATCH(spi, x->id.spi) &&
@@ -89,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
89 return 0; 92 return 0;
90 } 93 }
91 94
92 return strict ? 1 : 0; 95 return strict ? i == info->len : 0;
93} 96}
94 97
95static int match(const struct sk_buff *skb, 98static int match(const struct sk_buff *skb,
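Note: the ipt_policy.c changes above do two things: MATCH_ADDR now masks the address taken from the xfrm state with the rule's mask before comparing it to the rule's address (the ip6t_policy.c hunk further below fixes the IPv6 macro the same way), and strict mode now only matches when every configured policy element was consumed (i == info->len) rather than whenever the loop ran to completion. A minimal sketch of the masked-address test, using plain uint32_t addresses instead of the kernel's address unions:

#include <stdint.h>
#include <stdio.h>

/* Matches when (state address & rule mask) equals the rule address,
 * XORed with the rule's invert flag. */
static int match_addr(uint32_t rule_addr, uint32_t mask,
                      uint32_t state_addr, int invert)
{
    return (rule_addr == (state_addr & mask)) ^ invert;
}

int main(void)
{
    uint32_t net  = 0x0a000000;   /* 10.0.0.0    */
    uint32_t mask = 0xff000000;   /* /8          */
    uint32_t host = 0x0a0a0a0a;   /* 10.10.10.10 */

    printf("10.10.10.10 in 10.0.0.0/8:    %d\n", match_addr(net, mask, host, 0));
    printf("inverted (not in 10.0.0.0/8): %d\n", match_addr(net, mask, host, 1));
    return 0;
}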
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 167619f638c6..6c8624a54933 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -529,15 +529,10 @@ static int init_or_cleanup(int init)
529 goto cleanup_localinops; 529 goto cleanup_localinops;
530 } 530 }
531#endif 531#endif
532
533 /* For use by REJECT target */
534 ip_ct_attach = __nf_conntrack_attach;
535
536 return ret; 532 return ret;
537 533
538 cleanup: 534 cleanup:
539 synchronize_net(); 535 synchronize_net();
540 ip_ct_attach = NULL;
541#ifdef CONFIG_SYSCTL 536#ifdef CONFIG_SYSCTL
542 unregister_sysctl_table(nf_ct_ipv4_sysctl_header); 537 unregister_sysctl_table(nf_ct_ipv4_sysctl_header);
543 cleanup_localinops: 538 cleanup_localinops:
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 39d49dc333a7..1b167c4bb3be 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
49 int res = 0; 49 int res = 0;
50 int cpu; 50 int cpu;
51 51
52 for (cpu = 0; cpu < NR_CPUS; cpu++) 52 for_each_cpu(cpu)
53 res += proto->stats[cpu].inuse; 53 res += proto->stats[cpu].inuse;
54 54
55 return res; 55 return res;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d82c242ea704..fca5fe0cf94a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -835,7 +835,7 @@ static int rt_garbage_collect(void)
835 int r; 835 int r;
836 836
837 rthp = rt_remove_balanced_route( 837 rthp = rt_remove_balanced_route(
838 &rt_hash_table[i].chain, 838 &rt_hash_table[k].chain,
839 rth, 839 rth,
840 &r); 840 &r);
841 goal -= r; 841 goal -= r;
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 3284cfb993e6..128de4d7c0b7 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -230,7 +230,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
230 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 230 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
231 tp->snd_cwnd++; 231 tp->snd_cwnd++;
232 tp->snd_cwnd_cnt = 0; 232 tp->snd_cwnd_cnt = 0;
233 ca->ccount++;
234 } 233 }
235 } 234 }
236} 235}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a97ed5416c28..e9a54ae7d690 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -456,7 +456,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
456 456
457 tp->rcvq_space.space = space; 457 tp->rcvq_space.space = space;
458 458
459 if (sysctl_tcp_moderate_rcvbuf) { 459 if (sysctl_tcp_moderate_rcvbuf &&
460 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
460 int new_clamp = space; 461 int new_clamp = space;
461 462
462 /* Receive space grows, normalize in order to 463 /* Receive space grows, normalize in order to
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6ea353907af5..233bdf259965 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -236,7 +236,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
236 if (err) 236 if (err)
237 goto failure; 237 goto failure;
238 238
239 err = ip_route_newports(&rt, inet->sport, inet->dport, sk); 239 err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
240 if (err) 240 if (err)
241 goto failure; 241 goto failure;
242 242
@@ -1845,7 +1845,6 @@ void __init tcp_v4_init(struct net_proto_family *ops)
1845} 1845}
1846 1846
1847EXPORT_SYMBOL(ipv4_specific); 1847EXPORT_SYMBOL(ipv4_specific);
1848EXPORT_SYMBOL(inet_bind_bucket_create);
1849EXPORT_SYMBOL(tcp_hashinfo); 1848EXPORT_SYMBOL(tcp_hashinfo);
1850EXPORT_SYMBOL(tcp_prot); 1849EXPORT_SYMBOL(tcp_prot);
1851EXPORT_SYMBOL(tcp_unhash); 1850EXPORT_SYMBOL(tcp_unhash);
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index d4df0ddd424b..32ad229b4fed 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -152,10 +152,16 @@ error_nolock:
152 goto out_exit; 152 goto out_exit;
153} 153}
154 154
155int xfrm4_output_finish(struct sk_buff *skb) 155static int xfrm4_output_finish(struct sk_buff *skb)
156{ 156{
157 int err; 157 int err;
158 158
159#ifdef CONFIG_NETFILTER
160 if (!skb->dst->xfrm) {
161 IPCB(skb)->flags |= IPSKB_REROUTED;
162 return dst_output(skb);
163 }
164#endif
159 while (likely((err = xfrm4_output_one(skb)) == 0)) { 165 while (likely((err = xfrm4_output_one(skb)) == 0)) {
160 nf_reset(skb); 166 nf_reset(skb);
161 167
@@ -178,6 +184,7 @@ int xfrm4_output_finish(struct sk_buff *skb)
178 184
179int xfrm4_output(struct sk_buff *skb) 185int xfrm4_output(struct sk_buff *skb)
180{ 186{
181 return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, 187 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
182 xfrm4_output_finish); 188 xfrm4_output_finish,
189 !(IPCB(skb)->flags & IPSKB_REROUTED));
183} 190}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 42196ba3b0b9..f285bbf296e2 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -8,7 +8,6 @@
8 * 8 *
9 */ 9 */
10 10
11#include <asm/bug.h>
12#include <linux/compiler.h> 11#include <linux/compiler.h>
13#include <linux/config.h> 12#include <linux/config.h>
14#include <linux/inetdevice.h> 13#include <linux/inetdevice.h>
@@ -36,6 +35,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
36 if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ 35 if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
37 xdst->u.rt.fl.fl4_dst == fl->fl4_dst && 36 xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
38 xdst->u.rt.fl.fl4_src == fl->fl4_src && 37 xdst->u.rt.fl.fl4_src == fl->fl4_src &&
38 xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
39 xfrm_bundle_ok(xdst, fl, AF_INET)) { 39 xfrm_bundle_ok(xdst, fl, AF_INET)) {
40 dst_clone(dst); 40 dst_clone(dst);
41 break; 41 break;
@@ -62,7 +62,8 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
62 .nl_u = { 62 .nl_u = {
63 .ip4_u = { 63 .ip4_u = {
64 .saddr = local, 64 .saddr = local,
65 .daddr = remote 65 .daddr = remote,
66 .tos = fl->fl4_tos
66 } 67 }
67 } 68 }
68 }; 69 };
@@ -231,6 +232,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
231 fl->proto = iph->protocol; 232 fl->proto = iph->protocol;
232 fl->fl4_dst = iph->daddr; 233 fl->fl4_dst = iph->daddr;
233 fl->fl4_src = iph->saddr; 234 fl->fl4_src = iph->saddr;
235 fl->fl4_tos = iph->tos;
234} 236}
235 237
236static inline int xfrm4_garbage_collect(void) 238static inline int xfrm4_garbage_collect(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d328d5986143..b7d8822c1be4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2165,6 +2165,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2165 dev->name); 2165 dev->name);
2166 break; 2166 break;
2167 } 2167 }
2168
2169 if (idev)
2170 idev->if_flags |= IF_READY;
2168 } else { 2171 } else {
2169 if (!netif_carrier_ok(dev)) { 2172 if (!netif_carrier_ok(dev)) {
2170 /* device is still not ready. */ 2173 /* device is still not ready. */
@@ -3321,9 +3324,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
3321 3324
3322 switch (event) { 3325 switch (event) {
3323 case RTM_NEWADDR: 3326 case RTM_NEWADDR:
3324 dst_hold(&ifp->rt->u.dst); 3327 ip6_ins_rt(ifp->rt, NULL, NULL, NULL);
3325 if (ip6_ins_rt(ifp->rt, NULL, NULL, NULL))
3326 dst_release(&ifp->rt->u.dst);
3327 if (ifp->idev->cnf.forwarding) 3328 if (ifp->idev->cnf.forwarding)
3328 addrconf_join_anycast(ifp); 3329 addrconf_join_anycast(ifp);
3329 break; 3330 break;
@@ -3334,8 +3335,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
3334 dst_hold(&ifp->rt->u.dst); 3335 dst_hold(&ifp->rt->u.dst);
3335 if (ip6_del_rt(ifp->rt, NULL, NULL, NULL)) 3336 if (ip6_del_rt(ifp->rt, NULL, NULL, NULL))
3336 dst_free(&ifp->rt->u.dst); 3337 dst_free(&ifp->rt->u.dst);
3337 else
3338 dst_release(&ifp->rt->u.dst);
3339 break; 3338 break;
3340 } 3339 }
3341} 3340}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 064ffab82a9f..6c9711ac1c03 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -369,12 +369,6 @@ int inet6_destroy_sock(struct sock *sk)
369 struct sk_buff *skb; 369 struct sk_buff *skb;
370 struct ipv6_txoptions *opt; 370 struct ipv6_txoptions *opt;
371 371
372 /*
373 * Release destination entry
374 */
375
376 sk_dst_reset(sk);
377
378 /* Release rx options */ 372 /* Release rx options */
379 373
380 if ((skb = xchg(&np->pktoptions, NULL)) != NULL) 374 if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fcf883183cef..21eb725e885f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -42,6 +42,7 @@
42#include <linux/net.h> 42#include <linux/net.h>
43#include <linux/skbuff.h> 43#include <linux/skbuff.h>
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/netfilter.h>
45 46
46#ifdef CONFIG_SYSCTL 47#ifdef CONFIG_SYSCTL
47#include <linux/sysctl.h> 48#include <linux/sysctl.h>
@@ -255,6 +256,7 @@ out:
255struct icmpv6_msg { 256struct icmpv6_msg {
256 struct sk_buff *skb; 257 struct sk_buff *skb;
257 int offset; 258 int offset;
259 uint8_t type;
258}; 260};
259 261
260static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) 262static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
@@ -266,6 +268,8 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
266 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, 268 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
267 to, len, csum); 269 to, len, csum);
268 skb->csum = csum_block_add(skb->csum, csum, odd); 270 skb->csum = csum_block_add(skb->csum, csum, odd);
271 if (!(msg->type & ICMPV6_INFOMSG_MASK))
272 nf_ct_attach(skb, org_skb);
269 return 0; 273 return 0;
270} 274}
271 275
@@ -403,6 +407,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
403 407
404 msg.skb = skb; 408 msg.skb = skb;
405 msg.offset = skb->nh.raw - skb->data; 409 msg.offset = skb->nh.raw - skb->data;
410 msg.type = type;
406 411
407 len = skb->len - msg.offset; 412 len = skb->len - msg.offset;
408 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); 413 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
@@ -500,6 +505,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
500 505
501 msg.skb = skb; 506 msg.skb = skb;
502 msg.offset = 0; 507 msg.offset = 0;
508 msg.type = ICMPV6_ECHO_REPLY;
503 509
504 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), 510 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
505 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, 511 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 92ead3cf956b..faea8a120ee2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -458,7 +458,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
458 mtu = IPV6_MIN_MTU; 458 mtu = IPV6_MIN_MTU;
459 t->dev->mtu = mtu; 459 t->dev->mtu = mtu;
460 460
461 if ((len = sizeof (*ipv6h) + ipv6h->payload_len) > mtu) { 461 if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
462 rel_type = ICMPV6_PKT_TOOBIG; 462 rel_type = ICMPV6_PKT_TOOBIG;
463 rel_code = 0; 463 rel_code = 0;
464 rel_info = mtu; 464 rel_info = mtu;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6c05c7978bef..807c021d64a2 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb)
1252 } 1252 }
1253 } else { 1253 } else {
1254 for (ma = idev->mc_list; ma; ma=ma->next) { 1254 for (ma = idev->mc_list; ma; ma=ma->next) {
1255 if (group_type != IPV6_ADDR_ANY && 1255 if (!ipv6_addr_equal(group, &ma->mca_addr))
1256 !ipv6_addr_equal(group, &ma->mca_addr))
1257 continue; 1256 continue;
1258 spin_lock_bh(&ma->mca_lock); 1257 spin_lock_bh(&ma->mca_lock);
1259 if (ma->mca_flags & MAF_TIMER_RUNNING) { 1258 if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -1268,11 +1267,10 @@ int igmp6_event_query(struct sk_buff *skb)
1268 ma->mca_flags &= ~MAF_GSQUERY; 1267 ma->mca_flags &= ~MAF_GSQUERY;
1269 } 1268 }
1270 if (!(ma->mca_flags & MAF_GSQUERY) || 1269 if (!(ma->mca_flags & MAF_GSQUERY) ||
1271 mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs)) 1270 mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
1272 igmp6_group_queried(ma, max_delay); 1271 igmp6_group_queried(ma, max_delay);
1273 spin_unlock_bh(&ma->mca_lock); 1272 spin_unlock_bh(&ma->mca_lock);
1274 if (group_type != IPV6_ADDR_ANY) 1273 break;
1275 break;
1276 } 1274 }
1277 } 1275 }
1278 read_unlock_bh(&idev->lock); 1276 read_unlock_bh(&idev->lock);
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1351 * in all filters 1349 * in all filters
1352 */ 1350 */
1353 if (psf->sf_count[MCAST_INCLUDE]) 1351 if (psf->sf_count[MCAST_INCLUDE])
1354 return 0; 1352 return type == MLD2_MODE_IS_INCLUDE;
1355 return pmc->mca_sfcount[MCAST_EXCLUDE] == 1353 return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1356 psf->sf_count[MCAST_EXCLUDE]; 1354 psf->sf_count[MCAST_EXCLUDE];
1357 } 1355 }
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
1966 1964
1967static int sf_setstate(struct ifmcaddr6 *pmc) 1965static int sf_setstate(struct ifmcaddr6 *pmc)
1968{ 1966{
1969 struct ip6_sf_list *psf; 1967 struct ip6_sf_list *psf, *dpsf;
1970 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE]; 1968 int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
1971 int qrv = pmc->idev->mc_qrv; 1969 int qrv = pmc->idev->mc_qrv;
1972 int new_in, rv; 1970 int new_in, rv;
@@ -1978,8 +1976,48 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
1978 !psf->sf_count[MCAST_INCLUDE]; 1976 !psf->sf_count[MCAST_INCLUDE];
1979 } else 1977 } else
1980 new_in = psf->sf_count[MCAST_INCLUDE] != 0; 1978 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
1981 if (new_in != psf->sf_oldin) { 1979 if (new_in) {
1982 psf->sf_crcount = qrv; 1980 if (!psf->sf_oldin) {
1981 struct ip6_sf_list *prev = NULL;
1982
1983 for (dpsf=pmc->mca_tomb; dpsf;
1984 dpsf=dpsf->sf_next) {
1985 if (ipv6_addr_equal(&dpsf->sf_addr,
1986 &psf->sf_addr))
1987 break;
1988 prev = dpsf;
1989 }
1990 if (dpsf) {
1991 if (prev)
1992 prev->sf_next = dpsf->sf_next;
1993 else
1994 pmc->mca_tomb = dpsf->sf_next;
1995 kfree(dpsf);
1996 }
1997 psf->sf_crcount = qrv;
1998 rv++;
1999 }
2000 } else if (psf->sf_oldin) {
2001 psf->sf_crcount = 0;
2002 /*
2003 * add or update "delete" records if an active filter
2004 * is now inactive
2005 */
2006 for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
2007 if (ipv6_addr_equal(&dpsf->sf_addr,
2008 &psf->sf_addr))
2009 break;
2010 if (!dpsf) {
2011 dpsf = (struct ip6_sf_list *)
2012 kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2013 if (!dpsf)
2014 continue;
2015 *dpsf = *psf;
2016 /* pmc->mca_lock held by callers */
2017 dpsf->sf_next = pmc->mca_tomb;
2018 pmc->mca_tomb = dpsf;
2019 }
2020 dpsf->sf_crcount = qrv;
1983 rv++; 2021 rv++;
1984 } 2022 }
1985 } 2023 }
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 847068fd3367..74ff56c322f4 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -978,6 +978,13 @@ do_replace(void __user *user, unsigned int len)
978 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 978 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
979 return -EFAULT; 979 return -EFAULT;
980 980
981 /* overflow check */
982 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
983 SMP_CACHE_BYTES)
984 return -ENOMEM;
985 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
986 return -ENOMEM;
987
981 newinfo = xt_alloc_table_info(tmp.size); 988 newinfo = xt_alloc_table_info(tmp.size);
982 if (!newinfo) 989 if (!newinfo)
983 return -ENOMEM; 990 return -ENOMEM;
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index c745717b4ce2..0e6d1d4bbd5c 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -160,6 +160,8 @@ static void send_reset(struct sk_buff *oldskb)
160 csum_partial((char *)tcph, 160 csum_partial((char *)tcph,
161 sizeof(struct tcphdr), 0)); 161 sizeof(struct tcphdr), 0));
162 162
163 nf_ct_attach(nskb, oldskb);
164
163 NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev, 165 NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
164 dst_output); 166 dst_output);
165} 167}
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c
index afe1cc4c18a5..3d39ec924041 100644
--- a/net/ipv6/netfilter/ip6t_policy.c
+++ b/net/ipv6/netfilter/ip6t_policy.c
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
26static inline int 26static inline int
27match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e) 27match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e)
28{ 28{
29#define MATCH_ADDR(x,y,z) (!e->match.x || \ 29#define MATCH_ADDR(x,y,z) (!e->match.x || \
30 ((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x) 30 ((!ip6_masked_addrcmp(&e->x.a6, &e->y.a6, z)) \
31 ^ e->invert.x))
31#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) 32#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
32 33
33 return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) && 34 return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) &&
@@ -91,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info)
91 return 0; 92 return 0;
92 } 93 }
93 94
94 return strict ? 1 : 0; 95 return strict ? i == info->len : 0;
95} 96}
96 97
97static int match(const struct sk_buff *skb, 98static int match(const struct sk_buff *skb,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 50a13e75d70e..4238b1ed8860 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
38 int res = 0; 38 int res = 0;
39 int cpu; 39 int cpu;
40 40
41 for (cpu=0; cpu<NR_CPUS; cpu++) 41 for_each_cpu(cpu)
42 res += proto->stats[cpu].inuse; 42 res += proto->stats[cpu].inuse;
43 43
44 return res; 44 return res;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 66f1d12ea578..ae20a0ec9bd8 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -35,7 +35,6 @@
35#include <linux/skbuff.h> 35#include <linux/skbuff.h>
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/ioctls.h> 37#include <asm/ioctls.h>
38#include <asm/bug.h>
39 38
40#include <net/ip.h> 39#include <net/ip.h>
41#include <net/sock.h> 40#include <net/sock.h>
@@ -804,10 +803,7 @@ back_from_confirm:
804 err = rawv6_push_pending_frames(sk, &fl, rp); 803 err = rawv6_push_pending_frames(sk, &fl, rp);
805 } 804 }
806done: 805done:
807 ip6_dst_store(sk, dst, 806 dst_release(dst);
808 ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
809 &np->daddr : NULL);
810
811 release_sock(sk); 807 release_sock(sk);
812out: 808out:
813 fl6_sock_release(flowlabel); 809 fl6_sock_release(flowlabel);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 66d04004afda..ca9cf6853755 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -515,6 +515,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
515done: 515done:
516 if (opt && opt != np->opt) 516 if (opt && opt != np->opt)
517 sock_kfree_s(sk, opt, opt->tot_len); 517 sock_kfree_s(sk, opt, opt->tot_len);
518 dst_release(dst);
518 return err; 519 return err;
519} 520}
520 521
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 69bd957380e7..91cce8b2d7a5 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -11,7 +11,6 @@
11 * 11 *
12 */ 12 */
13 13
14#include <asm/bug.h>
15#include <linux/compiler.h> 14#include <linux/compiler.h>
16#include <linux/config.h> 15#include <linux/config.h>
17#include <linux/netdevice.h> 16#include <linux/netdevice.h>
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 890bac0d4a56..e3debbdb67f5 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -343,12 +343,12 @@ static void irda_task_timer_expired(void *data)
343static void irda_device_setup(struct net_device *dev) 343static void irda_device_setup(struct net_device *dev)
344{ 344{
345 dev->hard_header_len = 0; 345 dev->hard_header_len = 0;
346 dev->addr_len = 0; 346 dev->addr_len = LAP_ALEN;
347 347
348 dev->type = ARPHRD_IRDA; 348 dev->type = ARPHRD_IRDA;
349 dev->tx_queue_len = 8; /* Window size + 1 s-frame */ 349 dev->tx_queue_len = 8; /* Window size + 1 s-frame */
350 350
351 memset(dev->broadcast, 0xff, 4); 351 memset(dev->broadcast, 0xff, LAP_ALEN);
352 352
353 dev->mtu = 2048; 353 dev->mtu = 2048;
354 dev->flags = IFF_NOARP; 354 dev->flags = IFF_NOARP;
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index 07ec326c71f5..f65c7a83bc5c 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -696,7 +696,7 @@ irnet_daddr_to_dname(irnet_socket * self)
696 { 696 {
697 /* Yes !!! Get it.. */ 697 /* Yes !!! Get it.. */
698 strlcpy(self->rname, discoveries[i].info, sizeof(self->rname)); 698 strlcpy(self->rname, discoveries[i].info, sizeof(self->rname));
699 self->rname[NICKNAME_MAX_LEN + 1] = '\0'; 699 self->rname[sizeof(self->rname) - 1] = '\0';
700 DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n", 700 DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n",
701 self->daddr, self->rname); 701 self->daddr, self->rname);
702 kfree(discoveries); 702 kfree(discoveries);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 43f1ce74187d..b2d4d1dd2116 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1423,7 +1423,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
1423 1423
1424 if (err < 0) { 1424 if (err < 0) {
1425 x->km.state = XFRM_STATE_DEAD; 1425 x->km.state = XFRM_STATE_DEAD;
1426 xfrm_state_put(x); 1426 __xfrm_state_put(x);
1427 goto out; 1427 goto out;
1428 } 1428 }
1429 1429
@@ -1620,6 +1620,7 @@ static int key_notify_sa_flush(struct km_event *c)
1620 return -ENOBUFS; 1620 return -ENOBUFS;
1621 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); 1621 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
1622 hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto); 1622 hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
1623 hdr->sadb_msg_type = SADB_FLUSH;
1623 hdr->sadb_msg_seq = c->seq; 1624 hdr->sadb_msg_seq = c->seq;
1624 hdr->sadb_msg_pid = c->pid; 1625 hdr->sadb_msg_pid = c->pid;
1625 hdr->sadb_msg_version = PF_KEY_V2; 1626 hdr->sadb_msg_version = PF_KEY_V2;
@@ -2385,6 +2386,7 @@ static int key_notify_policy_flush(struct km_event *c)
2385 if (!skb_out) 2386 if (!skb_out)
2386 return -ENOBUFS; 2387 return -ENOBUFS;
2387 hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg)); 2388 hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
2389 hdr->sadb_msg_type = SADB_X_SPDFLUSH;
2388 hdr->sadb_msg_seq = c->seq; 2390 hdr->sadb_msg_seq = c->seq;
2389 hdr->sadb_msg_pid = c->pid; 2391 hdr->sadb_msg_pid = c->pid;
2390 hdr->sadb_msg_version = PF_KEY_V2; 2392 hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 99c0a0fa4a97..a8e5544da93e 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -102,8 +102,6 @@ config NF_CT_NETLINK
102 help 102 help
103 This option enables support for a netlink-based userspace interface 103 This option enables support for a netlink-based userspace interface
104 104
105endmenu
106
107config NETFILTER_XTABLES 105config NETFILTER_XTABLES
108 tristate "Netfilter Xtables support (required for ip_tables)" 106 tristate "Netfilter Xtables support (required for ip_tables)"
109 help 107 help
@@ -128,7 +126,7 @@ config NETFILTER_XT_TARGET_CONNMARK
128 tristate '"CONNMARK" target support' 126 tristate '"CONNMARK" target support'
129 depends on NETFILTER_XTABLES 127 depends on NETFILTER_XTABLES
130 depends on IP_NF_MANGLE || IP6_NF_MANGLE 128 depends on IP_NF_MANGLE || IP6_NF_MANGLE
131 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4) 129 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
132 help 130 help
133 This option adds a `CONNMARK' target, which allows one to manipulate 131 This option adds a `CONNMARK' target, which allows one to manipulate
134 the connection mark value. Similar to the MARK target, but 132 the connection mark value. Similar to the MARK target, but
@@ -189,7 +187,7 @@ config NETFILTER_XT_MATCH_COMMENT
189config NETFILTER_XT_MATCH_CONNBYTES 187config NETFILTER_XT_MATCH_CONNBYTES
190 tristate '"connbytes" per-connection counter match support' 188 tristate '"connbytes" per-connection counter match support'
191 depends on NETFILTER_XTABLES 189 depends on NETFILTER_XTABLES
192 depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || NF_CT_ACCT 190 depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK)
193 help 191 help
194 This option adds a `connbytes' match, which allows you to match the 192 This option adds a `connbytes' match, which allows you to match the
195 number of bytes and/or packets for each direction within a connection. 193 number of bytes and/or packets for each direction within a connection.
@@ -200,7 +198,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
200config NETFILTER_XT_MATCH_CONNMARK 198config NETFILTER_XT_MATCH_CONNMARK
201 tristate '"connmark" connection mark match support' 199 tristate '"connmark" connection mark match support'
202 depends on NETFILTER_XTABLES 200 depends on NETFILTER_XTABLES
203 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || NF_CONNTRACK_MARK 201 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
204 help 202 help
205 This option adds a `connmark' match, which allows you to match the 203 This option adds a `connmark' match, which allows you to match the
206 connection mark value previously set for the session by `CONNMARK'. 204 connection mark value previously set for the session by `CONNMARK'.
@@ -361,3 +359,5 @@ config NETFILTER_XT_MATCH_TCPMSS
361 359
362 To compile it as a module, choose M here. If unsure, say N. 360 To compile it as a module, choose M here. If unsure, say N.
363 361
362endmenu
363
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 62bb509f05d4..d622ddf08bb0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -188,7 +188,7 @@ extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
188struct nf_conntrack_protocol * 188struct nf_conntrack_protocol *
189__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol) 189__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
190{ 190{
191 if (unlikely(nf_ct_protos[l3proto] == NULL)) 191 if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
192 return &nf_conntrack_generic_protocol; 192 return &nf_conntrack_generic_protocol;
193 193
194 return nf_ct_protos[l3proto][protocol]; 194 return nf_ct_protos[l3proto][protocol];
@@ -1556,6 +1556,8 @@ void nf_conntrack_cleanup(void)
1556{ 1556{
1557 int i; 1557 int i;
1558 1558
1559 ip_ct_attach = NULL;
1560
1559 /* This makes sure all current packets have passed through 1561 /* This makes sure all current packets have passed through
1560 netfilter framework. Roll on, two-stage module 1562 netfilter framework. Roll on, two-stage module
1561 delete... */ 1563 delete... */
@@ -1715,6 +1717,9 @@ int __init nf_conntrack_init(void)
1715 nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto; 1717 nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
1716 write_unlock_bh(&nf_conntrack_lock); 1718 write_unlock_bh(&nf_conntrack_lock);
1717 1719
1720 /* For use by REJECT target */
1721 ip_ct_attach = __nf_conntrack_attach;
1722
1718 /* Set up fake conntrack: 1723 /* Set up fake conntrack:
1719 - to never be deleted, not in any hashes */ 1724 - to never be deleted, not in any hashes */
1720 atomic_set(&nf_conntrack_untracked.ct_general.use, 1); 1725 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index ab0c920f0d30..6f210f399762 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -657,8 +657,6 @@ static int __init init(void)
657 /* FIXME should be configurable whether IPv4 and IPv6 FTP connections 657 /* FIXME should be configurable whether IPv4 and IPv6 FTP connections
658 are tracked or not - YK */ 658 are tracked or not - YK */
659 for (i = 0; i < ports_c; i++) { 659 for (i = 0; i < ports_c; i++) {
660 memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper));
661
662 ftp[i][0].tuple.src.l3num = PF_INET; 660 ftp[i][0].tuple.src.l3num = PF_INET;
663 ftp[i][1].tuple.src.l3num = PF_INET6; 661 ftp[i][1].tuple.src.l3num = PF_INET6;
664 for (j = 0; j < 2; j++) { 662 for (j = 0; j < 2; j++) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 73ab16bc7d40..9ff3463037e1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1232,7 +1232,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1232 1232
1233 b = skb->tail; 1233 b = skb->tail;
1234 1234
1235 type |= NFNL_SUBSYS_CTNETLINK << 8; 1235 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1236 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); 1236 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
1237 nfmsg = NLMSG_DATA(nlh); 1237 nfmsg = NLMSG_DATA(nlh);
1238 1238
@@ -1589,6 +1589,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
1589}; 1589};
1590 1590
1591MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 1591MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
1592MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
1592 1593
1593static int __init ctnetlink_init(void) 1594static int __init ctnetlink_init(void)
1594{ 1595{
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index df99138c3b3b..6492ed66fb3c 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -864,7 +864,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff)
864{ 864{
865 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, 865 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
866 skb->len - dataoff, IPPROTO_TCP, 866 skb->len - dataoff, IPPROTO_TCP,
867 skb->ip_summed == CHECKSUM_HW ? skb->csum 867 skb->ip_summed == CHECKSUM_HW
868 ? csum_sub(skb->csum,
869 skb_checksum(skb, 0, dataoff, 0))
868 : skb_checksum(skb, dataoff, skb->len - dataoff, 870 : skb_checksum(skb, dataoff, skb->len - dataoff,
869 0)); 871 0));
870} 872}
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 4264dd079a16..831d206344e0 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -161,7 +161,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff)
161{ 161{
162 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, 162 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
163 skb->len - dataoff, IPPROTO_UDP, 163 skb->len - dataoff, IPPROTO_UDP,
164 skb->ip_summed == CHECKSUM_HW ? skb->csum 164 skb->ip_summed == CHECKSUM_HW
165 ? csum_sub(skb->csum,
166 skb_checksum(skb, 0, dataoff, 0))
165 : skb_checksum(skb, dataoff, skb->len - dataoff, 167 : skb_checksum(skb, dataoff, skb->len - dataoff,
166 0)); 168 0));
167} 169}
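Note: in both csum6() helpers above, the CHECKSUM_HW path previously fed the hardware checksum of the whole packet into csum_ipv6_magic(); it now subtracts the checksum of the bytes before dataoff, so only the transport portion is verified against the pseudo-header. This relies on the Internet checksum being a ones-complement sum that can be split and recombined piecewise. A small standalone sketch of that identity follows; csum_partial and csum_sub here are simplified stand-ins for the kernel helpers, and results may differ in the usual 0x0000/0xffff ones-complement corner case.

#include <stdint.h>
#include <stdio.h>

/* 16-bit ones-complement sum over a byte buffer (simplified csum_partial). */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

/* Ones-complement subtraction: remove a partial sum from a total. */
static uint32_t csum_sub(uint32_t a, uint32_t b)
{
    uint32_t sum = a + (uint16_t)~b;

    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    const uint8_t pkt[] = "IPv6-header-bytes|udp-payload-bytes";
    const size_t dataoff = 18;                 /* transport data starts here */
    const size_t len = sizeof(pkt) - 1;

    uint32_t whole  = csum_partial(pkt, len, 0);
    uint32_t prefix = csum_partial(pkt, dataoff, 0);
    uint32_t trans  = csum_partial(pkt + dataoff, len - dataoff, 0);

    printf("csum_sub(whole, prefix) = %#x, direct transport csum = %#x\n",
           (unsigned)csum_sub(whole, prefix), (unsigned)trans);
    return 0;
}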
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e10512e229b6..3b3c781b40c0 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -37,7 +37,7 @@
37#include "../bridge/br_private.h" 37#include "../bridge/br_private.h"
38#endif 38#endif
39 39
40#define NFULNL_NLBUFSIZ_DEFAULT 4096 40#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
41#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ 41#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
42#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ 42#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
43 43
@@ -314,24 +314,28 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
314 unsigned int pkt_size) 314 unsigned int pkt_size)
315{ 315{
316 struct sk_buff *skb; 316 struct sk_buff *skb;
317 unsigned int n;
317 318
318 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size); 319 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
319 320
320 /* alloc skb which should be big enough for a whole multipart 321 /* alloc skb which should be big enough for a whole multipart
321 * message. WARNING: has to be <= 128k due to slab restrictions */ 322 * message. WARNING: has to be <= 128k due to slab restrictions */
322 323
323 skb = alloc_skb(inst_size, GFP_ATOMIC); 324 n = max(inst_size, pkt_size);
325 skb = alloc_skb(n, GFP_ATOMIC);
324 if (!skb) { 326 if (!skb) {
325 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", 327 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
326 inst_size); 328 inst_size);
327 329
328 /* try to allocate only as much as we need for current 330 if (n > pkt_size) {
329 * packet */ 331 /* try to allocate only as much as we need for current
332 * packet */
330 333
331 skb = alloc_skb(pkt_size, GFP_ATOMIC); 334 skb = alloc_skb(pkt_size, GFP_ATOMIC);
332 if (!skb) 335 if (!skb)
333 PRINTR("nfnetlink_log: can't even alloc %u bytes\n", 336 PRINTR("nfnetlink_log: can't even alloc %u "
334 pkt_size); 337 "bytes\n", pkt_size);
338 }
335 } 339 }
336 340
337 return skb; 341 return skb;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 18ed9c5d209c..cac38b2e147a 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -825,7 +825,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
825 } 825 }
826 826
827 if (nfqa[NFQA_MARK-1]) 827 if (nfqa[NFQA_MARK-1])
828 skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1])); 828 entry->skb->nfmark = ntohl(*(u_int32_t *)
829 NFA_DATA(nfqa[NFQA_MARK-1]));
829 830
830 issue_verdict(entry, verdict); 831 issue_verdict(entry, verdict);
831 instance_put(queue); 832 instance_put(queue);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2101b45d2ec6..6b9772d95872 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -702,7 +702,8 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
702 * 0: continue 702 * 0: continue
703 * 1: repeat lookup - reference dropped while waiting for socket memory. 703 * 1: repeat lookup - reference dropped while waiting for socket memory.
704 */ 704 */
705int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo) 705int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
706 long timeo, struct sock *ssk)
706{ 707{
707 struct netlink_sock *nlk; 708 struct netlink_sock *nlk;
708 709
@@ -712,7 +713,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t
712 test_bit(0, &nlk->state)) { 713 test_bit(0, &nlk->state)) {
713 DECLARE_WAITQUEUE(wait, current); 714 DECLARE_WAITQUEUE(wait, current);
714 if (!timeo) { 715 if (!timeo) {
715 if (!nlk->pid) 716 if (!ssk || nlk_sk(ssk)->pid == 0)
716 netlink_overrun(sk); 717 netlink_overrun(sk);
717 sock_put(sk); 718 sock_put(sk);
718 kfree_skb(skb); 719 kfree_skb(skb);
@@ -797,7 +798,7 @@ retry:
797 kfree_skb(skb); 798 kfree_skb(skb);
798 return PTR_ERR(sk); 799 return PTR_ERR(sk);
799 } 800 }
800 err = netlink_attachskb(sk, skb, nonblock, timeo); 801 err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
801 if (err == 1) 802 if (err == 1)
802 goto retry; 803 goto retry;
803 if (err) 804 if (err)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 4ae1538c54a9..43e72419c868 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -238,7 +238,7 @@ int genl_register_family(struct genl_family *family)
238 sizeof(struct nlattr *), GFP_KERNEL); 238 sizeof(struct nlattr *), GFP_KERNEL);
239 if (family->attrbuf == NULL) { 239 if (family->attrbuf == NULL) {
240 err = -ENOMEM; 240 err = -ENOMEM;
241 goto errout; 241 goto errout_locked;
242 } 242 }
243 } else 243 } else
244 family->attrbuf = NULL; 244 family->attrbuf = NULL;
@@ -288,7 +288,7 @@ int genl_unregister_family(struct genl_family *family)
288 return -ENOENT; 288 return -ENOENT;
289} 289}
290 290
291static inline int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 291static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
292 int *errp) 292 int *errp)
293{ 293{
294 struct genl_ops *ops; 294 struct genl_ops *ops;
@@ -375,7 +375,7 @@ static void genl_rcv(struct sock *sk, int len)
375 do { 375 do {
376 if (genl_trylock()) 376 if (genl_trylock())
377 return; 377 return;
378 netlink_run_queue(sk, &qlen, &genl_rcv_msg); 378 netlink_run_queue(sk, &qlen, genl_rcv_msg);
379 genl_unlock(); 379 genl_unlock();
380 } while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen); 380 } while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen);
381} 381}
@@ -549,10 +549,8 @@ static int __init genl_init(void)
549 netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); 549 netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
550 genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID, 550 genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID,
551 genl_rcv, THIS_MODULE); 551 genl_rcv, THIS_MODULE);
552 if (genl_sock == NULL) { 552 if (genl_sock == NULL)
553 panic("GENL: Cannot initialize generic netlink\n"); 553 panic("GENL: Cannot initialize generic netlink\n");
554 return -ENOMEM;
555 }
556 554
557 return 0; 555 return 0;
558 556
@@ -560,7 +558,6 @@ errout_register:
560 genl_unregister_family(&genl_ctrl); 558 genl_unregister_family(&genl_ctrl);
561errout: 559errout:
562 panic("GENL: Cannot register controller: %d\n", err); 560 panic("GENL: Cannot register controller: %d\n", err);
563 return err;
564} 561}
565 562
566subsys_initcall(genl_init); 563subsys_initcall(genl_init);
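
Editor's note: the genl_register_family() hunk retargets the -ENOMEM path at errout_locked so the lock taken earlier in the function is dropped on the way out, and genl_init() loses the return statements that could never execute after panic(). Below is a minimal userspace sketch of that label-per-cleanup-step error unwinding; the names and the pthread mutex are stand-ins, not the kernel's locking.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct family {
    char *attrbuf;
    int id;
};

/* Register a family: a failure jumps to the label that undoes exactly
 * what has been acquired so far, mirroring errout_locked vs. errout in
 * genl_register_family().
 */
static int register_family(struct family *fam, size_t bufsize)
{
    int err;

    pthread_mutex_lock(&table_lock);

    fam->attrbuf = malloc(bufsize);
    if (fam->attrbuf == NULL) {
        err = -12;              /* -ENOMEM */
        goto errout_locked;     /* the lock is held: release it on the way out */
    }

    fam->id = 42;               /* pretend we found a free id */
    pthread_mutex_unlock(&table_lock);
    return 0;

errout_locked:
    pthread_mutex_unlock(&table_lock);
    return err;
}

int main(void)
{
    struct family fam;

    if (register_family(&fam, 128) == 0)
        printf("registered family id %d\n", fam.id);
    free(fam.attrbuf);
    return 0;
}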
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ee93abc71cb8..9db7dbdb16e6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -365,7 +365,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
365 */ 365 */
366 366
367 err = -EMSGSIZE; 367 err = -EMSGSIZE;
368 if(len>dev->mtu+dev->hard_header_len) 368 if (len > dev->mtu + dev->hard_header_len)
369 goto out_unlock; 369 goto out_unlock;
370 370
371 err = -ENOBUFS; 371 err = -ENOBUFS;
@@ -935,7 +935,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
935 * Check legality 935 * Check legality
936 */ 936 */
937 937
938 if(addr_len!=sizeof(struct sockaddr)) 938 if (addr_len != sizeof(struct sockaddr))
939 return -EINVAL; 939 return -EINVAL;
940 strlcpy(name,uaddr->sa_data,sizeof(name)); 940 strlcpy(name,uaddr->sa_data,sizeof(name));
941 941
@@ -1092,7 +1092,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1092 * retries. 1092 * retries.
1093 */ 1093 */
1094 1094
1095 if(skb==NULL) 1095 if (skb == NULL)
1096 goto out; 1096 goto out;
1097 1097
1098 /* 1098 /*
@@ -1392,8 +1392,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1392 if (level != SOL_PACKET) 1392 if (level != SOL_PACKET)
1393 return -ENOPROTOOPT; 1393 return -ENOPROTOOPT;
1394 1394
1395 if (get_user(len,optlen)) 1395 if (get_user(len, optlen))
1396 return -EFAULT; 1396 return -EFAULT;
1397 1397
1398 if (len < 0) 1398 if (len < 0)
1399 return -EINVAL; 1399 return -EINVAL;
@@ -1419,9 +1419,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1419 return -ENOPROTOOPT; 1419 return -ENOPROTOOPT;
1420 } 1420 }
1421 1421
1422 if (put_user(len, optlen)) 1422 if (put_user(len, optlen))
1423 return -EFAULT; 1423 return -EFAULT;
1424 return 0; 1424 return 0;
1425} 1425}
1426 1426
1427 1427
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 4aa6fc60357c..cb78b50868ee 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,20 +257,26 @@ int sctp_rcv(struct sk_buff *skb)
257 */ 257 */
258 sctp_bh_lock_sock(sk); 258 sctp_bh_lock_sock(sk);
259 259
260 /* It is possible that the association could have moved to a different
261 * socket if it is peeled off. If so, update the sk.
262 */
263 if (sk != rcvr->sk) {
264 sctp_bh_lock_sock(rcvr->sk);
265 sctp_bh_unlock_sock(sk);
266 sk = rcvr->sk;
267 }
268
260 if (sock_owned_by_user(sk)) 269 if (sock_owned_by_user(sk))
261 sk_add_backlog(sk, skb); 270 sk_add_backlog(sk, skb);
262 else 271 else
263 sctp_backlog_rcv(sk, skb); 272 sctp_backlog_rcv(sk, skb);
264 273
265 /* Release the sock and any reference counts we took in the 274 /* Release the sock and the sock ref we took in the lookup calls.
266 * lookup calls. 275 * The asoc/ep ref will be released in sctp_backlog_rcv.
267 */ 276 */
268 sctp_bh_unlock_sock(sk); 277 sctp_bh_unlock_sock(sk);
269 if (asoc)
270 sctp_association_put(asoc);
271 else
272 sctp_endpoint_put(ep);
273 sock_put(sk); 278 sock_put(sk);
279
274 return ret; 280 return ret;
275 281
276discard_it: 282discard_it:
@@ -296,12 +302,50 @@ discard_release:
296int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) 302int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
297{ 303{
298 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 304 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
299 struct sctp_inq *inqueue = &chunk->rcvr->inqueue; 305 struct sctp_inq *inqueue = NULL;
300 306 struct sctp_ep_common *rcvr = NULL;
301 sctp_inq_push(inqueue, chunk); 307
308 rcvr = chunk->rcvr;
309
310 BUG_TRAP(rcvr->sk == sk);
311
312 if (rcvr->dead) {
313 sctp_chunk_free(chunk);
314 } else {
315 inqueue = &chunk->rcvr->inqueue;
316 sctp_inq_push(inqueue, chunk);
317 }
318
319 /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
320 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
321 sctp_association_put(sctp_assoc(rcvr));
322 else
323 sctp_endpoint_put(sctp_ep(rcvr));
324
302 return 0; 325 return 0;
303} 326}
304 327
328void sctp_backlog_migrate(struct sctp_association *assoc,
329 struct sock *oldsk, struct sock *newsk)
330{
331 struct sk_buff *skb;
332 struct sctp_chunk *chunk;
333
334 skb = oldsk->sk_backlog.head;
335 oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
336 while (skb != NULL) {
337 struct sk_buff *next = skb->next;
338
339 chunk = SCTP_INPUT_CB(skb)->chunk;
340 skb->next = NULL;
341 if (&assoc->base == chunk->rcvr)
342 sk_add_backlog(newsk, skb);
343 else
344 sk_add_backlog(oldsk, skb);
345 skb = next;
346 }
347}
348
305/* Handle icmp frag needed error. */ 349/* Handle icmp frag needed error. */
306void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, 350void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
307 struct sctp_transport *t, __u32 pmtu) 351 struct sctp_transport *t, __u32 pmtu)
@@ -544,10 +588,16 @@ int sctp_rcv_ootb(struct sk_buff *skb)
544 sctp_errhdr_t *err; 588 sctp_errhdr_t *err;
545 589
546 ch = (sctp_chunkhdr_t *) skb->data; 590 ch = (sctp_chunkhdr_t *) skb->data;
547 ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
548 591
549 /* Scan through all the chunks in the packet. */ 592 /* Scan through all the chunks in the packet. */
550 while (ch_end > (__u8 *)ch && ch_end < skb->tail) { 593 do {
 594 /* Break out if chunk length is less than minimal. */
595 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
596 break;
597
598 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
599 if (ch_end > skb->tail)
600 break;
551 601
552 /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the 602 /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
553 * receiver MUST silently discard the OOTB packet and take no 603 * receiver MUST silently discard the OOTB packet and take no
@@ -578,8 +628,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
578 } 628 }
579 629
580 ch = (sctp_chunkhdr_t *) ch_end; 630 ch = (sctp_chunkhdr_t *) ch_end;
581 ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length)); 631 } while (ch_end < skb->tail);
582 }
583 632
584 return 0; 633 return 0;
585 634
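
Editor's note: sctp_rcv_ootb() now validates each chunk before trusting it, as shown above: the advertised length must cover at least a chunk header, and the rounded-up end pointer must stay inside the packet, otherwise the scan stops (sctp_sf_ootb later gets the same tail check). A small userspace sketch of that bounded type/length walk follows; the chunkhdr struct, WORD_ROUND macro and sample packet are illustrative stand-ins, not SCTP's definitions.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Stand-in for sctp_chunkhdr_t: type, flags, 16-bit length in network order. */
struct chunkhdr {
    uint8_t  type;
    uint8_t  flags;
    uint16_t length;    /* includes this header */
};

#define WORD_ROUND(n) (((n) + 3U) & ~3U)    /* chunks are padded to 4 bytes */

/* Walk chunks in buf[0..len): stop on a short or overlong chunk instead of
 * reading past the end or looping forever on a zero length.
 */
static void walk_chunks(const uint8_t *buf, size_t len)
{
    const uint8_t *end = buf + len;
    const struct chunkhdr *ch = (const struct chunkhdr *)buf;

    if (len < sizeof(*ch))
        return;

    do {
        uint16_t clen = ntohs(ch->length);

        if (clen < sizeof(*ch)) {
            printf("bad chunk: length %u too small\n", clen);
            break;
        }

        const uint8_t *ch_end = (const uint8_t *)ch + WORD_ROUND(clen);
        if (ch_end > end) {
            printf("bad chunk: runs past end of packet\n");
            break;
        }

        printf("chunk type %u, length %u\n", ch->type, clen);
        ch = (const struct chunkhdr *)ch_end;
    } while ((const uint8_t *)ch + sizeof(*ch) <= end);
}

int main(void)
{
    /* Two well-formed chunks followed by one that claims too much data. */
    _Alignas(4) uint8_t pkt[] = {
        1, 0, 0, 4,     /* type 1, len 4 */
        2, 0, 0, 8,     /* type 2, len 8 */
        0, 0, 0, 0,     /*   ...payload  */
        3, 0, 0, 16,    /* type 3 claims 16 bytes but only 4 remain */
    };
    walk_chunks(pkt, sizeof(pkt));
    return 0;
}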
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 2d33922c044b..297b8951463e 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -73,8 +73,10 @@ void sctp_inq_free(struct sctp_inq *queue)
73 /* If there is a packet which is currently being worked on, 73 /* If there is a packet which is currently being worked on,
74 * free it as well. 74 * free it as well.
75 */ 75 */
76 if (queue->in_progress) 76 if (queue->in_progress) {
77 sctp_chunk_free(queue->in_progress); 77 sctp_chunk_free(queue->in_progress);
78 queue->in_progress = NULL;
79 }
78 80
79 if (queue->malloced) { 81 if (queue->malloced) {
80 /* Dump the master memory segment. */ 82 /* Dump the master memory segment. */
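
Editor's note: the inqueue fix clears queue->in_progress after freeing it so no later path can free or dereference the stale pointer. A tiny sketch of that reset-after-free habit, with generic names rather than the SCTP structures:

#include <stdlib.h>

struct inq {
    void *in_progress;    /* chunk currently being parsed, if any */
};

static void inq_free(struct inq *q)
{
    if (q->in_progress) {
        free(q->in_progress);
        q->in_progress = NULL;    /* avoid double free / use-after-free later */
    }
}

int main(void)
{
    struct inq q = { .in_progress = malloc(32) };

    inq_free(&q);
    inq_free(&q);    /* safe: the second call sees NULL and does nothing */
    return 0;
}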
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a40991ef72c9..437cba7260a4 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -608,7 +608,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
608 * When a Fast Retransmit is being performed the sender SHOULD 608 * When a Fast Retransmit is being performed the sender SHOULD
609 * ignore the value of cwnd and SHOULD NOT delay retransmission. 609 * ignore the value of cwnd and SHOULD NOT delay retransmission.
610 */ 610 */
611 if (!chunk->fast_retransmit) 611 if (chunk->fast_retransmit <= 0)
612 if (transport->flight_size >= transport->cwnd) { 612 if (transport->flight_size >= transport->cwnd) {
613 retval = SCTP_XMIT_RWND_FULL; 613 retval = SCTP_XMIT_RWND_FULL;
614 goto finish; 614 goto finish;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index efb72faba20c..f148f9576dd2 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -406,7 +406,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
406 * chunks that are not yet acked should be added to the 406 * chunks that are not yet acked should be added to the
407 * retransmit queue. 407 * retransmit queue.
408 */ 408 */
409 if ((fast_retransmit && chunk->fast_retransmit) || 409 if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
410 (!fast_retransmit && !chunk->tsn_gap_acked)) { 410 (!fast_retransmit && !chunk->tsn_gap_acked)) {
411 /* RFC 2960 6.2.1 Processing a Received SACK 411 /* RFC 2960 6.2.1 Processing a Received SACK
412 * 412 *
@@ -603,7 +603,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
603 /* Mark the chunk as ineligible for fast retransmit 603 /* Mark the chunk as ineligible for fast retransmit
604 * after it is retransmitted. 604 * after it is retransmitted.
605 */ 605 */
606 chunk->fast_retransmit = 0; 606 if (chunk->fast_retransmit > 0)
607 chunk->fast_retransmit = -1;
607 608
608 *start_timer = 1; 609 *start_timer = 1;
609 q->empty = 0; 610 q->empty = 0;
@@ -621,7 +622,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
621 list_for_each(lchunk1, lqueue) { 622 list_for_each(lchunk1, lqueue) {
622 chunk1 = list_entry(lchunk1, struct sctp_chunk, 623 chunk1 = list_entry(lchunk1, struct sctp_chunk,
623 transmitted_list); 624 transmitted_list);
624 chunk1->fast_retransmit = 0; 625 if (chunk1->fast_retransmit > 0)
626 chunk1->fast_retransmit = -1;
625 } 627 }
626 } 628 }
627 } 629 }
@@ -1562,11 +1564,11 @@ static void sctp_mark_missing(struct sctp_outq *q,
1562 /* 1564 /*
1563 * M4) If any DATA chunk is found to have a 1565 * M4) If any DATA chunk is found to have a
1564 * 'TSN.Missing.Report' 1566 * 'TSN.Missing.Report'
1565 * value larger than or equal to 4, mark that chunk for 1567 * value larger than or equal to 3, mark that chunk for
1566 * retransmission and start the fast retransmit procedure. 1568 * retransmission and start the fast retransmit procedure.
1567 */ 1569 */
1568 1570
1569 if (chunk->tsn_missing_report >= 4) { 1571 if (chunk->tsn_missing_report >= 3) {
1570 chunk->fast_retransmit = 1; 1572 chunk->fast_retransmit = 1;
1571 do_fast_retransmit = 1; 1573 do_fast_retransmit = 1;
1572 } 1574 }
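
Editor's note: the output/outqueue hunks treat chunk->fast_retransmit as a three-way mark rather than a boolean (never marked, marked, already fast-retransmitted and ineligible), and the comment plus test now use a threshold of 3 missing reports. One way such a tri-state mark could be expressed is sketched below; the enum names are invented and the logic is condensed, not the kernel's exact flow.

#include <stdio.h>

/* Possible states of a chunk's fast-retransmit mark:
 *  0  never marked
 *  1  marked, eligible for fast retransmit
 * -1  already fast-retransmitted, not eligible again
 */
enum frtx_state { FRTX_NONE = 0, FRTX_MARKED = 1, FRTX_DONE = -1 };

struct chunk {
    int tsn_missing_report;
    int fast_retransmit;
};

/* Mark a chunk once its TSN has been reported missing often enough. */
static int mark_missing(struct chunk *c)
{
    if (c->tsn_missing_report >= 3 && c->fast_retransmit == FRTX_NONE) {
        c->fast_retransmit = FRTX_MARKED;
        return 1;
    }
    return 0;
}

/* After retransmitting, make the chunk ineligible for another fast rtx. */
static void done_fast_rtx(struct chunk *c)
{
    if (c->fast_retransmit > 0)
        c->fast_retransmit = FRTX_DONE;
}

int main(void)
{
    struct chunk c = { .tsn_missing_report = 3, .fast_retransmit = FRTX_NONE };

    if (mark_missing(&c))
        printf("chunk marked for fast retransmit\n");
    done_fast_rtx(&c);
    printf("fast_retransmit is now %d (ineligible)\n", c.fast_retransmit);
    return 0;
}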
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 6e4dc28874d7..d47a52c303a8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -176,7 +176,7 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
176 176
177static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) 177static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
178{ 178{
179 if (*pos > sctp_ep_hashsize) 179 if (*pos >= sctp_ep_hashsize)
180 return NULL; 180 return NULL;
181 181
182 if (*pos < 0) 182 if (*pos < 0)
@@ -185,8 +185,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
185 if (*pos == 0) 185 if (*pos == 0)
186 seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n"); 186 seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");
187 187
188 ++*pos;
189
190 return (void *)pos; 188 return (void *)pos;
191} 189}
192 190
@@ -198,11 +196,9 @@ static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
198 196
199static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) 197static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
200{ 198{
201 if (*pos > sctp_ep_hashsize) 199 if (++*pos >= sctp_ep_hashsize)
202 return NULL; 200 return NULL;
203 201
204 ++*pos;
205
206 return pos; 202 return pos;
207} 203}
208 204
@@ -214,19 +210,19 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
214 struct sctp_ep_common *epb; 210 struct sctp_ep_common *epb;
215 struct sctp_endpoint *ep; 211 struct sctp_endpoint *ep;
216 struct sock *sk; 212 struct sock *sk;
217 int hash = *(int *)v; 213 int hash = *(loff_t *)v;
218 214
219 if (hash > sctp_ep_hashsize) 215 if (hash >= sctp_ep_hashsize)
220 return -ENOMEM; 216 return -ENOMEM;
221 217
222 head = &sctp_ep_hashtable[hash-1]; 218 head = &sctp_ep_hashtable[hash];
223 sctp_local_bh_disable(); 219 sctp_local_bh_disable();
224 read_lock(&head->lock); 220 read_lock(&head->lock);
225 for (epb = head->chain; epb; epb = epb->next) { 221 for (epb = head->chain; epb; epb = epb->next) {
226 ep = sctp_ep(epb); 222 ep = sctp_ep(epb);
227 sk = epb->sk; 223 sk = epb->sk;
228 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 224 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
229 sctp_sk(sk)->type, sk->sk_state, hash-1, 225 sctp_sk(sk)->type, sk->sk_state, hash,
230 epb->bind_addr.port, 226 epb->bind_addr.port,
231 sock_i_uid(sk), sock_i_ino(sk)); 227 sock_i_uid(sk), sock_i_ino(sk));
232 228
@@ -283,7 +279,7 @@ void sctp_eps_proc_exit(void)
283 279
284static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) 280static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
285{ 281{
286 if (*pos > sctp_assoc_hashsize) 282 if (*pos >= sctp_assoc_hashsize)
287 return NULL; 283 return NULL;
288 284
289 if (*pos < 0) 285 if (*pos < 0)
@@ -293,8 +289,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
293 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " 289 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
294 "RPORT LADDRS <-> RADDRS\n"); 290 "RPORT LADDRS <-> RADDRS\n");
295 291
296 ++*pos;
297
298 return (void *)pos; 292 return (void *)pos;
299} 293}
300 294
@@ -306,11 +300,9 @@ static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
306 300
307static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos) 301static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
308{ 302{
309 if (*pos > sctp_assoc_hashsize) 303 if (++*pos >= sctp_assoc_hashsize)
310 return NULL; 304 return NULL;
311 305
312 ++*pos;
313
314 return pos; 306 return pos;
315} 307}
316 308
@@ -321,12 +313,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
321 struct sctp_ep_common *epb; 313 struct sctp_ep_common *epb;
322 struct sctp_association *assoc; 314 struct sctp_association *assoc;
323 struct sock *sk; 315 struct sock *sk;
324 int hash = *(int *)v; 316 int hash = *(loff_t *)v;
325 317
326 if (hash > sctp_assoc_hashsize) 318 if (hash >= sctp_assoc_hashsize)
327 return -ENOMEM; 319 return -ENOMEM;
328 320
329 head = &sctp_assoc_hashtable[hash-1]; 321 head = &sctp_assoc_hashtable[hash];
330 sctp_local_bh_disable(); 322 sctp_local_bh_disable();
331 read_lock(&head->lock); 323 read_lock(&head->lock);
332 for (epb = head->chain; epb; epb = epb->next) { 324 for (epb = head->chain; epb; epb = epb->next) {
@@ -335,7 +327,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
335 seq_printf(seq, 327 seq_printf(seq,
336 "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ", 328 "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ",
337 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 329 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
338 assoc->state, hash-1, assoc->assoc_id, 330 assoc->state, hash, assoc->assoc_id,
339 (sk->sk_rcvbuf - assoc->rwnd), 331 (sk->sk_rcvbuf - assoc->rwnd),
340 assoc->sndbuf_used, 332 assoc->sndbuf_used,
341 sock_i_uid(sk), sock_i_ino(sk), 333 sock_i_uid(sk), sock_i_ino(sk),
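
Editor's note: the proc iterators lose an off-by-one: the seq position is used directly as the hash bucket index, the bounds tests become >= hashsize, and the increment moves into the next() callback. A compact userspace sketch of that start/next/show walk over a fixed array, with plain functions standing in for the seq_file operations:

#include <stdio.h>

#define HASHSIZE 4
static const char *table[HASHSIZE] = { "ep0", "ep1", "ep2", "ep3" };

typedef long long loff_t_;    /* stand-in for the kernel's loff_t */

/* start(): validate the position; *pos is the bucket index itself. */
static void *walk_start(loff_t_ *pos)
{
    if (*pos >= HASHSIZE)    /* >= , not > : HASHSIZE is one past the end */
        return NULL;
    return pos;
}

/* next(): advance first, then re-check the bound. */
static void *walk_next(loff_t_ *pos)
{
    if (++*pos >= HASHSIZE)
        return NULL;
    return pos;
}

/* show(): index the table with the position directly (no hash-1). */
static void walk_show(void *v)
{
    loff_t_ hash = *(loff_t_ *)v;
    printf("bucket %lld: %s\n", hash, table[hash]);
}

int main(void)
{
    loff_t_ pos = 0;
    for (void *v = walk_start(&pos); v != NULL; v = walk_next(&pos))
        walk_show(v);
    return 0;
}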
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 556c495c6922..5e0de3c0eead 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1275,7 +1275,12 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1275 unsigned int keylen; 1275 unsigned int keylen;
1276 char *key; 1276 char *key;
1277 1277
1278 headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE; 1278 /* Header size is static data prior to the actual cookie, including
1279 * any padding.
1280 */
1281 headersize = sizeof(sctp_paramhdr_t) +
1282 (sizeof(struct sctp_signed_cookie) -
1283 sizeof(struct sctp_cookie));
1279 bodysize = sizeof(struct sctp_cookie) 1284 bodysize = sizeof(struct sctp_cookie)
1280 + ntohs(init_chunk->chunk_hdr->length) + addrs_len; 1285 + ntohs(init_chunk->chunk_hdr->length) + addrs_len;
1281 1286
@@ -1354,7 +1359,7 @@ struct sctp_association *sctp_unpack_cookie(
1354 struct sctp_signed_cookie *cookie; 1359 struct sctp_signed_cookie *cookie;
1355 struct sctp_cookie *bear_cookie; 1360 struct sctp_cookie *bear_cookie;
1356 int headersize, bodysize, fixed_size; 1361 int headersize, bodysize, fixed_size;
1357 __u8 digest[SCTP_SIGNATURE_SIZE]; 1362 __u8 *digest = ep->digest;
1358 struct scatterlist sg; 1363 struct scatterlist sg;
1359 unsigned int keylen, len; 1364 unsigned int keylen, len;
1360 char *key; 1365 char *key;
@@ -1362,7 +1367,12 @@ struct sctp_association *sctp_unpack_cookie(
1362 struct sk_buff *skb = chunk->skb; 1367 struct sk_buff *skb = chunk->skb;
1363 struct timeval tv; 1368 struct timeval tv;
1364 1369
1365 headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE; 1370 /* Header size is static data prior to the actual cookie, including
1371 * any padding.
1372 */
1373 headersize = sizeof(sctp_chunkhdr_t) +
1374 (sizeof(struct sctp_signed_cookie) -
1375 sizeof(struct sctp_cookie));
1366 bodysize = ntohs(chunk->chunk_hdr->length) - headersize; 1376 bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
1367 fixed_size = headersize + sizeof(struct sctp_cookie); 1377 fixed_size = headersize + sizeof(struct sctp_cookie);
1368 1378
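
Editor's note: both cookie paths stop hardcoding the secret size and derive the header length from the structure layout, so the signature field and any compiler padding are counted automatically. The same sizeof arithmetic, shown with made-up structures:

#include <stddef.h>
#include <stdio.h>

struct paramhdr {
    unsigned short type;
    unsigned short length;
};

struct cookie {               /* the variable "body" that gets embedded */
    unsigned int  lifetime;
    unsigned char peer_addr[16];
};

struct signed_cookie {        /* signature (and any padding) + the cookie */
    unsigned char signature[20];
    struct cookie c;
};

int main(void)
{
    /* Everything in signed_cookie that is *not* the cookie itself,
     * i.e. the signature plus whatever padding the compiler inserted.
     */
    size_t headersize = sizeof(struct paramhdr) +
                        (sizeof(struct signed_cookie) - sizeof(struct cookie));

    printf("param header:      %zu bytes\n", sizeof(struct paramhdr));
    printf("signature+padding: %zu bytes\n",
           sizeof(struct signed_cookie) - sizeof(struct cookie));
    printf("total header:      %zu bytes\n", headersize);
    return 0;
}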
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b8b38aba92b3..8d1dc24bab4c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1300,7 +1300,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1300 "T1 INIT Timeout adjustment" 1300 "T1 INIT Timeout adjustment"
1301 " init_err_counter: %d" 1301 " init_err_counter: %d"
1302 " cycle: %d" 1302 " cycle: %d"
1303 " timeout: %d\n", 1303 " timeout: %ld\n",
1304 asoc->init_err_counter, 1304 asoc->init_err_counter,
1305 asoc->init_cycle, 1305 asoc->init_cycle,
1306 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]); 1306 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]);
@@ -1328,7 +1328,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1328 SCTP_DEBUG_PRINTK( 1328 SCTP_DEBUG_PRINTK(
1329 "T1 COOKIE Timeout adjustment" 1329 "T1 COOKIE Timeout adjustment"
1330 " init_err_counter: %d" 1330 " init_err_counter: %d"
1331 " timeout: %d\n", 1331 " timeout: %ld\n",
1332 asoc->init_err_counter, 1332 asoc->init_err_counter,
1333 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]); 1333 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
1334 1334
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 477d7f80dba6..2b9a832b29a7 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -884,7 +884,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
884{ 884{
885 struct sctp_transport *transport = (struct sctp_transport *) arg; 885 struct sctp_transport *transport = (struct sctp_transport *) arg;
886 886
887 if (asoc->overall_error_count > asoc->max_retrans) { 887 if (asoc->overall_error_count >= asoc->max_retrans) {
888 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 888 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
889 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 889 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
890 SCTP_U32(SCTP_ERROR_NO_ERROR)); 890 SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -2122,7 +2122,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
2122 struct sctp_bind_addr *bp; 2122 struct sctp_bind_addr *bp;
2123 int attempts = asoc->init_err_counter + 1; 2123 int attempts = asoc->init_err_counter + 1;
2124 2124
2125 if (attempts >= asoc->max_init_attempts) { 2125 if (attempts > asoc->max_init_attempts) {
2126 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 2126 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
2127 SCTP_U32(SCTP_ERROR_STALE_COOKIE)); 2127 SCTP_U32(SCTP_ERROR_STALE_COOKIE));
2128 return SCTP_DISPOSITION_DELETE_TCB; 2128 return SCTP_DISPOSITION_DELETE_TCB;
@@ -3090,6 +3090,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3090 break; 3090 break;
3091 3091
3092 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3092 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3093 if (ch_end > skb->tail)
3094 break;
3093 3095
3094 if (SCTP_CID_SHUTDOWN_ACK == ch->type) 3096 if (SCTP_CID_SHUTDOWN_ACK == ch->type)
3095 ootb_shut_ack = 1; 3097 ootb_shut_ack = 1;
@@ -4638,7 +4640,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
4638 4640
4639 SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n"); 4641 SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
4640 4642
4641 if (attempts < asoc->max_init_attempts) { 4643 if (attempts <= asoc->max_init_attempts) {
4642 bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; 4644 bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
4643 repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0); 4645 repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
4644 if (!repl) 4646 if (!repl)
@@ -4695,7 +4697,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
4695 4697
4696 SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n"); 4698 SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
4697 4699
4698 if (attempts < asoc->max_init_attempts) { 4700 if (attempts <= asoc->max_init_attempts) {
4699 repl = sctp_make_cookie_echo(asoc, NULL); 4701 repl = sctp_make_cookie_echo(asoc, NULL);
4700 if (!repl) 4702 if (!repl)
4701 return SCTP_DISPOSITION_NOMEM; 4703 return SCTP_DISPOSITION_NOMEM;
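
Editor's note: the state-function hunks adjust several boundary comparisons (>= vs >, < vs <=) so the configured limits on heartbeat errors and INIT/COOKIE-ECHO attempts are applied consistently. A toy retry loop with the same attempt counting; send_init() here is imaginary and always fails, just to exercise the bound.

#include <stdio.h>
#include <stdbool.h>

/* Imaginary transmit that always fails, to exercise the retry bound. */
static bool send_init(int attempt)
{
    printf("INIT attempt %d failed\n", attempt);
    return false;
}

int main(void)
{
    const int max_init_attempts = 3;    /* total attempts allowed */
    int err_counter = 0;                /* failures seen so far   */

    for (;;) {
        int attempts = err_counter + 1; /* the attempt we are about to make */

        if (attempts > max_init_attempts) {    /* > , not >= : allow exactly 3 */
            printf("giving up after %d attempts\n", err_counter);
            return 1;
        }
        if (send_init(attempts))
            return 0;
        err_counter++;
    }
}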
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c98ee375ba5e..0ea947eb6813 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2995,7 +2995,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
2995 sp->hbinterval = jiffies_to_msecs(sctp_hb_interval); 2995 sp->hbinterval = jiffies_to_msecs(sctp_hb_interval);
2996 sp->pathmaxrxt = sctp_max_retrans_path; 2996 sp->pathmaxrxt = sctp_max_retrans_path;
2997 sp->pathmtu = 0; // allow default discovery 2997 sp->pathmtu = 0; // allow default discovery
2998 sp->sackdelay = sctp_sack_timeout; 2998 sp->sackdelay = jiffies_to_msecs(sctp_sack_timeout);
2999 sp->param_flags = SPP_HB_ENABLE | 2999 sp->param_flags = SPP_HB_ENABLE |
3000 SPP_PMTUD_ENABLE | 3000 SPP_PMTUD_ENABLE |
3001 SPP_SACKDELAY_ENABLE; 3001 SPP_SACKDELAY_ENABLE;
@@ -5426,7 +5426,7 @@ out:
5426 return err; 5426 return err;
5427 5427
5428do_error: 5428do_error:
5429 if (asoc->init_err_counter + 1 >= asoc->max_init_attempts) 5429 if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
5430 err = -ETIMEDOUT; 5430 err = -ETIMEDOUT;
5431 else 5431 else
5432 err = -ECONNREFUSED; 5432 err = -ECONNREFUSED;
@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
5602 */ 5602 */
5603 newsp->type = type; 5603 newsp->type = type;
5604 5604
5605 spin_lock_bh(&oldsk->sk_lock.slock);
5606 /* Migrate the backlog from oldsk to newsk. */
5607 sctp_backlog_migrate(assoc, oldsk, newsk);
5605 /* Migrate the association to the new socket. */ 5608 /* Migrate the association to the new socket. */
5606 sctp_assoc_migrate(assoc, newsk); 5609 sctp_assoc_migrate(assoc, newsk);
5610 spin_unlock_bh(&oldsk->sk_lock.slock);
5607 5611
5608 /* If the association on the newsk is already closed before accept() 5612 /* If the association on the newsk is already closed before accept()
5609 * is called, set RCV_SHUTDOWN flag. 5613 * is called, set RCV_SHUTDOWN flag.
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index fcd7096c953d..dc6f3ff32358 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -159,12 +159,9 @@ static ctl_table sctp_table[] = {
159 .ctl_name = NET_SCTP_PRESERVE_ENABLE, 159 .ctl_name = NET_SCTP_PRESERVE_ENABLE,
160 .procname = "cookie_preserve_enable", 160 .procname = "cookie_preserve_enable",
161 .data = &sctp_cookie_preserve_enable, 161 .data = &sctp_cookie_preserve_enable,
162 .maxlen = sizeof(long), 162 .maxlen = sizeof(int),
163 .mode = 0644, 163 .mode = 0644,
164 .proc_handler = &proc_doulongvec_ms_jiffies_minmax, 164 .proc_handler = &proc_dointvec
165 .strategy = &sctp_sysctl_jiffies_ms,
166 .extra1 = &rto_timer_min,
167 .extra2 = &rto_timer_max
168 }, 165 },
169 { 166 {
170 .ctl_name = NET_SCTP_RTO_ALPHA, 167 .ctl_name = NET_SCTP_RTO_ALPHA,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 68d73e2dd155..160f62ad1cc5 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -350,7 +350,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
350 tp->rto_pending = 0; 350 tp->rto_pending = 0;
351 351
352 SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " 352 SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
353 "rttvar: %d, rto: %d\n", __FUNCTION__, 353 "rttvar: %d, rto: %ld\n", __FUNCTION__,
354 tp, rtt, tp->srtt, tp->rttvar, tp->rto); 354 tp, rtt, tp->srtt, tp->rttvar, tp->rto);
355} 355}
356 356
diff --git a/net/socket.c b/net/socket.c
index b38a263853c3..a00851f981db 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
2078 int cpu; 2078 int cpu;
2079 int counter = 0; 2079 int counter = 0;
2080 2080
2081 for (cpu = 0; cpu < NR_CPUS; cpu++) 2081 for_each_cpu(cpu)
2082 counter += per_cpu(sockets_in_use, cpu); 2082 counter += per_cpu(sockets_in_use, cpu);
2083 2083
2084 /* It can be negative, by the way. 8) */ 2084 /* It can be negative, by the way. 8) */
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 9ac1b8c26c01..8d6f1a176b15 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -184,7 +184,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free)
184 */ 184 */
185struct rpc_cred * 185struct rpc_cred *
186rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, 186rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
187 int taskflags) 187 int flags)
188{ 188{
189 struct rpc_cred_cache *cache = auth->au_credcache; 189 struct rpc_cred_cache *cache = auth->au_credcache;
190 HLIST_HEAD(free); 190 HLIST_HEAD(free);
@@ -193,7 +193,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
193 *cred = NULL; 193 *cred = NULL;
194 int nr = 0; 194 int nr = 0;
195 195
196 if (!(taskflags & RPC_TASK_ROOTCREDS)) 196 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS))
197 nr = acred->uid & RPC_CREDCACHE_MASK; 197 nr = acred->uid & RPC_CREDCACHE_MASK;
198retry: 198retry:
199 spin_lock(&rpc_credcache_lock); 199 spin_lock(&rpc_credcache_lock);
@@ -202,7 +202,7 @@ retry:
202 hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { 202 hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
203 struct rpc_cred *entry; 203 struct rpc_cred *entry;
204 entry = hlist_entry(pos, struct rpc_cred, cr_hash); 204 entry = hlist_entry(pos, struct rpc_cred, cr_hash);
205 if (entry->cr_ops->crmatch(acred, entry, taskflags)) { 205 if (entry->cr_ops->crmatch(acred, entry, flags)) {
206 hlist_del(&entry->cr_hash); 206 hlist_del(&entry->cr_hash);
207 cred = entry; 207 cred = entry;
208 break; 208 break;
@@ -224,7 +224,7 @@ retry:
224 rpcauth_destroy_credlist(&free); 224 rpcauth_destroy_credlist(&free);
225 225
226 if (!cred) { 226 if (!cred) {
227 new = auth->au_ops->crcreate(auth, acred, taskflags); 227 new = auth->au_ops->crcreate(auth, acred, flags);
228 if (!IS_ERR(new)) { 228 if (!IS_ERR(new)) {
229#ifdef RPC_DEBUG 229#ifdef RPC_DEBUG
230 new->cr_magic = RPCAUTH_CRED_MAGIC; 230 new->cr_magic = RPCAUTH_CRED_MAGIC;
@@ -232,13 +232,21 @@ retry:
232 goto retry; 232 goto retry;
233 } else 233 } else
234 cred = new; 234 cred = new;
235 } else if ((cred->cr_flags & RPCAUTH_CRED_NEW)
236 && cred->cr_ops->cr_init != NULL
237 && !(flags & RPCAUTH_LOOKUP_NEW)) {
238 int res = cred->cr_ops->cr_init(auth, cred);
239 if (res < 0) {
240 put_rpccred(cred);
241 cred = ERR_PTR(res);
242 }
235 } 243 }
236 244
237 return (struct rpc_cred *) cred; 245 return (struct rpc_cred *) cred;
238} 246}
239 247
240struct rpc_cred * 248struct rpc_cred *
241rpcauth_lookupcred(struct rpc_auth *auth, int taskflags) 249rpcauth_lookupcred(struct rpc_auth *auth, int flags)
242{ 250{
243 struct auth_cred acred = { 251 struct auth_cred acred = {
244 .uid = current->fsuid, 252 .uid = current->fsuid,
@@ -250,7 +258,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int taskflags)
250 dprintk("RPC: looking up %s cred\n", 258 dprintk("RPC: looking up %s cred\n",
251 auth->au_ops->au_name); 259 auth->au_ops->au_name);
252 get_group_info(acred.group_info); 260 get_group_info(acred.group_info);
253 ret = auth->au_ops->lookup_cred(auth, &acred, taskflags); 261 ret = auth->au_ops->lookup_cred(auth, &acred, flags);
254 put_group_info(acred.group_info); 262 put_group_info(acred.group_info);
255 return ret; 263 return ret;
256} 264}
@@ -265,11 +273,14 @@ rpcauth_bindcred(struct rpc_task *task)
265 .group_info = current->group_info, 273 .group_info = current->group_info,
266 }; 274 };
267 struct rpc_cred *ret; 275 struct rpc_cred *ret;
276 int flags = 0;
268 277
269 dprintk("RPC: %4d looking up %s cred\n", 278 dprintk("RPC: %4d looking up %s cred\n",
270 task->tk_pid, task->tk_auth->au_ops->au_name); 279 task->tk_pid, task->tk_auth->au_ops->au_name);
271 get_group_info(acred.group_info); 280 get_group_info(acred.group_info);
272 ret = auth->au_ops->lookup_cred(auth, &acred, task->tk_flags); 281 if (task->tk_flags & RPC_TASK_ROOTCREDS)
282 flags |= RPCAUTH_LOOKUP_ROOTCREDS;
283 ret = auth->au_ops->lookup_cred(auth, &acred, flags);
273 if (!IS_ERR(ret)) 284 if (!IS_ERR(ret))
274 task->tk_msg.rpc_cred = ret; 285 task->tk_msg.rpc_cred = ret;
275 else 286 else
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8d782282ec19..bb46efd92e57 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -158,6 +158,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
158 old = gss_cred->gc_ctx; 158 old = gss_cred->gc_ctx;
159 gss_cred->gc_ctx = ctx; 159 gss_cred->gc_ctx = ctx;
160 cred->cr_flags |= RPCAUTH_CRED_UPTODATE; 160 cred->cr_flags |= RPCAUTH_CRED_UPTODATE;
161 cred->cr_flags &= ~RPCAUTH_CRED_NEW;
161 write_unlock(&gss_ctx_lock); 162 write_unlock(&gss_ctx_lock);
162 if (old) 163 if (old)
163 gss_put_ctx(old); 164 gss_put_ctx(old);
@@ -580,7 +581,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
580 } else { 581 } else {
581 struct auth_cred acred = { .uid = uid }; 582 struct auth_cred acred = { .uid = uid };
582 spin_unlock(&gss_auth->lock); 583 spin_unlock(&gss_auth->lock);
583 cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, 0); 584 cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW);
584 if (IS_ERR(cred)) { 585 if (IS_ERR(cred)) {
585 err = PTR_ERR(cred); 586 err = PTR_ERR(cred);
586 goto err_put_ctx; 587 goto err_put_ctx;
@@ -758,13 +759,13 @@ gss_destroy_cred(struct rpc_cred *rc)
758 * Lookup RPCSEC_GSS cred for the current process 759 * Lookup RPCSEC_GSS cred for the current process
759 */ 760 */
760static struct rpc_cred * 761static struct rpc_cred *
761gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) 762gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
762{ 763{
763 return rpcauth_lookup_credcache(auth, acred, taskflags); 764 return rpcauth_lookup_credcache(auth, acred, flags);
764} 765}
765 766
766static struct rpc_cred * 767static struct rpc_cred *
767gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) 768gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
768{ 769{
769 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); 770 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
770 struct gss_cred *cred = NULL; 771 struct gss_cred *cred = NULL;
@@ -785,13 +786,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags)
785 */ 786 */
786 cred->gc_flags = 0; 787 cred->gc_flags = 0;
787 cred->gc_base.cr_ops = &gss_credops; 788 cred->gc_base.cr_ops = &gss_credops;
789 cred->gc_base.cr_flags = RPCAUTH_CRED_NEW;
788 cred->gc_service = gss_auth->service; 790 cred->gc_service = gss_auth->service;
789 do {
790 err = gss_create_upcall(gss_auth, cred);
791 } while (err == -EAGAIN);
792 if (err < 0)
793 goto out_err;
794
795 return &cred->gc_base; 791 return &cred->gc_base;
796 792
797out_err: 793out_err:
@@ -801,13 +797,34 @@ out_err:
801} 797}
802 798
803static int 799static int
804gss_match(struct auth_cred *acred, struct rpc_cred *rc, int taskflags) 800gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
801{
802 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
803 struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
804 int err;
805
806 do {
807 err = gss_create_upcall(gss_auth, gss_cred);
808 } while (err == -EAGAIN);
809 return err;
810}
811
812static int
813gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
805{ 814{
806 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); 815 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
807 816
817 /*
818 * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
819 * we don't really care if the credential has expired or not,
820 * since the caller should be prepared to reinitialise it.
821 */
822 if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW))
823 goto out;
808 /* Don't match with creds that have expired. */ 824 /* Don't match with creds that have expired. */
809 if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) 825 if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
810 return 0; 826 return 0;
827out:
811 return (rc->cr_uid == acred->uid); 828 return (rc->cr_uid == acred->uid);
812} 829}
813 830
@@ -1241,6 +1258,7 @@ static struct rpc_authops authgss_ops = {
1241static struct rpc_credops gss_credops = { 1258static struct rpc_credops gss_credops = {
1242 .cr_name = "AUTH_GSS", 1259 .cr_name = "AUTH_GSS",
1243 .crdestroy = gss_destroy_cred, 1260 .crdestroy = gss_destroy_cred,
1261 .cr_init = gss_cred_init,
1244 .crmatch = gss_match, 1262 .crmatch = gss_match,
1245 .crmarshal = gss_marshal, 1263 .crmarshal = gss_marshal,
1246 .crrefresh = gss_refresh, 1264 .crrefresh = gss_refresh,
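
Editor's note: across auth.c and auth_gss.c the credential-lookup flags are separated from RPC task flags, and the expensive GSS upcall is deferred: crcreate() only marks the credential RPCAUTH_CRED_NEW, and the first ordinary lookup runs cr_init() to finish it. A condensed sketch of that lazy-init-on-lookup idea, with invented flag names and no real cache:

#include <stdio.h>

#define CRED_NEW     0x01    /* created but not yet initialised  */
#define LOOKUP_NEW   0x01    /* caller will initialise it itself */

struct cred {
    int flags;
    int uid;
};

/* Expensive step (stands in for the GSS upcall), run at most once. */
static int cred_init(struct cred *c)
{
    printf("initialising cred for uid %d\n", c->uid);
    c->flags &= ~CRED_NEW;
    return 0;
}

/* Hand back a cached cred; finish a NEW one unless the caller passed
 * LOOKUP_NEW and will complete the initialisation itself later.
 */
static struct cred *lookup_cred(struct cred *cached, int flags)
{
    if ((cached->flags & CRED_NEW) && !(flags & LOOKUP_NEW)) {
        if (cred_init(cached) < 0)
            return NULL;
    }
    return cached;
}

int main(void)
{
    struct cred c = { .flags = CRED_NEW, .uid = 1000 };

    lookup_cred(&c, LOOKUP_NEW);    /* no init: caller handles it      */
    lookup_cred(&c, 0);             /* first normal use: init runs     */
    lookup_cred(&c, 0);             /* already initialised: no-op      */
    return 0;
}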
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 1b3ed4fd1987..df14b6bfbf10 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -75,7 +75,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
75 75
76 atomic_set(&cred->uc_count, 1); 76 atomic_set(&cred->uc_count, 1);
77 cred->uc_flags = RPCAUTH_CRED_UPTODATE; 77 cred->uc_flags = RPCAUTH_CRED_UPTODATE;
78 if (flags & RPC_TASK_ROOTCREDS) { 78 if (flags & RPCAUTH_LOOKUP_ROOTCREDS) {
79 cred->uc_uid = 0; 79 cred->uc_uid = 0;
80 cred->uc_gid = 0; 80 cred->uc_gid = 0;
81 cred->uc_gids[0] = NOGROUP; 81 cred->uc_gids[0] = NOGROUP;
@@ -108,12 +108,12 @@ unx_destroy_cred(struct rpc_cred *cred)
108 * request root creds (e.g. for NFS swapping). 108 * request root creds (e.g. for NFS swapping).
109 */ 109 */
110static int 110static int
111unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int taskflags) 111unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
112{ 112{
113 struct unx_cred *cred = (struct unx_cred *) rcred; 113 struct unx_cred *cred = (struct unx_cred *) rcred;
114 int i; 114 int i;
115 115
116 if (!(taskflags & RPC_TASK_ROOTCREDS)) { 116 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) {
117 int groups; 117 int groups;
118 118
119 if (cred->uc_uid != acred->uid 119 if (cred->uc_uid != acred->uid
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9764c80ab0b2..a5c0c7b6e151 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -38,44 +38,42 @@ static kmem_cache_t *rpc_inode_cachep __read_mostly;
38 38
39#define RPC_UPCALL_TIMEOUT (30*HZ) 39#define RPC_UPCALL_TIMEOUT (30*HZ)
40 40
41static void 41static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
42__rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err) 42 void (*destroy_msg)(struct rpc_pipe_msg *), int err)
43{ 43{
44 struct rpc_pipe_msg *msg; 44 struct rpc_pipe_msg *msg;
45 void (*destroy_msg)(struct rpc_pipe_msg *);
46 45
47 destroy_msg = rpci->ops->destroy_msg; 46 if (list_empty(head))
48 while (!list_empty(head)) { 47 return;
48 do {
49 msg = list_entry(head->next, struct rpc_pipe_msg, list); 49 msg = list_entry(head->next, struct rpc_pipe_msg, list);
50 list_del_init(&msg->list); 50 list_del(&msg->list);
51 msg->errno = err; 51 msg->errno = err;
52 destroy_msg(msg); 52 destroy_msg(msg);
53 } 53 } while (!list_empty(head));
54}
55
56static void
57__rpc_purge_upcall(struct inode *inode, int err)
58{
59 struct rpc_inode *rpci = RPC_I(inode);
60
61 __rpc_purge_list(rpci, &rpci->pipe, err);
62 rpci->pipelen = 0;
63 wake_up(&rpci->waitq); 54 wake_up(&rpci->waitq);
64} 55}
65 56
66static void 57static void
67rpc_timeout_upcall_queue(void *data) 58rpc_timeout_upcall_queue(void *data)
68{ 59{
60 LIST_HEAD(free_list);
69 struct rpc_inode *rpci = (struct rpc_inode *)data; 61 struct rpc_inode *rpci = (struct rpc_inode *)data;
70 struct inode *inode = &rpci->vfs_inode; 62 struct inode *inode = &rpci->vfs_inode;
63 void (*destroy_msg)(struct rpc_pipe_msg *);
71 64
72 mutex_lock(&inode->i_mutex); 65 spin_lock(&inode->i_lock);
73 if (rpci->ops == NULL) 66 if (rpci->ops == NULL) {
74 goto out; 67 spin_unlock(&inode->i_lock);
75 if (rpci->nreaders == 0 && !list_empty(&rpci->pipe)) 68 return;
76 __rpc_purge_upcall(inode, -ETIMEDOUT); 69 }
77out: 70 destroy_msg = rpci->ops->destroy_msg;
78 mutex_unlock(&inode->i_mutex); 71 if (rpci->nreaders == 0) {
72 list_splice_init(&rpci->pipe, &free_list);
73 rpci->pipelen = 0;
74 }
75 spin_unlock(&inode->i_lock);
76 rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
79} 77}
80 78
81int 79int
@@ -84,7 +82,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
84 struct rpc_inode *rpci = RPC_I(inode); 82 struct rpc_inode *rpci = RPC_I(inode);
85 int res = -EPIPE; 83 int res = -EPIPE;
86 84
87 mutex_lock(&inode->i_mutex); 85 spin_lock(&inode->i_lock);
88 if (rpci->ops == NULL) 86 if (rpci->ops == NULL)
89 goto out; 87 goto out;
90 if (rpci->nreaders) { 88 if (rpci->nreaders) {
@@ -100,7 +98,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
100 res = 0; 98 res = 0;
101 } 99 }
102out: 100out:
103 mutex_unlock(&inode->i_mutex); 101 spin_unlock(&inode->i_lock);
104 wake_up(&rpci->waitq); 102 wake_up(&rpci->waitq);
105 return res; 103 return res;
106} 104}
@@ -115,21 +113,29 @@ static void
115rpc_close_pipes(struct inode *inode) 113rpc_close_pipes(struct inode *inode)
116{ 114{
117 struct rpc_inode *rpci = RPC_I(inode); 115 struct rpc_inode *rpci = RPC_I(inode);
116 struct rpc_pipe_ops *ops;
118 117
119 mutex_lock(&inode->i_mutex); 118 mutex_lock(&inode->i_mutex);
120 if (rpci->ops != NULL) { 119 ops = rpci->ops;
120 if (ops != NULL) {
121 LIST_HEAD(free_list);
122
123 spin_lock(&inode->i_lock);
121 rpci->nreaders = 0; 124 rpci->nreaders = 0;
122 __rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE); 125 list_splice_init(&rpci->in_upcall, &free_list);
123 __rpc_purge_upcall(inode, -EPIPE); 126 list_splice_init(&rpci->pipe, &free_list);
124 rpci->nwriters = 0; 127 rpci->pipelen = 0;
125 if (rpci->ops->release_pipe)
126 rpci->ops->release_pipe(inode);
127 rpci->ops = NULL; 128 rpci->ops = NULL;
129 spin_unlock(&inode->i_lock);
130 rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
131 rpci->nwriters = 0;
132 if (ops->release_pipe)
133 ops->release_pipe(inode);
134 cancel_delayed_work(&rpci->queue_timeout);
135 flush_scheduled_work();
128 } 136 }
129 rpc_inode_setowner(inode, NULL); 137 rpc_inode_setowner(inode, NULL);
130 mutex_unlock(&inode->i_mutex); 138 mutex_unlock(&inode->i_mutex);
131 cancel_delayed_work(&rpci->queue_timeout);
132 flush_scheduled_work();
133} 139}
134 140
135static struct inode * 141static struct inode *
@@ -177,16 +183,26 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
177 goto out; 183 goto out;
178 msg = (struct rpc_pipe_msg *)filp->private_data; 184 msg = (struct rpc_pipe_msg *)filp->private_data;
179 if (msg != NULL) { 185 if (msg != NULL) {
186 spin_lock(&inode->i_lock);
180 msg->errno = -EAGAIN; 187 msg->errno = -EAGAIN;
181 list_del_init(&msg->list); 188 list_del(&msg->list);
189 spin_unlock(&inode->i_lock);
182 rpci->ops->destroy_msg(msg); 190 rpci->ops->destroy_msg(msg);
183 } 191 }
184 if (filp->f_mode & FMODE_WRITE) 192 if (filp->f_mode & FMODE_WRITE)
185 rpci->nwriters --; 193 rpci->nwriters --;
186 if (filp->f_mode & FMODE_READ) 194 if (filp->f_mode & FMODE_READ) {
187 rpci->nreaders --; 195 rpci->nreaders --;
188 if (!rpci->nreaders) 196 if (rpci->nreaders == 0) {
189 __rpc_purge_upcall(inode, -EAGAIN); 197 LIST_HEAD(free_list);
198 spin_lock(&inode->i_lock);
199 list_splice_init(&rpci->pipe, &free_list);
200 rpci->pipelen = 0;
201 spin_unlock(&inode->i_lock);
202 rpc_purge_list(rpci, &free_list,
203 rpci->ops->destroy_msg, -EAGAIN);
204 }
205 }
190 if (rpci->ops->release_pipe) 206 if (rpci->ops->release_pipe)
191 rpci->ops->release_pipe(inode); 207 rpci->ops->release_pipe(inode);
192out: 208out:
@@ -209,6 +225,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
209 } 225 }
210 msg = filp->private_data; 226 msg = filp->private_data;
211 if (msg == NULL) { 227 if (msg == NULL) {
228 spin_lock(&inode->i_lock);
212 if (!list_empty(&rpci->pipe)) { 229 if (!list_empty(&rpci->pipe)) {
213 msg = list_entry(rpci->pipe.next, 230 msg = list_entry(rpci->pipe.next,
214 struct rpc_pipe_msg, 231 struct rpc_pipe_msg,
@@ -218,6 +235,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
218 filp->private_data = msg; 235 filp->private_data = msg;
219 msg->copied = 0; 236 msg->copied = 0;
220 } 237 }
238 spin_unlock(&inode->i_lock);
221 if (msg == NULL) 239 if (msg == NULL)
222 goto out_unlock; 240 goto out_unlock;
223 } 241 }
@@ -225,7 +243,9 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
225 res = rpci->ops->upcall(filp, msg, buf, len); 243 res = rpci->ops->upcall(filp, msg, buf, len);
226 if (res < 0 || msg->len == msg->copied) { 244 if (res < 0 || msg->len == msg->copied) {
227 filp->private_data = NULL; 245 filp->private_data = NULL;
228 list_del_init(&msg->list); 246 spin_lock(&inode->i_lock);
247 list_del(&msg->list);
248 spin_unlock(&inode->i_lock);
229 rpci->ops->destroy_msg(msg); 249 rpci->ops->destroy_msg(msg);
230 } 250 }
231out_unlock: 251out_unlock:
@@ -610,7 +630,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
610 return ERR_PTR(error); 630 return ERR_PTR(error);
611 dir = nd->dentry->d_inode; 631 dir = nd->dentry->d_inode;
612 mutex_lock(&dir->i_mutex); 632 mutex_lock(&dir->i_mutex);
613 dentry = lookup_hash(nd); 633 dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
614 if (IS_ERR(dentry)) 634 if (IS_ERR(dentry))
615 goto out_err; 635 goto out_err;
616 if (dentry->d_inode) { 636 if (dentry->d_inode) {
@@ -672,7 +692,7 @@ rpc_rmdir(char *path)
672 return error; 692 return error;
673 dir = nd.dentry->d_inode; 693 dir = nd.dentry->d_inode;
674 mutex_lock(&dir->i_mutex); 694 mutex_lock(&dir->i_mutex);
675 dentry = lookup_hash(&nd); 695 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
676 if (IS_ERR(dentry)) { 696 if (IS_ERR(dentry)) {
677 error = PTR_ERR(dentry); 697 error = PTR_ERR(dentry);
678 goto out_release; 698 goto out_release;
@@ -733,7 +753,7 @@ rpc_unlink(char *path)
733 return error; 753 return error;
734 dir = nd.dentry->d_inode; 754 dir = nd.dentry->d_inode;
735 mutex_lock(&dir->i_mutex); 755 mutex_lock(&dir->i_mutex);
736 dentry = lookup_hash(&nd); 756 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
737 if (IS_ERR(dentry)) { 757 if (IS_ERR(dentry)) {
738 error = PTR_ERR(dentry); 758 error = PTR_ERR(dentry);
739 goto out_release; 759 goto out_release;
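
Editor's note: rpc_pipe now guards its message lists with the inode spinlock and never runs destroy_msg() under it: the list is spliced onto a private head inside the lock and purged after it is dropped. A userspace sketch of that splice-then-purge pattern, using a singly linked list and a pthread mutex in place of i_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg {
    int errno_val;
    struct msg *next;
};

static pthread_mutex_t pipe_lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *pipe_head;    /* queued upcall messages */

static void queue_msg(struct msg *m)
{
    if (!m)
        return;
    pthread_mutex_lock(&pipe_lock);
    m->next = pipe_head;
    pipe_head = m;
    pthread_mutex_unlock(&pipe_lock);
}

/* Destroy every queued message with the given error, but never call the
 * (potentially slow) destructor while the lock is held: detach the whole
 * list first, then walk it unlocked.
 */
static void purge_pipe(int err)
{
    struct msg *list;

    pthread_mutex_lock(&pipe_lock);
    list = pipe_head;               /* splice to a private head */
    pipe_head = NULL;
    pthread_mutex_unlock(&pipe_lock);

    while (list) {
        struct msg *next = list->next;
        list->errno_val = err;
        printf("destroying message with errno %d\n", list->errno_val);
        free(list);
        list = next;
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        queue_msg(calloc(1, sizeof(struct msg)));
    purge_pipe(-110);               /* -ETIMEDOUT */
    return 0;
}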
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 7415406aa1ae..802d4fe0f55c 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -908,10 +908,10 @@ void rpc_release_task(struct rpc_task *task)
908 908
909/** 909/**
910 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it 910 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
911 * @clnt - pointer to RPC client 911 * @clnt: pointer to RPC client
912 * @flags - RPC flags 912 * @flags: RPC flags
913 * @ops - RPC call ops 913 * @ops: RPC call ops
914 * @data - user call data 914 * @data: user call data
915 */ 915 */
916struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, 916struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
917 const struct rpc_call_ops *ops, 917 const struct rpc_call_ops *ops,
@@ -930,6 +930,7 @@ EXPORT_SYMBOL(rpc_run_task);
930/** 930/**
931 * rpc_find_parent - find the parent of a child task. 931 * rpc_find_parent - find the parent of a child task.
932 * @child: child task 932 * @child: child task
933 * @parent: parent task
933 * 934 *
934 * Checks that the parent task is still sleeping on the 935 * Checks that the parent task is still sleeping on the
935 * queue 'childq'. If so returns a pointer to the parent. 936 * queue 'childq'. If so returns a pointer to the parent.
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 077bbf9fb9b7..8206025d8e46 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -13,7 +13,6 @@
13 * 13 *
14 */ 14 */
15 15
16#include <asm/bug.h>
17#include <linux/config.h> 16#include <linux/config.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include <linux/kmod.h> 18#include <linux/kmod.h>
@@ -783,7 +782,7 @@ int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
783 int nx = 0; 782 int nx = 0;
784 int err; 783 int err;
785 u32 genid; 784 u32 genid;
786 u16 family = dst_orig->ops->family; 785 u16 family;
787 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 786 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
788 u32 sk_sid = security_sk_sid(sk, fl, dir); 787 u32 sk_sid = security_sk_sid(sk, fl, dir);
789restart: 788restart:
@@ -797,13 +796,14 @@ restart:
797 if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT]) 796 if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
798 return 0; 797 return 0;
799 798
800 policy = flow_cache_lookup(fl, sk_sid, family, dir, 799 policy = flow_cache_lookup(fl, sk_sid, dst_orig->ops->family,
801 xfrm_policy_lookup); 800 dir, xfrm_policy_lookup);
802 } 801 }
803 802
804 if (!policy) 803 if (!policy)
805 return 0; 804 return 0;
806 805
806 family = dst_orig->ops->family;
807 policy->curlft.use_time = (unsigned long)xtime.tv_sec; 807 policy->curlft.use_time = (unsigned long)xtime.tv_sec;
808 808
809 switch (policy->action) { 809 switch (policy->action) {
@@ -886,11 +886,11 @@ restart:
886 * We can't enlist stable bundles either. 886 * We can't enlist stable bundles either.
887 */ 887 */
888 write_unlock_bh(&policy->lock); 888 write_unlock_bh(&policy->lock);
889
890 xfrm_pol_put(policy);
891 if (dst) 889 if (dst)
892 dst_free(dst); 890 dst_free(dst);
893 goto restart; 891
892 err = -EHOSTUNREACH;
893 goto error;
894 } 894 }
895 dst->next = policy->bundles; 895 dst->next = policy->bundles;
896 policy->bundles = dst; 896 policy->bundles = dst;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index e12d0be5f976..c656cbaf35e8 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -220,14 +220,14 @@ static int __xfrm_state_delete(struct xfrm_state *x)
220 x->km.state = XFRM_STATE_DEAD; 220 x->km.state = XFRM_STATE_DEAD;
221 spin_lock(&xfrm_state_lock); 221 spin_lock(&xfrm_state_lock);
222 list_del(&x->bydst); 222 list_del(&x->bydst);
223 atomic_dec(&x->refcnt); 223 __xfrm_state_put(x);
224 if (x->id.spi) { 224 if (x->id.spi) {
225 list_del(&x->byspi); 225 list_del(&x->byspi);
226 atomic_dec(&x->refcnt); 226 __xfrm_state_put(x);
227 } 227 }
228 spin_unlock(&xfrm_state_lock); 228 spin_unlock(&xfrm_state_lock);
229 if (del_timer(&x->timer)) 229 if (del_timer(&x->timer))
230 atomic_dec(&x->refcnt); 230 __xfrm_state_put(x);
231 231
232 /* The number two in this test is the reference 232 /* The number two in this test is the reference
233 * mentioned in the comment below plus the reference 233 * mentioned in the comment below plus the reference
@@ -243,7 +243,7 @@ static int __xfrm_state_delete(struct xfrm_state *x)
243 * The xfrm_state_alloc call gives a reference, and that 243 * The xfrm_state_alloc call gives a reference, and that
244 * is what we are dropping here. 244 * is what we are dropping here.
245 */ 245 */
246 atomic_dec(&x->refcnt); 246 __xfrm_state_put(x);
247 err = 0; 247 err = 0;
248 } 248 }
249 249
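
Editor's note: the xfrm_state hunks replace open-coded atomic_dec(&x->refcnt) with __xfrm_state_put() so every reference drop goes through one helper. A generic sketch of hiding get/put behind helpers follows, using C11 atomics in place of the kernel's atomic_t; note this toy version also frees the object when the count reaches zero, which is more than the bare decrement the patch substitutes.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct state {
    atomic_int refcnt;
    int id;
};

/* The only place the refcount is ever decremented: easy to add debugging,
 * underflow checks, or tracing in exactly one spot.
 */
static void state_put(struct state *x)
{
    int old = atomic_fetch_sub(&x->refcnt, 1);

    if (old <= 0) {
        fprintf(stderr, "refcount underflow on state %d\n", x->id);
    } else if (old == 1) {
        printf("freeing state %d\n", x->id);
        free(x);
    }
}

static struct state *state_get(struct state *x)
{
    atomic_fetch_add(&x->refcnt, 1);
    return x;
}

int main(void)
{
    struct state *x = malloc(sizeof(*x));

    if (!x)
        return 1;
    atomic_init(&x->refcnt, 1);    /* reference owned by the creator */
    x->id = 7;

    state_get(x);    /* a second user takes a reference */
    state_put(x);    /* ...and drops it                 */
    state_put(x);    /* creator's reference: frees it   */
    return 0;
}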
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ac87a09ba83e..7de17559249a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -345,7 +345,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
345 345
346 if (err < 0) { 346 if (err < 0) {
347 x->km.state = XFRM_STATE_DEAD; 347 x->km.state = XFRM_STATE_DEAD;
348 xfrm_state_put(x); 348 __xfrm_state_put(x);
349 goto out; 349 goto out;
350 } 350 }
351 351