path: root/net
author	Dave Jones <davej@redhat.com>	2006-09-05 17:20:21 -0400
committer	Dave Jones <davej@redhat.com>	2006-09-05 17:20:21 -0400
commit	115b384cf87249d76adb0b21aca11ee22128927d (patch)
tree	f39a2a54863e9d82d1196906f92c82ab5991c6af /net
parent	8eb7925f93af75e66a240d148efdec212f95bcb7 (diff)
parent	c336923b668fdcf0312efbec3b44895d713f4d81 (diff)
Merge ../linus
Diffstat (limited to 'net')
-rw-r--r--  net/atm/proc.c | 2
-rw-r--r--  net/bridge/br_forward.c | 10
-rw-r--r--  net/bridge/br_if.c | 7
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 3
-rw-r--r--  net/core/Makefile | 2
-rw-r--r--  net/core/dev.c | 56
-rw-r--r--  net/core/dst.c | 3
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/core/netevent.c | 69
-rw-r--r--  net/core/pktgen.c | 4
-rw-r--r--  net/core/rtnetlink.c | 15
-rw-r--r--  net/core/skbuff.c | 47
-rw-r--r--  net/core/utils.c | 7
-rw-r--r--  net/core/wireless.c | 24
-rw-r--r--  net/dccp/ccids/ccid3.c | 153
-rw-r--r--  net/dccp/ccids/ccid3.h | 9
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 36
-rw-r--r--  net/dccp/ccids/lib/loss_interval.h | 9
-rw-r--r--  net/dccp/ccids/lib/packet_history.c | 168
-rw-r--r--  net/dccp/ccids/lib/packet_history.h | 17
-rw-r--r--  net/dccp/ccids/lib/tfrc.h | 2
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c | 2
-rw-r--r--  net/dccp/dccp.h | 10
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dccp/options.c | 2
-rw-r--r--  net/decnet/dn_route.c | 9
-rw-r--r--  net/ieee80211/Kconfig | 1
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 28
-rw-r--r--  net/ipv4/fib_semantics.c | 12
-rw-r--r--  net/ipv4/igmp.c | 38
-rw-r--r--  net/ipv4/ip_output.c | 12
-rw-r--r--  net/ipv4/ip_sockglue.c | 9
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 30
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netlink.c | 17
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_sip.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 36
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 5
-rw-r--r--  net/ipv4/netfilter/ipt_hashlimit.c | 14
-rw-r--r--  net/ipv4/route.c | 10
-rw-r--r--  net/ipv4/tcp.c | 5
-rw-r--r--  net/ipv4/tcp_cong.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 12
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_minisocks.c | 4
-rw-r--r--  net/ipv4/tcp_output.c | 13
-rw-r--r--  net/ipv4/tcp_probe.c | 5
-rw-r--r--  net/ipv6/addrconf.c | 178
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/exthdrs.c | 29
-rw-r--r--  net/ipv6/icmp.c | 13
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 131
-rw-r--r--  net/ipv6/mcast.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 34
-rw-r--r--  net/ipv6/route.c | 11
-rw-r--r--  net/ipv6/tcp_ipv6.c | 8
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/ipx/af_ipx.c | 10
-rw-r--r--  net/lapb/lapb_iface.c | 12
-rw-r--r--  net/llc/af_llc.c | 20
-rw-r--r--  net/llc/llc_sap.c | 7
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 17
-rw-r--r--  net/netfilter/nfnetlink_log.c | 3
-rw-r--r--  net/netfilter/xt_SECMARK.c | 2
-rw-r--r--  net/netfilter/xt_physdev.c | 1
-rw-r--r--  net/netfilter/xt_string.c | 7
-rw-r--r--  net/netlink/af_netlink.c | 14
-rw-r--r--  net/sched/cls_u32.c | 2
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sctp/sm_make_chunk.c | 30
-rw-r--r--  net/sctp/sm_statefuns.c | 20
-rw-r--r--  net/sctp/socket.c | 20
-rw-r--r--  net/socket.c | 3
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 3
-rw-r--r--  net/sunrpc/cache.c | 6
-rw-r--r--  net/sunrpc/clnt.c | 82
-rw-r--r--  net/sunrpc/rpc_pipe.c | 61
-rw-r--r--  net/sunrpc/xprt.c | 21
-rw-r--r--  net/sunrpc/xprtsock.c | 29
-rw-r--r--  net/unix/af_unix.c | 17
-rw-r--r--  net/xfrm/xfrm_policy.c | 27
83 files changed, 1132 insertions, 649 deletions
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 3f95b0886a6a..91fe5f53ff11 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -507,7 +507,7 @@ err_out:
 	goto out;
 }
 
-void __exit atm_proc_exit(void)
+void atm_proc_exit(void)
 {
 	atm_proc_dirs_remove();
 }
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 6ccd32b30809..864fbbc7b24d 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -40,11 +40,15 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 	else {
 #ifdef CONFIG_BRIDGE_NETFILTER
 		/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
-		nf_bridge_maybe_copy_header(skb);
+		if (nf_bridge_maybe_copy_header(skb))
+			kfree_skb(skb);
+		else
 #endif
-		skb_push(skb, ETH_HLEN);
+		{
+			skb_push(skb, ETH_HLEN);
 
-		dev_queue_xmit(skb);
+			dev_queue_xmit(skb);
+		}
 	}
 
 	return 0;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f55ef682ef84..b1211d5342f6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br)
 			checksum = 0;
 
 		if (feature & NETIF_F_GSO)
-			feature |= NETIF_F_TSO;
+			feature |= NETIF_F_GSO_SOFTWARE;
 		feature |= NETIF_F_GSO;
 
 		features &= feature;
 	}
 
+	if (!(checksum & NETIF_F_ALL_CSUM))
+		features &= ~NETIF_F_SG;
+	if (!(features & NETIF_F_SG))
+		features &= ~NETIF_F_GSO_MASK;
+
 	br->dev->features = features | checksum | NETIF_F_LLTX |
 			    NETIF_F_GSO_ROBUST;
 }
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 06abb6634f5b..53086fb75089 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -85,7 +85,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
 		goto err_out;
 
 	err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0);
-	if (err)
+	if (err < 0)
 		goto err_kfree;
 
 	NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 02693a230dc1..9f950db3b76f 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -74,6 +74,9 @@ static void ulog_send(unsigned int nlgroup)
 	if (timer_pending(&ub->timer))
 		del_timer(&ub->timer);
 
+	if (!ub->skb)
+		return;
+
 	/* last nlmsg needs NLMSG_DONE */
 	if (ub->qlen > 1)
 		ub->lastnlh->nlmsg_type = NLMSG_DONE;
diff --git a/net/core/Makefile b/net/core/Makefile
index e9bd2467d5a9..2645ba428d48 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
-obj-y		     += dev.o ethtool.o dev_mcast.o dst.o \
+obj-y		     += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o
 
 obj-$(CONFIG_XFRM) += flow.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 4d2b5167d7f5..d4a1ec3bded5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -116,6 +116,7 @@
 #include <linux/audit.h>
 #include <linux/dmaengine.h>
 #include <linux/err.h>
+#include <linux/ctype.h>
 
 /*
  *	The list of packet types we will receive (as opposed to discard)
@@ -632,14 +633,22 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
  *	@name: name string
  *
  *	Network device names need to be valid file names to
- *	to allow sysfs to work
+ *	to allow sysfs to work. We also disallow any kind of
+ *	whitespace.
  */
 int dev_valid_name(const char *name)
 {
-	return !(*name == '\0'
-		 || !strcmp(name, ".")
-		 || !strcmp(name, "..")
-		 || strchr(name, '/'));
+	if (*name == '\0')
+		return 0;
+	if (!strcmp(name, ".") || !strcmp(name, ".."))
+		return 0;
+
+	while (*name) {
+		if (*name == '/' || isspace(*name))
+			return 0;
+		name++;
+	}
+	return 1;
 }
 
 /**
@@ -1166,11 +1175,6 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 		goto out_set_summed;
 
 	if (unlikely(skb_shinfo(skb)->gso_size)) {
-		static int warned;
-
-		WARN_ON(!warned);
-		warned = 1;
-
 		/* Let GSO fix up the checksum. */
 		goto out_set_summed;
 	}
@@ -1220,11 +1224,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 	__skb_pull(skb, skb->mac_len);
 
 	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
-		static int warned;
-
-		WARN_ON(!warned);
-		warned = 1;
-
 		if (skb_header_cloned(skb) &&
 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			return ERR_PTR(err);
@@ -1629,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 
 	if (dev->master) {
-		/*
-		 * On bonding slaves other than the currently active
-		 * slave, suppress duplicates except for 802.3ad
-		 * ETH_P_SLOW and alb non-mcast/bcast.
-		 */
-		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-			if (dev->master->priv_flags & IFF_MASTER_ALB) {
-				if (skb->pkt_type != PACKET_BROADCAST &&
-				    skb->pkt_type != PACKET_MULTICAST)
-					goto keep;
-			}
-
-			if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-			    skb->protocol == __constant_htons(ETH_P_SLOW))
-				goto keep;
-
+		if (skb_bond_should_drop(skb)) {
 			kfree_skb(skb);
 			return NULL;
 		}
-keep:
 		skb->dev = dev->master;
 	}
 
@@ -3429,12 +3412,9 @@ static void net_dma_rebalance(void)
 	unsigned int cpu, i, n;
 	struct dma_chan *chan;
 
-	lock_cpu_hotplug();
-
 	if (net_dma_count == 0) {
 		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
-		unlock_cpu_hotplug();
+			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
 		return;
 	}
 
@@ -3447,15 +3427,13 @@ static void net_dma_rebalance(void)
 		  + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
 
 		while(n) {
-			per_cpu(softnet_data.net_dma, cpu) = chan;
+			per_cpu(softnet_data, cpu).net_dma = chan;
 			cpu = next_cpu(cpu, cpu_online_map);
 			n--;
 		}
 		i++;
 	}
 	rcu_read_unlock();
-
-	unlock_cpu_hotplug();
 }
 
 /**
diff --git a/net/core/dst.c b/net/core/dst.c
index 470c05bc4cb2..1a5e49da0e77 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy)
 		dst_gc_timer_inc = DST_GC_INC;
 		dst_gc_timer_expires = DST_GC_MIN;
 	}
-	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
 #if RT_CACHE_DEBUG >= 2
 	printk("dst_total: %d/%d %ld\n",
 	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
 #endif
-	add_timer(&dst_gc_timer);
+	mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
 
 out:
 	spin_unlock(&dst_lock);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7ad681f5e712..5130d2efdbbe 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -29,6 +29,7 @@
 #include <net/neighbour.h>
 #include <net/dst.h>
 #include <net/sock.h>
+#include <net/netevent.h>
 #include <linux/rtnetlink.h>
 #include <linux/random.h>
 #include <linux/string.h>
@@ -754,6 +755,7 @@ static void neigh_timer_handler(unsigned long arg)
 			neigh->nud_state = NUD_STALE;
 			neigh->updated = jiffies;
 			neigh_suspect(neigh);
+			notify = 1;
 		}
 	} else if (state & NUD_DELAY) {
 		if (time_before_eq(now,
@@ -762,6 +764,7 @@ static void neigh_timer_handler(unsigned long arg)
 			neigh->nud_state = NUD_REACHABLE;
 			neigh->updated = jiffies;
 			neigh_connect(neigh);
+			notify = 1;
 			next = neigh->confirmed + neigh->parms->reachable_time;
 		} else {
 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
@@ -819,6 +822,8 @@ static void neigh_timer_handler(unsigned long arg)
 out:
 		write_unlock(&neigh->lock);
 	}
+	if (notify)
+		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 
 #ifdef CONFIG_ARPD
 	if (notify && neigh->parms->app_probes)
@@ -926,9 +931,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 {
 	u8 old;
 	int err;
-#ifdef CONFIG_ARPD
 	int notify = 0;
-#endif
 	struct net_device *dev;
 	int update_isrouter = 0;
 
@@ -948,9 +951,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 			neigh_suspect(neigh);
 		neigh->nud_state = new;
 		err = 0;
-#ifdef CONFIG_ARPD
 		notify = old & NUD_VALID;
-#endif
 		goto out;
 	}
 
@@ -1022,9 +1023,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		if (!(new & NUD_CONNECTED))
 			neigh->confirmed = jiffies -
 				      (neigh->parms->base_reachable_time << 1);
-#ifdef CONFIG_ARPD
 		notify = 1;
-#endif
 	}
 	if (new == old)
 		goto out;
@@ -1056,6 +1055,9 @@ out:
 				(neigh->flags & ~NTF_ROUTER);
 	}
 	write_unlock_bh(&neigh->lock);
+
+	if (notify)
+		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 #ifdef CONFIG_ARPD
 	if (notify && neigh->parms->app_probes)
 		neigh_app_notify(neigh);
diff --git a/net/core/netevent.c b/net/core/netevent.c
new file mode 100644
index 000000000000..35d02c38554e
--- /dev/null
+++ b/net/core/netevent.c
@@ -0,0 +1,69 @@
+/*
+ *	Network event notifiers
+ *
+ *	Authors:
+ *		Tom Tucker             <tom@opengridcomputing.com>
+ *		Steve Wise             <swise@opengridcomputing.com>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Fixes:
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+
+static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
+
+/**
+ *	register_netevent_notifier - register a netevent notifier block
+ *	@nb: notifier
+ *
+ *	Register a notifier to be called when a netevent occurs.
+ *	The notifier passed is linked into the kernel structures and must
+ *	not be reused until it has been unregistered. A negative errno code
+ *	is returned on a failure.
+ */
+int register_netevent_notifier(struct notifier_block *nb)
+{
+	int err;
+
+	err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
+	return err;
+}
+
+/**
+ *	netevent_unregister_notifier - unregister a netevent notifier block
+ *	@nb: notifier
+ *
+ *	Unregister a notifier previously registered by
+ *	register_neigh_notifier(). The notifier is unlinked into the
+ *	kernel structures and may then be reused. A negative errno code
+ *	is returned on a failure.
+ */
+
+int unregister_netevent_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
+}
+
+/**
+ *	call_netevent_notifiers - call all netevent notifier blocks
+ *	@val: value passed unmodified to notifier function
+ *	@v:   pointer passed unmodified to notifier function
+ *
+ *	Call all neighbour notifier blocks.  Parameters and return value
+ *	are as for notifier_call_chain().
+ */
+
+int call_netevent_notifiers(unsigned long val, void *v)
+{
+	return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
+}
+
+EXPORT_SYMBOL_GPL(register_netevent_notifier);
+EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
+EXPORT_SYMBOL_GPL(call_netevent_notifiers);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 67ed14ddabd2..6a7320b39ed0 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.iph = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->protocol = protocol;
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.ipv6h = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 20e5bb73f147..30cc1ba6ed5c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	}
 
 	if (ida[IFLA_ADDRESS - 1]) {
+		struct sockaddr *sa;
+		int len;
+
 		if (!dev->set_mac_address) {
 			err = -EOPNOTSUPP;
 			goto out;
@@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
 			goto out;
 
-		err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
+		len = sizeof(sa_family_t) + dev->addr_len;
+		sa = kmalloc(len, GFP_KERNEL);
+		if (!sa) {
+			err = -ENOMEM;
+			goto out;
+		}
+		sa->sa_family = dev->type;
+		memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]),
+		       dev->addr_len);
+		err = dev->set_mac_address(dev, sa);
+		kfree(sa);
 		if (err)
 			goto out;
 		send_addr_notify = 1;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 476aa3978504..c54f3664bce5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,13 +71,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly;
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 /*
- * lockdep: lock class key used by skb_queue_head_init():
- */
-struct lock_class_key skb_queue_lock_key;
-
-EXPORT_SYMBOL(skb_queue_lock_key);
-
-/*
  *	Keep out-of-line to prevent kernel bloat.
  *	__builtin_return_address is not used because it is not always
  *	reliable.
@@ -256,6 +249,31 @@ nodata:
 	goto out;
 }
 
+/**
+ *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *	@dev: network device to receive on
+ *	@length: length to allocate
+ *	@gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+		unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	if (likely(skb)) {
+		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
+	return skb;
+}
 
 static void skb_drop_list(struct sk_buff **listp)
 {
@@ -846,7 +864,11 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
 		return err;
 
-	for (i = 0; i < nfrags; i++) {
+	i = 0;
+	if (offset >= len)
+		goto drop_pages;
+
+	for (; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
 
 		if (end < len) {
@@ -854,9 +876,9 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			continue;
 		}
 
-		if (len > offset)
-			skb_shinfo(skb)->frags[i++].size = len - offset;
+		skb_shinfo(skb)->frags[i++].size = len - offset;
 
+drop_pages:
 		skb_shinfo(skb)->nr_frags = i;
 
 		for (; i < nfrags; i++)
@@ -864,7 +886,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 
 		if (skb_shinfo(skb)->frag_list)
 			skb_drop_fraglist(skb);
-		break;
+		goto done;
 	}
 
 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
@@ -879,6 +901,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			return -ENOMEM;
 
 		nfrag->next = frag->next;
+		kfree_skb(frag);
 		frag = nfrag;
 		*fragp = frag;
 	}
@@ -897,6 +920,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			break;
 		}
 
+done:
 	if (len > skb_headlen(skb)) {
 		skb->data_len -= skb->len - len;
 		skb->len = len;
@@ -2042,6 +2066,7 @@ EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(__alloc_skb);
+EXPORT_SYMBOL(__netdev_alloc_skb);
 EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);
diff --git a/net/core/utils.c b/net/core/utils.c
index 4f96f389243d..e31c90e05594 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -130,12 +130,13 @@ void __init net_random_init(void)
 static int net_random_reseed(void)
 {
 	int i;
-	unsigned long seed[NR_CPUS];
+	unsigned long seed;
 
-	get_random_bytes(seed, sizeof(seed));
 	for_each_possible_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
-		__net_srandom(state, seed[i]);
+
+		get_random_bytes(&seed, sizeof(seed));
+		__net_srandom(state, seed);
 	}
 	return 0;
 }
diff --git a/net/core/wireless.c b/net/core/wireless.c
index d2bc72d318f7..de0bde4b51dd 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -82,6 +82,7 @@
 #include <linux/init.h>			/* for __init */
 #include <linux/if_arp.h>		/* ARPHRD_ETHER */
 #include <linux/etherdevice.h>		/* compare_ether_addr */
+#include <linux/interrupt.h>
 
 #include <linux/wireless.h>		/* Pretty obvious */
 #include <net/iw_handler.h>		/* New driver API */
@@ -1842,6 +1843,18 @@ int wireless_rtnetlink_set(struct net_device * dev,
  */
 
 #ifdef WE_EVENT_RTNETLINK
+static struct sk_buff_head wireless_nlevent_queue;
+
+static void wireless_nlevent_process(unsigned long data)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&wireless_nlevent_queue)))
+		netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
+}
+
+static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0);
+
 /* ---------------------------------------------------------------- */
 /*
  * Fill a rtnetlink message with our event data.
@@ -1904,8 +1917,17 @@ static inline void rtmsg_iwinfo(struct net_device * dev,
 		return;
 	}
 	NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
-	netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
+	skb_queue_tail(&wireless_nlevent_queue, skb);
+	tasklet_schedule(&wireless_nlevent_tasklet);
+}
+
+static int __init wireless_nlevent_init(void)
+{
+	skb_queue_head_init(&wireless_nlevent_queue);
+	return 0;
 }
+
+subsys_initcall(wireless_nlevent_init);
 #endif	/* WE_EVENT_RTNETLINK */
 
 /* ---------------------------------------------------------------- */
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index c39bff706cfc..090bc39e8199 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -2,7 +2,7 @@
  *  net/dccp/ccids/ccid3.c
  *
  *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- *  Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com>
+ *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
  *
  *  An implementation of the DCCP protocol
  *
@@ -342,6 +342,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
 		new_packet->dccphtx_ccval =
 			DCCP_SKB_CB(skb)->dccpd_ccval =
 				hctx->ccid3hctx_last_win_count;
+		timeval_add_usecs(&hctx->ccid3hctx_t_nom,
+				  hctx->ccid3hctx_t_ipi);
 	}
 out:
 	return rc;
@@ -413,7 +415,8 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
 	case TFRC_SSTATE_NO_FBACK:
 	case TFRC_SSTATE_FBACK:
 		if (len > 0) {
-			hctx->ccid3hctx_t_nom = now;
+			timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
+					  hctx->ccid3hctx_t_ipi);
 			ccid3_calc_new_t_ipi(hctx);
 			ccid3_calc_new_delta(hctx);
 			timeval_add_usecs(&hctx->ccid3hctx_t_nom,
@@ -757,8 +760,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
 	}
 
 	hcrx->ccid3hcrx_tstamp_last_feedback = now;
-	hcrx->ccid3hcrx_last_counter	     = packet->dccphrx_ccval;
-	hcrx->ccid3hcrx_seqno_last_counter   = packet->dccphrx_seqno;
+	hcrx->ccid3hcrx_ccval_last_counter   = packet->dccphrx_ccval;
 	hcrx->ccid3hcrx_bytes_recv	     = 0;
 
 	/* Convert to multiples of 10us */
@@ -782,7 +784,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 	if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
 		return 0;
 
-	DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter;
+	DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;
 
 	if (dccp_packet_without_ack(skb))
 		return 0;
@@ -854,6 +856,11 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
 		interval = 1;
 	}
 found:
+	if (!tail) {
+		LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n",
+		   __FUNCTION__);
+		return ~0;
+	}
 	rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
 	ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
 		       dccp_role(sk), sk, rtt);
@@ -864,9 +871,20 @@ found:
 	delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
 	x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
 
+	if (x_recv == 0)
+		x_recv = hcrx->ccid3hcrx_x_recv;
+
 	tmp1 = (u64)x_recv * (u64)rtt;
 	do_div(tmp1,10000000);
 	tmp2 = (u32)tmp1;
+
+	if (!tmp2) {
+		LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 "
+			"%s: x_recv = %u, rtt =%u\n",
+			__FUNCTION__, x_recv, rtt);
+		return ~0;
+	}
+
 	fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
 	/* do not alter order above or you will get overflow on 32 bit */
 	p = tfrc_calc_x_reverse_lookup(fval);
@@ -882,31 +900,101 @@ found:
 static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
 {
 	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
+	struct dccp_li_hist_entry *next, *head;
+	u64 seq_temp;
 
-	if (seq_loss != DCCP_MAX_SEQNO + 1 &&
-	    list_empty(&hcrx->ccid3hcrx_li_hist)) {
-		struct dccp_li_hist_entry *li_tail;
+	if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
+		if (!dccp_li_hist_interval_new(ccid3_li_hist,
+		   &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
+			return;
 
-		li_tail = dccp_li_hist_interval_new(ccid3_li_hist,
-						    &hcrx->ccid3hcrx_li_hist,
-						    seq_loss, win_loss);
-		if (li_tail == NULL)
+		next = (struct dccp_li_hist_entry *)
+		   hcrx->ccid3hcrx_li_hist.next;
+		next->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
+	} else {
+		struct dccp_li_hist_entry *entry;
+		struct list_head *tail;
+
+		head = (struct dccp_li_hist_entry *)
+		   hcrx->ccid3hcrx_li_hist.next;
+		/* FIXME win count check removed as was wrong */
+		/* should make this check with receive history */
+		/* and compare there as per section 10.2 of RFC4342 */
+
+		/* new loss event detected */
+		/* calculate last interval length */
+		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
+		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+
+		if (entry == NULL) {
+			printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__);
+			dump_stack();
 			return;
-		li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
-	} else
-		    LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of "
-				   "interval\n", __FUNCTION__);
+		}
+
+		list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);
+
+		tail = hcrx->ccid3hcrx_li_hist.prev;
+		list_del(tail);
+		kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);
+
+		/* Create the newest interval */
+		entry->dccplih_seqno = seq_loss;
+		entry->dccplih_interval = seq_temp;
+		entry->dccplih_win_count = win_loss;
+	}
 }
 
-static void ccid3_hc_rx_detect_loss(struct sock *sk)
+static int ccid3_hc_rx_detect_loss(struct sock *sk,
+				    struct dccp_rx_hist_entry *packet)
 {
 	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
-	u8 win_loss;
-	const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist,
-						      &hcrx->ccid3hcrx_li_hist,
-						      &win_loss);
+	struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
+	u64 seqno = packet->dccphrx_seqno;
+	u64 tmp_seqno;
+	int loss = 0;
+	u8 ccval;
+
+
+	tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
+
+	if (!rx_hist ||
+	   follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
+		hcrx->ccid3hcrx_seqno_nonloss = seqno;
+		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
+		goto detect_out;
+	}
+
 
-	ccid3_hc_rx_update_li(sk, seq_loss, win_loss);
+	while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
+	   > TFRC_RECV_NUM_LATE_LOSS) {
+		loss = 1;
+		ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
+		   hcrx->ccid3hcrx_ccval_nonloss);
+		tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
+		dccp_inc_seqno(&tmp_seqno);
+		hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
+		dccp_inc_seqno(&tmp_seqno);
+		while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
+		   tmp_seqno, &ccval)) {
+			hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
+			hcrx->ccid3hcrx_ccval_nonloss = ccval;
+			dccp_inc_seqno(&tmp_seqno);
+		}
+	}
+
+	/* FIXME - this code could be simplified with above while */
+	/* but works at moment */
+	if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
+		hcrx->ccid3hcrx_seqno_nonloss = seqno;
+		hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
+	}
+
+detect_out:
+	dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
+		   &hcrx->ccid3hcrx_li_hist, packet,
+		   hcrx->ccid3hcrx_seqno_nonloss);
+	return loss;
 }
 
 static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -916,8 +1004,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	struct dccp_rx_hist_entry *packet;
 	struct timeval now;
 	u8 win_count;
-	u32 p_prev, r_sample, t_elapsed;
-	int ins;
+	u32 p_prev, rtt_prev, r_sample, t_elapsed;
+	int loss;
 
 	BUG_ON(hcrx == NULL ||
 	       !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
@@ -932,7 +1020,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	case DCCP_PKT_DATAACK:
 		if (opt_recv->dccpor_timestamp_echo == 0)
 			break;
-		p_prev = hcrx->ccid3hcrx_rtt;
+		rtt_prev = hcrx->ccid3hcrx_rtt;
 		dccp_timestamp(sk, &now);
 		timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
 		r_sample = timeval_usecs(&now);
@@ -951,8 +1039,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 			hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
 					      r_sample / 10;
 
-		if (p_prev != hcrx->ccid3hcrx_rtt)
-			ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n",
+		if (rtt_prev != hcrx->ccid3hcrx_rtt)
+			ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
 				       dccp_role(sk), hcrx->ccid3hcrx_rtt,
 				       opt_recv->dccpor_elapsed_time);
 		break;
@@ -973,8 +1061,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 
 	win_count = packet->dccphrx_ccval;
 
-	ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
-				      &hcrx->ccid3hcrx_li_hist, packet);
+	loss = ccid3_hc_rx_detect_loss(sk, packet);
 
 	if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
 		return;
@@ -991,7 +1078,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	case TFRC_RSTATE_DATA:
 		hcrx->ccid3hcrx_bytes_recv += skb->len -
 					      dccp_hdr(skb)->dccph_doff * 4;
-		if (ins != 0)
+		if (loss)
 			break;
 
 		dccp_timestamp(sk, &now);
@@ -1012,7 +1099,6 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
 		       dccp_role(sk), sk, dccp_state_name(sk->sk_state));
 
-	ccid3_hc_rx_detect_loss(sk);
 	p_prev = hcrx->ccid3hcrx_p;
 
 	/* Calculate loss event rate */
@@ -1022,6 +1108,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 		/* Scaling up by 1000000 as fixed decimal */
 		if (i_mean != 0)
 			hcrx->ccid3hcrx_p = 1000000 / i_mean;
+	} else {
+		printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__);
+		dump_stack();
 	}
 
 	if (hcrx->ccid3hcrx_p > p_prev) {
@@ -1230,7 +1319,7 @@ static __exit void ccid3_module_exit(void)
 }
 module_exit(ccid3_module_exit);
 
-MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, "
+MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
 	      "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 5ade4f668b22..0a2cb7536d26 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -1,13 +1,13 @@
 /*
  *  net/dccp/ccids/ccid3.h
  *
- *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
+ *  Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
  *
  *  An implementation of the DCCP protocol
  *
  *  This code has been developed by the University of Waikato WAND
  *  research group. For further information please see http://www.wand.net.nz/
- *  or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
+ *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
  *  authors:
@@ -120,9 +120,10 @@ struct ccid3_hc_rx_sock {
 #define ccid3hcrx_x_recv		ccid3hcrx_tfrc.tfrcrx_x_recv
 #define ccid3hcrx_rtt			ccid3hcrx_tfrc.tfrcrx_rtt
 #define ccid3hcrx_p			ccid3hcrx_tfrc.tfrcrx_p
-  	u64				ccid3hcrx_seqno_last_counter:48,
+  	u64				ccid3hcrx_seqno_nonloss:48,
+					ccid3hcrx_ccval_nonloss:4,
 					ccid3hcrx_state:8,
-					ccid3hcrx_last_counter:4;
+					ccid3hcrx_ccval_last_counter:4;
 	u32				ccid3hcrx_bytes_recv;
 	struct timeval			ccid3hcrx_tstamp_last_feedback;
 	struct timeval			ccid3hcrx_tstamp_last_ack;
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 5d7b7d864385..906c81ab9d4f 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -2,7 +2,7 @@
  * net/dccp/ccids/lib/loss_interval.c
  *
  * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify
@@ -12,6 +12,7 @@
  */
 
 #include <linux/module.h>
+#include <net/sock.h>
 
 #include "loss_interval.h"
 
@@ -90,13 +91,13 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
 	u32 w_tot  = 0;
 
 	list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) {
-		if (i < DCCP_LI_HIST_IVAL_F_LENGTH) {
+		if (li_entry->dccplih_interval != ~0) {
 			i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i];
 			w_tot  += dccp_li_hist_w[i];
+			if (i != 0)
+				i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
 		}
 
-		if (i != 0)
-			i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
 
 		if (++i > DCCP_LI_HIST_IVAL_F_LENGTH)
 			break;
@@ -107,37 +108,36 @@
 
 	i_tot = max(i_tot0, i_tot1);
 
-	/* FIXME: Why do we do this? -Ian McDonald */
-	if (i_tot * 4 < w_tot)
-		i_tot = w_tot * 4;
+	if (!w_tot) {
+		LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__);
+		return 1;
+	}
 
-	return i_tot * 4 / w_tot;
+	return i_tot / w_tot;
 }
 
 EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean);
 
-struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist,
-						      struct list_head *list,
-						      const u64 seq_loss,
-						      const u8 win_loss)
+int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
+   struct list_head *list, const u64 seq_loss, const u8 win_loss)
 {
-	struct dccp_li_hist_entry *tail = NULL, *entry;
+	struct dccp_li_hist_entry *entry;
 	int i;
 
-	for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) {
+	for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
 		entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
 		if (entry == NULL) {
 			dccp_li_hist_purge(hist, list);
-			return NULL;
+			dump_stack();
+			return 0;
 		}
-		if (tail == NULL)
-			tail = entry;
+		entry->dccplih_interval = ~0;
 		list_add(&entry->dccplih_node, list);
 	}
 
 	entry->dccplih_seqno = seq_loss;
 	entry->dccplih_win_count = win_loss;
-	return tail;
+	return 1;
 }
 
 EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new);
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 43bf78269d1d..0ae85f0340b2 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -4,7 +4,7 @@
  * net/dccp/ccids/lib/loss_interval.h
  *
  * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify it
@@ -52,9 +52,6 @@ extern void dccp_li_hist_purge(struct dccp_li_hist *hist,
 
 extern u32 dccp_li_hist_calc_i_mean(struct list_head *list);
 
-extern struct dccp_li_hist_entry *
-	dccp_li_hist_interval_new(struct dccp_li_hist *hist,
-				  struct list_head *list,
-				  const u64 seq_loss,
-				  const u8 win_loss);
+extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
+   struct list_head *list, const u64 seq_loss, const u8 win_loss);
 #endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index ad98d6a322eb..b876c9c81c65 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -1,13 +1,13 @@
 /*
- *  net/dccp/packet_history.h
+ *  net/dccp/packet_history.c
  *
- *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
+ *  Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
- *  or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
+ *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
@@ -112,64 +112,27 @@ struct dccp_rx_hist_entry *
 
 EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet);
 
-int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
+void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
 			    struct list_head *rx_list,
 			    struct list_head *li_list,
-			    struct dccp_rx_hist_entry *packet)
+			    struct dccp_rx_hist_entry *packet,
+			    u64 nonloss_seqno)
 {
-	struct dccp_rx_hist_entry *entry, *next, *iter;
+	struct dccp_rx_hist_entry *entry, *next;
 	u8 num_later = 0;
 
-	iter = dccp_rx_hist_head(rx_list);
-	if (iter == NULL)
-		dccp_rx_hist_add_entry(rx_list, packet);
-	else {
-		const u64 seqno = packet->dccphrx_seqno;
-
-		if (after48(seqno, iter->dccphrx_seqno))
-			dccp_rx_hist_add_entry(rx_list, packet);
-		else {
-			if (dccp_rx_hist_entry_data_packet(iter))
-				num_later = 1;
-
-			list_for_each_entry_continue(iter, rx_list,
-						     dccphrx_node) {
-				if (after48(seqno, iter->dccphrx_seqno)) {
-					dccp_rx_hist_add_entry(&iter->dccphrx_node,
-							       packet);
-					goto trim_history;
-				}
-
-				if (dccp_rx_hist_entry_data_packet(iter))
-					num_later++;
+	list_add(&packet->dccphrx_node, rx_list);
 
-				if (num_later == TFRC_RECV_NUM_LATE_LOSS) {
-					dccp_rx_hist_entry_delete(hist, packet);
-					return 1;
-				}
-			}
-
-			if (num_later < TFRC_RECV_NUM_LATE_LOSS)
-				dccp_rx_hist_add_entry(rx_list, packet);
-			/*
-			 * FIXME: else what? should we destroy the packet
-			 * like above?
-			 */
-		}
-	}
-
-trim_history:
-	/*
-	 * Trim history (remove all packets after the NUM_LATE_LOSS + 1
-	 * data packets)
-	 */
 	num_later = TFRC_RECV_NUM_LATE_LOSS + 1;
 
 	if (!list_empty(li_list)) {
 		list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
 			if (num_later == 0) {
-				list_del_init(&entry->dccphrx_node);
-				dccp_rx_hist_entry_delete(hist, entry);
+				if (after48(nonloss_seqno,
+				   entry->dccphrx_seqno)) {
+					list_del_init(&entry->dccphrx_node);
+					dccp_rx_hist_entry_delete(hist, entry);
+				}
 			} else if (dccp_rx_hist_entry_data_packet(entry))
 				--num_later;
 		}
@@ -217,94 +180,10 @@ trim_history:
 				--num_later;
 		}
 	}
-
-	return 0;
 }
 
 EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
 
-u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
-			     struct list_head *li_list, u8 *win_loss)
-{
-	struct dccp_rx_hist_entry *entry, *next, *packet;
-	struct dccp_rx_hist_entry *a_loss = NULL;
-	struct dccp_rx_hist_entry *b_loss = NULL;
-	u64 seq_loss = DCCP_MAX_SEQNO + 1;
-	u8 num_later = TFRC_RECV_NUM_LATE_LOSS;
-
-	list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
-		if (num_later == 0) {
-			b_loss = entry;
-			break;
-		} else if (dccp_rx_hist_entry_data_packet(entry))
-			--num_later;
-	}
-
-	if (b_loss == NULL)
-		goto out;
-
-	num_later = 1;
-	list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
-		if (num_later == 0) {
-			a_loss = entry;
-			break;
-		} else if (dccp_rx_hist_entry_data_packet(entry))
-			--num_later;
-	}
-
-	if (a_loss == NULL) {
-		if (list_empty(li_list)) {
-			/* no loss event have occured yet */
-			LIMIT_NETDEBUG("%s: TODO: find a lost data packet by "
-				       "comparing to initial seqno\n",
-				       __FUNCTION__);
-			goto out;
-		} else {
-			LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!",
-				       __FUNCTION__);
-			goto out;
-		}
-	}
-
-	/* Locate a lost data packet */
-	entry = packet = b_loss;
-	list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
-		u64 delta = dccp_delta_seqno(entry->dccphrx_seqno,
-					     packet->dccphrx_seqno);
-
-		if (delta != 0) {
-			if (dccp_rx_hist_entry_data_packet(packet))
-				--delta;
-			/*
-			 * FIXME: check this, probably this % usage is because
-			 * in earlier drafts the ndp count was just 8 bits
-			 * long, but now it cam be up to 24 bits long.
-			 */
-#if 0
-			if (delta % DCCP_NDP_LIMIT !=
-			    (packet->dccphrx_ndp -
-			     entry->dccphrx_ndp) % DCCP_NDP_LIMIT)
-#endif
-			if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) {
-				seq_loss = entry->dccphrx_seqno;
-				dccp_inc_seqno(&seq_loss);
-			}
-		}
-		packet = entry;
-		if (packet == a_loss)
-			break;
-	}
-out:
-	if (seq_loss != DCCP_MAX_SEQNO + 1)
-		*win_loss = a_loss->dccphrx_ccval;
-	else
-		*win_loss = 0; /* Paranoia */
-
-	return seq_loss;
-}
-
-EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss);
-
 struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
 {
 	struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
@@ -365,6 +244,25 @@ struct dccp_tx_hist_entry *
 
 EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
 
+int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
+   u8 *ccval)
+{
+	struct dccp_rx_hist_entry *packet = NULL, *entry;
+
+	list_for_each_entry(entry, list, dccphrx_node)
+		if (entry->dccphrx_seqno == seq) {
+			packet = entry;
+			break;
+		}
+
+	if (packet)
+		*ccval = packet->dccphrx_ccval;
+
+	return packet != NULL;
+}
+
+EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
+
 void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
 			      struct list_head *list,
 			      struct dccp_tx_hist_entry *packet)
@@ -391,7 +289,7 @@ void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
 
 EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
 
-MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, "
+MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
 	      "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
MODULE_DESCRIPTION("DCCP TFRC library");
MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 673c209e4e85..067cf1c85a37 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -1,13 +1,13 @@
 /*
  *  net/dccp/packet_history.h
  *
- *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
+ *  Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
- *  or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
+ *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
@@ -106,6 +106,8 @@ static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
 extern struct dccp_tx_hist_entry *
 	dccp_tx_hist_find_entry(const struct list_head *list,
 				const u64 seq);
+extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
+	u8 *ccval);
 
 static inline void dccp_tx_hist_add_entry(struct list_head *list,
 					   struct dccp_tx_hist_entry *entry)
@@ -164,12 +166,6 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
 extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
 			       struct list_head *list);
 
-static inline void dccp_rx_hist_add_entry(struct list_head *list,
-					   struct dccp_rx_hist_entry *entry)
-{
-	list_add(&entry->dccphrx_node, list);
-}
-
 static inline struct dccp_rx_hist_entry *
 	dccp_rx_hist_head(struct list_head *list)
 {
@@ -188,10 +184,11 @@ static inline int
 	       entry->dccphrx_type == DCCP_PKT_DATAACK;
 }
 
-extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
+extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
 				   struct list_head *rx_list,
 				   struct list_head *li_list,
-				   struct dccp_rx_hist_entry *packet);
+				   struct dccp_rx_hist_entry *packet,
+				   u64 nonloss_seqno);
 
 extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
 				    struct list_head *li_list, u8 *win_loss);
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 130c4c40cfe3..45f30f59ea2a 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -4,7 +4,7 @@
4 * net/dccp/ccids/lib/tfrc.h 4 * net/dccp/ccids/lib/tfrc.h
5 * 5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon 9 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
10 * 10 *
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 4fd2ebebf5a0..44076e0c6591 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -2,7 +2,7 @@
2 * net/dccp/ccids/lib/tfrc_equation.c 2 * net/dccp/ccids/lib/tfrc_equation.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 5 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon 7 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
8 * 8 *
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index d00a2f4ee5dd..a5c5475724c0 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 8 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as 11 * under the terms of the GNU General Public License version 2 as
@@ -81,6 +81,14 @@ static inline u64 max48(const u64 seq1, const u64 seq2)
81 return after48(seq1, seq2) ? seq1 : seq2; 81 return after48(seq1, seq2) ? seq1 : seq2;
82} 82}
83 83
84/* is seq1 next seqno after seq2 */
85static inline int follows48(const u64 seq1, const u64 seq2)
86{
87 int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF);
88
89 return diff==1;
90}
91
84enum { 92enum {
85 DCCP_MIB_NUM = 0, 93 DCCP_MIB_NUM = 0,
86 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ 94 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */
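
The follows48() helper added above tests whether seq1 immediately follows seq2, but it masks both values with 0xFFFF, so only the low 16 bits of the 48-bit DCCP sequence space take part in the comparison and a wrap from 0xFFFF back to 0 is not seen as a successor. A minimal sketch of a check over the full 48-bit space, assuming sequence numbers live in the low 48 bits of a u64 and wrap modulo 2^48 (illustration only, not part of this patch; the helper name is hypothetical):

static inline int follows48_full(const u64 seq1, const u64 seq2)
{
	/* Successor test over the whole 48-bit space, including the
	 * wrap from 0xFFFFFFFFFFFF back to 0. Assumes sequence numbers
	 * are kept in the low 48 bits of a u64.
	 */
	return ((seq2 + 1) & 0xFFFFFFFFFFFFULL) == (seq1 & 0xFFFFFFFFFFFFULL);
}
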
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9f3d4d7cd0bf..610c722ac27f 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -230,7 +230,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
230 ipv6_addr_copy(&np->saddr, saddr); 230 ipv6_addr_copy(&np->saddr, saddr);
231 inet->rcv_saddr = LOOPBACK4_IPV6; 231 inet->rcv_saddr = LOOPBACK4_IPV6;
232 232
233 ip6_dst_store(sk, dst, NULL); 233 __ip6_dst_store(sk, dst, NULL);
234 234
235 icsk->icsk_ext_hdr_len = 0; 235 icsk->icsk_ext_hdr_len = 0;
236 if (np->opt != NULL) 236 if (np->opt != NULL)
@@ -863,7 +863,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
863 * comment in that function for the gory details. -acme 863 * comment in that function for the gory details. -acme
864 */ 864 */
865 865
866 ip6_dst_store(newsk, dst, NULL); 866 __ip6_dst_store(newsk, dst, NULL);
867 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | 867 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
868 NETIF_F_TSO); 868 NETIF_F_TSO);
869 newdp6 = (struct dccp6_sock *)newsk; 869 newdp6 = (struct dccp6_sock *)newsk;
diff --git a/net/dccp/options.c b/net/dccp/options.c
index daf72bb671f0..07a34696ac97 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -4,7 +4,7 @@
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> 5 * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1355614ec11b..743e9fcf7c5a 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -925,8 +925,13 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
925 for(dev_out = dev_base; dev_out; dev_out = dev_out->next) { 925 for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
926 if (!dev_out->dn_ptr) 926 if (!dev_out->dn_ptr)
927 continue; 927 continue;
928 if (dn_dev_islocal(dev_out, oldflp->fld_src)) 928 if (!dn_dev_islocal(dev_out, oldflp->fld_src))
929 break; 929 continue;
930 if ((dev_out->flags & IFF_LOOPBACK) &&
931 oldflp->fld_dst &&
932 !dn_dev_islocal(dev_out, oldflp->fld_dst))
933 continue;
934 break;
930 } 935 }
931 read_unlock(&dev_base_lock); 936 read_unlock(&dev_base_lock);
932 if (dev_out == NULL) 937 if (dev_out == NULL)
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index dbb08528ddf5..f7e84e9d13ad 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP
58 depends on IEEE80211 && NET_RADIO 58 depends on IEEE80211 && NET_RADIO
59 select CRYPTO 59 select CRYPTO
60 select CRYPTO_MICHAEL_MIC 60 select CRYPTO_MICHAEL_MIC
61 select CRC32
61 ---help--- 62 ---help---
62 Include software based cipher suites in support of IEEE 802.11i 63 Include software based cipher suites in support of IEEE 802.11i
63 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled 64 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index ebc33ca6e692..4cef39e171d0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data)
116 kfree(auth); 116 kfree(auth);
117} 117}
118 118
119/* Sends a response to an auth challenge (for shared key auth). */
120static void
121ieee80211softmac_auth_challenge_response(void *_aq)
122{
123 struct ieee80211softmac_auth_queue_item *aq = _aq;
124
125 /* Send our response */
126 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
127}
128
119/* Handle the auth response from the AP 129/* Handle the auth response from the AP
120 * This should be registered with ieee80211 as handle_auth 130 * This should be registered with ieee80211 as handle_auth
121 */ 131 */
@@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
197 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE: 207 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE:
198 /* Check to make sure we have a challenge IE */ 208 /* Check to make sure we have a challenge IE */
199 data = (u8 *)auth->info_element; 209 data = (u8 *)auth->info_element;
200 if(*data++ != MFIE_TYPE_CHALLENGE){ 210 if (*data++ != MFIE_TYPE_CHALLENGE) {
201 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); 211 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
202 break; 212 break;
203 } 213 }
204 /* Save the challenge */ 214 /* Save the challenge */
205 spin_lock_irqsave(&mac->lock, flags); 215 spin_lock_irqsave(&mac->lock, flags);
206 net->challenge_len = *data++; 216 net->challenge_len = *data++;
207 if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) 217 if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
208 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; 218 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
209 if(net->challenge != NULL) 219 if (net->challenge != NULL)
210 kfree(net->challenge); 220 kfree(net->challenge);
211 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); 221 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
212 memcpy(net->challenge, data, net->challenge_len); 222 memcpy(net->challenge, data, net->challenge_len);
213 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 223 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
214 spin_unlock_irqrestore(&mac->lock, flags);
215 224
216 /* Send our response */ 225 /* We reuse the work struct from the auth request here.
217 ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); 226 * It is safe to do so as each one is per-request, and
227 * at this point (dealing with authentication response)
228 * we have obviously already sent the initial auth
229 * request. */
230 cancel_delayed_work(&aq->work);
231 INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
232 schedule_work(&aq->work);
233 spin_unlock_irqrestore(&mac->lock, flags);
218 return 0; 234 return 0;
219 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 235 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
220 kfree(net->challenge); 236 kfree(net->challenge);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 9be53a8e72c3..51738000f3dc 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi)
159 159
160void fib_release_info(struct fib_info *fi) 160void fib_release_info(struct fib_info *fi)
161{ 161{
162 write_lock(&fib_info_lock); 162 write_lock_bh(&fib_info_lock);
163 if (fi && --fi->fib_treeref == 0) { 163 if (fi && --fi->fib_treeref == 0) {
164 hlist_del(&fi->fib_hash); 164 hlist_del(&fi->fib_hash);
165 if (fi->fib_prefsrc) 165 if (fi->fib_prefsrc)
@@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi)
172 fi->fib_dead = 1; 172 fi->fib_dead = 1;
173 fib_info_put(fi); 173 fib_info_put(fi);
174 } 174 }
175 write_unlock(&fib_info_lock); 175 write_unlock_bh(&fib_info_lock);
176} 176}
177 177
178static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) 178static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
@@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
598 unsigned int old_size = fib_hash_size; 598 unsigned int old_size = fib_hash_size;
599 unsigned int i, bytes; 599 unsigned int i, bytes;
600 600
601 write_lock(&fib_info_lock); 601 write_lock_bh(&fib_info_lock);
602 old_info_hash = fib_info_hash; 602 old_info_hash = fib_info_hash;
603 old_laddrhash = fib_info_laddrhash; 603 old_laddrhash = fib_info_laddrhash;
604 fib_hash_size = new_size; 604 fib_hash_size = new_size;
@@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
639 } 639 }
640 fib_info_laddrhash = new_laddrhash; 640 fib_info_laddrhash = new_laddrhash;
641 641
642 write_unlock(&fib_info_lock); 642 write_unlock_bh(&fib_info_lock);
643 643
644 bytes = old_size * sizeof(struct hlist_head *); 644 bytes = old_size * sizeof(struct hlist_head *);
645 fib_hash_free(old_info_hash, bytes); 645 fib_hash_free(old_info_hash, bytes);
@@ -820,7 +820,7 @@ link_it:
820 820
821 fi->fib_treeref++; 821 fi->fib_treeref++;
822 atomic_inc(&fi->fib_clntref); 822 atomic_inc(&fi->fib_clntref);
823 write_lock(&fib_info_lock); 823 write_lock_bh(&fib_info_lock);
824 hlist_add_head(&fi->fib_hash, 824 hlist_add_head(&fi->fib_hash,
825 &fib_info_hash[fib_info_hashfn(fi)]); 825 &fib_info_hash[fib_info_hashfn(fi)]);
826 if (fi->fib_prefsrc) { 826 if (fi->fib_prefsrc) {
@@ -839,7 +839,7 @@ link_it:
839 head = &fib_info_devhash[hash]; 839 head = &fib_info_devhash[hash];
840 hlist_add_head(&nh->nh_hash, head); 840 hlist_add_head(&nh->nh_hash, head);
841 } endfor_nexthops(fi) 841 } endfor_nexthops(fi)
842 write_unlock(&fib_info_lock); 842 write_unlock_bh(&fib_info_lock);
843 return fi; 843 return fi;
844 844
845err_inval: 845err_inval:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9f4b752f5a33..8e8117c19e4d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1793,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1793 struct in_device *in_dev; 1793 struct in_device *in_dev;
1794 u32 group = imr->imr_multiaddr.s_addr; 1794 u32 group = imr->imr_multiaddr.s_addr;
1795 u32 ifindex; 1795 u32 ifindex;
1796 int ret = -EADDRNOTAVAIL;
1796 1797
1797 rtnl_lock(); 1798 rtnl_lock();
1798 in_dev = ip_mc_find_dev(imr); 1799 in_dev = ip_mc_find_dev(imr);
1799 if (!in_dev) {
1800 rtnl_unlock();
1801 return -ENODEV;
1802 }
1803 ifindex = imr->imr_ifindex; 1800 ifindex = imr->imr_ifindex;
1804 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { 1801 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
1805 if (iml->multi.imr_multiaddr.s_addr == group && 1802 if (iml->multi.imr_multiaddr.s_addr != group)
1806 iml->multi.imr_ifindex == ifindex) { 1803 continue;
1807 (void) ip_mc_leave_src(sk, iml, in_dev); 1804 if (ifindex) {
1805 if (iml->multi.imr_ifindex != ifindex)
1806 continue;
1807 } else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
1808 iml->multi.imr_address.s_addr)
1809 continue;
1810
1811 (void) ip_mc_leave_src(sk, iml, in_dev);
1808 1812
1809 *imlp = iml->next; 1813 *imlp = iml->next;
1810 1814
1815 if (in_dev)
1811 ip_mc_dec_group(in_dev, group); 1816 ip_mc_dec_group(in_dev, group);
1812 rtnl_unlock(); 1817 rtnl_unlock();
1813 sock_kfree_s(sk, iml, sizeof(*iml)); 1818 sock_kfree_s(sk, iml, sizeof(*iml));
1814 return 0; 1819 return 0;
1815 }
1816 } 1820 }
1821 if (!in_dev)
1822 ret = -ENODEV;
1817 rtnl_unlock(); 1823 rtnl_unlock();
1818 return -EADDRNOTAVAIL; 1824 return ret;
1819} 1825}
1820 1826
1821int ip_mc_source(int add, int omode, struct sock *sk, struct 1827int ip_mc_source(int add, int omode, struct sock *sk, struct
@@ -2199,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk)
2199 struct in_device *in_dev; 2205 struct in_device *in_dev;
2200 inet->mc_list = iml->next; 2206 inet->mc_list = iml->next;
2201 2207
2202 if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) { 2208 in_dev = inetdev_by_index(iml->multi.imr_ifindex);
2203 (void) ip_mc_leave_src(sk, iml, in_dev); 2209 (void) ip_mc_leave_src(sk, iml, in_dev);
2210 if (in_dev != NULL) {
2204 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2211 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2205 in_dev_put(in_dev); 2212 in_dev_put(in_dev);
2206 } 2213 }
2207 sock_kfree_s(sk, iml, sizeof(*iml)); 2214 sock_kfree_s(sk, iml, sizeof(*iml));
2208
2209 } 2215 }
2210 rtnl_unlock(); 2216 rtnl_unlock();
2211} 2217}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7c9f9a6421b8..a2ede167e045 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -440,6 +440,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
440 iph = skb->nh.iph; 440 iph = skb->nh.iph;
441 441
442 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { 442 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
443 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
443 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 444 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
444 htonl(dst_mtu(&rt->u.dst))); 445 htonl(dst_mtu(&rt->u.dst)));
445 kfree_skb(skb); 446 kfree_skb(skb);
@@ -526,6 +527,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
526 527
527 err = output(skb); 528 err = output(skb);
528 529
530 if (!err)
531 IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
529 if (err || !frag) 532 if (err || !frag)
530 break; 533 break;
531 534
@@ -649,9 +652,6 @@ slow_path:
649 /* 652 /*
650 * Put this fragment into the sending queue. 653 * Put this fragment into the sending queue.
651 */ 654 */
652
653 IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
654
655 iph->tot_len = htons(len + hlen); 655 iph->tot_len = htons(len + hlen);
656 656
657 ip_send_check(iph); 657 ip_send_check(iph);
@@ -659,6 +659,8 @@ slow_path:
659 err = output(skb2); 659 err = output(skb2);
660 if (err) 660 if (err)
661 goto fail; 661 goto fail;
662
663 IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
662 } 664 }
663 kfree_skb(skb); 665 kfree_skb(skb);
664 IP_INC_STATS(IPSTATS_MIB_FRAGOKS); 666 IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
@@ -946,7 +948,7 @@ alloc_new_skb:
946 skb_prev->csum = csum_sub(skb_prev->csum, 948 skb_prev->csum = csum_sub(skb_prev->csum,
947 skb->csum); 949 skb->csum);
948 data += fraggap; 950 data += fraggap;
949 skb_trim(skb_prev, maxfraglen); 951 pskb_trim_unique(skb_prev, maxfraglen);
950 } 952 }
951 953
952 copy = datalen - transhdrlen - fraggap; 954 copy = datalen - transhdrlen - fraggap;
@@ -1141,7 +1143,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1141 data, fraggap, 0); 1143 data, fraggap, 0);
1142 skb_prev->csum = csum_sub(skb_prev->csum, 1144 skb_prev->csum = csum_sub(skb_prev->csum,
1143 skb->csum); 1145 skb->csum);
1144 skb_trim(skb_prev, maxfraglen); 1146 pskb_trim_unique(skb_prev, maxfraglen);
1145 } 1147 }
1146 1148
1147 /* 1149 /*
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 84f43a3c9098..2d05c4133d3e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -112,14 +112,19 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
112static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) 112static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
113{ 113{
114 char *secdata; 114 char *secdata;
115 u32 seclen; 115 u32 seclen, secid;
116 int err; 116 int err;
117 117
118 err = security_socket_getpeersec_dgram(skb, &secdata, &seclen); 118 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
119 if (err)
120 return;
121
122 err = security_secid_to_secctx(secid, &secdata, &seclen);
119 if (err) 123 if (err)
120 return; 124 return;
121 125
122 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata); 126 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
127 security_release_secctx(secdata, seclen);
123} 128}
124 129
125 130
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 80c73ca90116..8d1d7a6e72a5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -236,7 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
236 struct arpt_entry *e, *back; 236 struct arpt_entry *e, *back;
237 const char *indev, *outdev; 237 const char *indev, *outdev;
238 void *table_base; 238 void *table_base;
239 struct xt_table_info *private = table->private; 239 struct xt_table_info *private;
240 240
241 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 241 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
242 if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) + 242 if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) +
@@ -248,6 +248,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
248 outdev = out ? out->name : nulldevname; 248 outdev = out ? out->name : nulldevname;
249 249
250 read_lock_bh(&table->lock); 250 read_lock_bh(&table->lock);
251 private = table->private;
251 table_base = (void *)private->entries[smp_processor_id()]; 252 table_base = (void *)private->entries[smp_processor_id()];
252 e = get_entry(table_base, private->hook_entry[hook]); 253 e = get_entry(table_base, private->hook_entry[hook]);
253 back = get_entry(table_base, private->underflow[hook]); 254 back = get_entry(table_base, private->underflow[hook]);
@@ -1170,21 +1171,34 @@ static int __init arp_tables_init(void)
1170{ 1171{
1171 int ret; 1172 int ret;
1172 1173
1173 xt_proto_init(NF_ARP); 1174 ret = xt_proto_init(NF_ARP);
1175 if (ret < 0)
1176 goto err1;
1174 1177
1175 /* Noone else will be downing sem now, so we won't sleep */ 1178 /* Noone else will be downing sem now, so we won't sleep */
1176 xt_register_target(&arpt_standard_target); 1179 ret = xt_register_target(&arpt_standard_target);
1177 xt_register_target(&arpt_error_target); 1180 if (ret < 0)
1181 goto err2;
1182 ret = xt_register_target(&arpt_error_target);
1183 if (ret < 0)
1184 goto err3;
1178 1185
1179 /* Register setsockopt */ 1186 /* Register setsockopt */
1180 ret = nf_register_sockopt(&arpt_sockopts); 1187 ret = nf_register_sockopt(&arpt_sockopts);
1181 if (ret < 0) { 1188 if (ret < 0)
1182 duprintf("Unable to register sockopts.\n"); 1189 goto err4;
1183 return ret;
1184 }
1185 1190
1186 printk("arp_tables: (C) 2002 David S. Miller\n"); 1191 printk("arp_tables: (C) 2002 David S. Miller\n");
1187 return 0; 1192 return 0;
1193
1194err4:
1195 xt_unregister_target(&arpt_error_target);
1196err3:
1197 xt_unregister_target(&arpt_standard_target);
1198err2:
1199 xt_proto_fini(NF_ARP);
1200err1:
1201 return ret;
1188} 1202}
1189 1203
1190static void __exit arp_tables_fini(void) 1204static void __exit arp_tables_fini(void)
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 33891bb1fde4..0d4cc92391fa 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
415 cb->args[0], *id); 415 cb->args[0], *id);
416 416
417 read_lock_bh(&ip_conntrack_lock); 417 read_lock_bh(&ip_conntrack_lock);
418 last = (struct ip_conntrack *)cb->args[1];
418 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) { 419 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
419restart: 420restart:
420 last = (struct ip_conntrack *)cb->args[1];
421 list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) { 421 list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
422 h = (struct ip_conntrack_tuple_hash *) i; 422 h = (struct ip_conntrack_tuple_hash *) i;
423 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) 423 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
424 continue; 424 continue;
425 ct = tuplehash_to_ctrack(h); 425 ct = tuplehash_to_ctrack(h);
426 if (last != NULL) { 426 if (cb->args[1]) {
427 if (ct == last) { 427 if (ct != last)
428 ip_conntrack_put(last);
429 cb->args[1] = 0;
430 last = NULL;
431 } else
432 continue; 428 continue;
429 cb->args[1] = 0;
433 } 430 }
434 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 431 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
435 cb->nlh->nlmsg_seq, 432 cb->nlh->nlmsg_seq,
@@ -440,17 +437,17 @@ restart:
440 goto out; 437 goto out;
441 } 438 }
442 } 439 }
443 if (last != NULL) { 440 if (cb->args[1]) {
444 ip_conntrack_put(last);
445 cb->args[1] = 0; 441 cb->args[1] = 0;
446 goto restart; 442 goto restart;
447 } 443 }
448 } 444 }
449out: 445out:
450 read_unlock_bh(&ip_conntrack_lock); 446 read_unlock_bh(&ip_conntrack_lock);
447 if (last)
448 ip_conntrack_put(last);
451 449
452 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); 450 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
453
454 return skb->len; 451 return skb->len;
455} 452}
456 453
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c
index fc87ce0da40d..4f222d6be009 100644
--- a/net/ipv4/netfilter/ip_conntrack_sip.c
+++ b/net/ipv4/netfilter/ip_conntrack_sip.c
@@ -442,7 +442,7 @@ static int __init init(void)
442 sip[i].tuple.src.u.udp.port = htons(ports[i]); 442 sip[i].tuple.src.u.udp.port = htons(ports[i]);
443 sip[i].mask.src.u.udp.port = 0xFFFF; 443 sip[i].mask.src.u.udp.port = 0xFFFF;
444 sip[i].mask.dst.protonum = 0xFF; 444 sip[i].mask.dst.protonum = 0xFF;
445 sip[i].max_expected = 1; 445 sip[i].max_expected = 2;
446 sip[i].timeout = 3 * 60; /* 3 minutes */ 446 sip[i].timeout = 3 * 60; /* 3 minutes */
447 sip[i].me = THIS_MODULE; 447 sip[i].me = THIS_MODULE;
448 sip[i].help = sip_help; 448 sip[i].help = sip_help;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fc5bdd5eb7d3..048514f15f2f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb,
230 const char *indev, *outdev; 230 const char *indev, *outdev;
231 void *table_base; 231 void *table_base;
232 struct ipt_entry *e, *back; 232 struct ipt_entry *e, *back;
233 struct xt_table_info *private = table->private; 233 struct xt_table_info *private;
234 234
235 /* Initialization */ 235 /* Initialization */
236 ip = (*pskb)->nh.iph; 236 ip = (*pskb)->nh.iph;
@@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb,
247 247
248 read_lock_bh(&table->lock); 248 read_lock_bh(&table->lock);
249 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 249 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
250 private = table->private;
250 table_base = (void *)private->entries[smp_processor_id()]; 251 table_base = (void *)private->entries[smp_processor_id()];
251 e = get_entry(table_base, private->hook_entry[hook]); 252 e = get_entry(table_base, private->hook_entry[hook]);
252 253
@@ -2239,22 +2240,39 @@ static int __init ip_tables_init(void)
2239{ 2240{
2240 int ret; 2241 int ret;
2241 2242
2242 xt_proto_init(AF_INET); 2243 ret = xt_proto_init(AF_INET);
2244 if (ret < 0)
2245 goto err1;
2243 2246
2244 /* Noone else will be downing sem now, so we won't sleep */ 2247 /* Noone else will be downing sem now, so we won't sleep */
2245 xt_register_target(&ipt_standard_target); 2248 ret = xt_register_target(&ipt_standard_target);
2246 xt_register_target(&ipt_error_target); 2249 if (ret < 0)
2247 xt_register_match(&icmp_matchstruct); 2250 goto err2;
2251 ret = xt_register_target(&ipt_error_target);
2252 if (ret < 0)
2253 goto err3;
2254 ret = xt_register_match(&icmp_matchstruct);
2255 if (ret < 0)
2256 goto err4;
2248 2257
2249 /* Register setsockopt */ 2258 /* Register setsockopt */
2250 ret = nf_register_sockopt(&ipt_sockopts); 2259 ret = nf_register_sockopt(&ipt_sockopts);
2251 if (ret < 0) { 2260 if (ret < 0)
2252 duprintf("Unable to register sockopts.\n"); 2261 goto err5;
2253 return ret;
2254 }
2255 2262
2256 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n"); 2263 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2257 return 0; 2264 return 0;
2265
2266err5:
2267 xt_unregister_match(&icmp_matchstruct);
2268err4:
2269 xt_unregister_target(&ipt_error_target);
2270err3:
2271 xt_unregister_target(&ipt_standard_target);
2272err2:
2273 xt_proto_fini(AF_INET);
2274err1:
2275 return ret;
2258} 2276}
2259 2277
2260static void __exit ip_tables_fini(void) 2278static void __exit ip_tables_fini(void)
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index d7dd7fe7051c..d46fd677fa11 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -115,6 +115,11 @@ static void ulog_send(unsigned int nlgroupnum)
115 del_timer(&ub->timer); 115 del_timer(&ub->timer);
116 } 116 }
117 117
118 if (!ub->skb) {
119 DEBUGP("ipt_ULOG: ulog_send: nothing to send\n");
120 return;
121 }
122
118 /* last nlmsg needs NLMSG_DONE */ 123 /* last nlmsg needs NLMSG_DONE */
119 if (ub->qlen > 1) 124 if (ub->qlen > 1)
120 ub->lastnlh->nlmsg_type = NLMSG_DONE; 125 ub->lastnlh->nlmsg_type = NLMSG_DONE;
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index 92980ab8ce48..3bd2368e1fc9 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -454,15 +454,12 @@ hashlimit_match(const struct sk_buff *skb,
454 dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * 454 dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
455 hinfo->cfg.burst); 455 hinfo->cfg.burst);
456 dh->rateinfo.cost = user2credits(hinfo->cfg.avg); 456 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
457 457 } else {
458 spin_unlock_bh(&hinfo->lock); 458 /* update expiration timeout */
459 return 1; 459 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
460 rateinfo_recalc(dh, now);
460 } 461 }
461 462
462 /* update expiration timeout */
463 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
464
465 rateinfo_recalc(dh, now);
466 if (dh->rateinfo.credit >= dh->rateinfo.cost) { 463 if (dh->rateinfo.credit >= dh->rateinfo.cost) {
467 /* We're underlimit. */ 464 /* We're underlimit. */
468 dh->rateinfo.credit -= dh->rateinfo.cost; 465 dh->rateinfo.credit -= dh->rateinfo.cost;
@@ -508,6 +505,9 @@ hashlimit_checkentry(const char *tablename,
508 if (!r->cfg.expire) 505 if (!r->cfg.expire)
509 return 0; 506 return 0;
510 507
508 if (r->name[sizeof(r->name) - 1] != '\0')
509 return 0;
510
511 /* This is the best we've got: We cannot release and re-grab lock, 511 /* This is the best we've got: We cannot release and re-grab lock,
512 * since checkentry() is called before ip_tables.c grabs ipt_mutex. 512 * since checkentry() is called before ip_tables.c grabs ipt_mutex.
513 * We also cannot grab the hashtable spinlock, since htable_create will 513 * We also cannot grab the hashtable spinlock, since htable_create will
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2dc6dbb28467..b873cbcdd0b8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -104,6 +104,7 @@
104#include <net/icmp.h> 104#include <net/icmp.h>
105#include <net/xfrm.h> 105#include <net/xfrm.h>
106#include <net/ip_mp_alg.h> 106#include <net/ip_mp_alg.h>
107#include <net/netevent.h>
107#ifdef CONFIG_SYSCTL 108#ifdef CONFIG_SYSCTL
108#include <linux/sysctl.h> 109#include <linux/sysctl.h>
109#endif 110#endif
@@ -1125,6 +1126,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
1125 struct rtable *rth, **rthp; 1126 struct rtable *rth, **rthp;
1126 u32 skeys[2] = { saddr, 0 }; 1127 u32 skeys[2] = { saddr, 0 };
1127 int ikeys[2] = { dev->ifindex, 0 }; 1128 int ikeys[2] = { dev->ifindex, 0 };
1129 struct netevent_redirect netevent;
1128 1130
1129 if (!in_dev) 1131 if (!in_dev)
1130 return; 1132 return;
@@ -1216,6 +1218,11 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
1216 rt_drop(rt); 1218 rt_drop(rt);
1217 goto do_next; 1219 goto do_next;
1218 } 1220 }
1221
1222 netevent.old = &rth->u.dst;
1223 netevent.new = &rt->u.dst;
1224 call_netevent_notifiers(NETEVENT_REDIRECT,
1225 &netevent);
1219 1226
1220 rt_del(hash, rth); 1227 rt_del(hash, rth);
1221 if (!rt_intern_hash(hash, rt, &rt)) 1228 if (!rt_intern_hash(hash, rt, &rt))
@@ -1452,6 +1459,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1452 } 1459 }
1453 dst->metrics[RTAX_MTU-1] = mtu; 1460 dst->metrics[RTAX_MTU-1] = mtu;
1454 dst_set_expires(dst, ip_rt_mtu_expires); 1461 dst_set_expires(dst, ip_rt_mtu_expires);
1462 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1455 } 1463 }
1456} 1464}
1457 1465
@@ -3149,7 +3157,7 @@ int __init ip_rt_init(void)
3149 rhash_entries, 3157 rhash_entries,
3150 (num_physpages >= 128 * 1024) ? 3158 (num_physpages >= 128 * 1024) ?
3151 15 : 17, 3159 15 : 17,
3152 HASH_HIGHMEM, 3160 0,
3153 &rt_hash_log, 3161 &rt_hash_log,
3154 &rt_hash_mask, 3162 &rt_hash_mask,
3155 0); 3163 0);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f6a2d9223d07..934396bb1376 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1132,7 +1132,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1132 tp->ucopy.dma_chan = NULL; 1132 tp->ucopy.dma_chan = NULL;
1133 preempt_disable(); 1133 preempt_disable();
1134 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1134 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1135 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) { 1135 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
1136 preempt_enable_no_resched(); 1136 preempt_enable_no_resched();
1137 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); 1137 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
1138 } else 1138 } else
@@ -1659,7 +1659,8 @@ adjudge_to_death:
1659 const int tmo = tcp_fin_time(sk); 1659 const int tmo = tcp_fin_time(sk);
1660 1660
1661 if (tmo > TCP_TIMEWAIT_LEN) { 1661 if (tmo > TCP_TIMEWAIT_LEN) {
1662 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); 1662 inet_csk_reset_keepalive_timer(sk,
1663 tmo - TCP_TIMEWAIT_LEN);
1663 } else { 1664 } else {
1664 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1665 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1665 goto out; 1666 goto out;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 5765f9d03174..7ff2e4273a7c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -189,7 +189,7 @@ void tcp_slow_start(struct tcp_sock *tp)
189 return; 189 return;
190 190
191 /* We MAY increase by 2 if discovered delayed ack */ 191 /* We MAY increase by 2 if discovered delayed ack */
192 if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) { 192 if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) {
193 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 193 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
194 tp->snd_cwnd++; 194 tp->snd_cwnd++;
195 } 195 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 738dad9f7d49..111ff39a08c5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2505,8 +2505,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2505 if (before(ack, prior_snd_una)) 2505 if (before(ack, prior_snd_una))
2506 goto old_ack; 2506 goto old_ack;
2507 2507
2508 if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR) 2508 if (sysctl_tcp_abc) {
2509 tp->bytes_acked += ack - prior_snd_una; 2509 if (icsk->icsk_ca_state < TCP_CA_CWR)
2510 tp->bytes_acked += ack - prior_snd_una;
2511 else if (icsk->icsk_ca_state == TCP_CA_Loss)
2512 /* we assume just one segment left network */
2513 tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
2514 }
2510 2515
2511 if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 2516 if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
2512 /* Window is constant, pure forward advance. 2517 /* Window is constant, pure forward advance.
@@ -3541,7 +3546,8 @@ void tcp_cwnd_application_limited(struct sock *sk)
3541 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 3546 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
3542 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 3547 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
3543 /* Limited by application or receiver window. */ 3548 /* Limited by application or receiver window. */
3544 u32 win_used = max(tp->snd_cwnd_used, 2U); 3549 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
3550 u32 win_used = max(tp->snd_cwnd_used, init_win);
3545 if (win_used < tp->snd_cwnd) { 3551 if (win_used < tp->snd_cwnd) {
3546 tp->snd_ssthresh = tcp_current_ssthresh(sk); 3552 tp->snd_ssthresh = tcp_current_ssthresh(sk);
3547 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 3553 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
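
In the tcp_ack() hunk above, Appropriate Byte Counting (sysctl_tcp_abc) now also accumulates bytes_acked during loss recovery, but caps each ACK's contribution at one MSS, per the comment's assumption that roughly one segment is still in the network. Assuming the usual ordering of the tcp_ca_state enum, states below CWR (Open, Disorder) still count the full advance, CWR and Recovery add nothing, and Loss adds at most one MSS. As a worked example, with mss_cache = 1460 an ACK advancing snd_una by 4380 bytes contributes min(4380, 1460) = 1460 bytes in the Loss state, versus the full 4380 bytes in Open or Disorder.
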
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f6f39e814291..4b04c3edd4a9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -438,7 +438,6 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
438 It can f.e. if SYNs crossed. 438 It can f.e. if SYNs crossed.
439 */ 439 */
440 if (!sock_owned_by_user(sk)) { 440 if (!sock_owned_by_user(sk)) {
441 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
442 sk->sk_err = err; 441 sk->sk_err = err;
443 442
444 sk->sk_error_report(sk); 443 sk->sk_error_report(sk);
@@ -874,7 +873,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
874drop_and_free: 873drop_and_free:
875 reqsk_free(req); 874 reqsk_free(req);
876drop: 875drop:
877 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
878 return 0; 876 return 0;
879} 877}
880 878
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0ccb7cb22b15..624e2b2c7f53 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -589,8 +589,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
589 /* RFC793: "second check the RST bit" and 589 /* RFC793: "second check the RST bit" and
590 * "fourth, check the SYN bit" 590 * "fourth, check the SYN bit"
591 */ 591 */
592 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) 592 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
593 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
593 goto embryonic_reset; 594 goto embryonic_reset;
595 }
594 596
595 /* ACK sequence verified above, just make sure ACK is 597 /* ACK sequence verified above, just make sure ACK is
596 * set. If ACK not set, just silently drop the packet. 598 * set. If ACK not set, just silently drop the packet.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5c08ea20a18d..b4f3ffe1b3b4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -201,6 +201,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
201 * See RFC1323 for an explanation of the limit to 14 201 * See RFC1323 for an explanation of the limit to 14
202 */ 202 */
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp);
204 while (space > 65535 && (*rcv_wscale) < 14) { 205 while (space > 65535 && (*rcv_wscale) < 14) {
205 space >>= 1; 206 space >>= 1;
206 (*rcv_wscale)++; 207 (*rcv_wscale)++;
@@ -466,7 +467,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
466 if (skb->len != tcp_header_size) 467 if (skb->len != tcp_header_size)
467 tcp_event_data_sent(tp, skb, sk); 468 tcp_event_data_sent(tp, skb, sk);
468 469
469 TCP_INC_STATS(TCP_MIB_OUTSEGS); 470 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
471 TCP_INC_STATS(TCP_MIB_OUTSEGS);
470 472
471 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 473 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
472 if (likely(err <= 0)) 474 if (likely(err <= 0))
@@ -2157,10 +2159,9 @@ int tcp_connect(struct sock *sk)
2157 skb_shinfo(buff)->gso_size = 0; 2159 skb_shinfo(buff)->gso_size = 0;
2158 skb_shinfo(buff)->gso_type = 0; 2160 skb_shinfo(buff)->gso_type = 0;
2159 buff->csum = 0; 2161 buff->csum = 0;
2162 tp->snd_nxt = tp->write_seq;
2160 TCP_SKB_CB(buff)->seq = tp->write_seq++; 2163 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2161 TCP_SKB_CB(buff)->end_seq = tp->write_seq; 2164 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2162 tp->snd_nxt = tp->write_seq;
2163 tp->pushed_seq = tp->write_seq;
2164 2165
2165 /* Send it off. */ 2166 /* Send it off. */
2166 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2167 TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2171,12 @@ int tcp_connect(struct sock *sk)
2170 sk_charge_skb(sk, buff); 2171 sk_charge_skb(sk, buff);
2171 tp->packets_out += tcp_skb_pcount(buff); 2172 tp->packets_out += tcp_skb_pcount(buff);
2172 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); 2173 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2174
2175 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2176 * in order to make this packet get counted in tcpOutSegs.
2177 */
2178 tp->snd_nxt = tp->write_seq;
2179 tp->pushed_seq = tp->write_seq;
2173 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 2180 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2174 2181
2175 /* Timer for repeating the SYN until an answer. */ 2182 /* Timer for repeating the SYN until an answer. */
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index d7d517a3a238..dab37d2f65fc 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -114,7 +114,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file)
114static ssize_t tcpprobe_read(struct file *file, char __user *buf, 114static ssize_t tcpprobe_read(struct file *file, char __user *buf,
115 size_t len, loff_t *ppos) 115 size_t len, loff_t *ppos)
116{ 116{
117 int error = 0, cnt; 117 int error = 0, cnt = 0;
118 unsigned char *tbuf; 118 unsigned char *tbuf;
119 119
120 if (!buf || len < 0) 120 if (!buf || len < 0)
@@ -130,11 +130,12 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
130 error = wait_event_interruptible(tcpw.wait, 130 error = wait_event_interruptible(tcpw.wait,
131 __kfifo_len(tcpw.fifo) != 0); 131 __kfifo_len(tcpw.fifo) != 0);
132 if (error) 132 if (error)
133 return error; 133 goto out_free;
134 134
135 cnt = kfifo_get(tcpw.fifo, tbuf, len); 135 cnt = kfifo_get(tcpw.fifo, tbuf, len);
136 error = copy_to_user(buf, tbuf, cnt); 136 error = copy_to_user(buf, tbuf, cnt);
137 137
138out_free:
138 vfree(tbuf); 139 vfree(tbuf);
139 140
140 return error ? error : cnt; 141 return error ? error : cnt;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2316a4315a18..c7852b38e03e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -578,6 +578,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
578 ifa->flags = flags | IFA_F_TENTATIVE; 578 ifa->flags = flags | IFA_F_TENTATIVE;
579 ifa->cstamp = ifa->tstamp = jiffies; 579 ifa->cstamp = ifa->tstamp = jiffies;
580 580
581 ifa->rt = rt;
582
581 ifa->idev = idev; 583 ifa->idev = idev;
582 in6_dev_hold(idev); 584 in6_dev_hold(idev);
583 /* For caller */ 585 /* For caller */
@@ -603,8 +605,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
603 } 605 }
604#endif 606#endif
605 607
606 ifa->rt = rt;
607
608 in6_ifa_hold(ifa); 608 in6_ifa_hold(ifa);
609 write_unlock(&idev->lock); 609 write_unlock(&idev->lock);
610out2: 610out2:
@@ -1869,15 +1869,21 @@ err_exit:
1869/* 1869/*
1870 * Manual configuration of address on an interface 1870 * Manual configuration of address on an interface
1871 */ 1871 */
1872static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen) 1872static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
1873 __u32 prefered_lft, __u32 valid_lft)
1873{ 1874{
1874 struct inet6_ifaddr *ifp; 1875 struct inet6_ifaddr *ifp;
1875 struct inet6_dev *idev; 1876 struct inet6_dev *idev;
1876 struct net_device *dev; 1877 struct net_device *dev;
1878 __u8 ifa_flags = 0;
1877 int scope; 1879 int scope;
1878 1880
1879 ASSERT_RTNL(); 1881 ASSERT_RTNL();
1880 1882
1883 /* check the lifetime */
1884 if (!valid_lft || prefered_lft > valid_lft)
1885 return -EINVAL;
1886
1881 if ((dev = __dev_get_by_index(ifindex)) == NULL) 1887 if ((dev = __dev_get_by_index(ifindex)) == NULL)
1882 return -ENODEV; 1888 return -ENODEV;
1883 1889
@@ -1889,10 +1895,29 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen)
1889 1895
1890 scope = ipv6_addr_scope(pfx); 1896 scope = ipv6_addr_scope(pfx);
1891 1897
1892 ifp = ipv6_add_addr(idev, pfx, plen, scope, IFA_F_PERMANENT); 1898 if (valid_lft == INFINITY_LIFE_TIME)
1899 ifa_flags |= IFA_F_PERMANENT;
1900 else if (valid_lft >= 0x7FFFFFFF/HZ)
1901 valid_lft = 0x7FFFFFFF/HZ;
1902
1903 if (prefered_lft == 0)
1904 ifa_flags |= IFA_F_DEPRECATED;
1905 else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
1906 (prefered_lft != INFINITY_LIFE_TIME))
1907 prefered_lft = 0x7FFFFFFF/HZ;
1908
1909 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
1910
1893 if (!IS_ERR(ifp)) { 1911 if (!IS_ERR(ifp)) {
1912 spin_lock_bh(&ifp->lock);
1913 ifp->valid_lft = valid_lft;
1914 ifp->prefered_lft = prefered_lft;
1915 ifp->tstamp = jiffies;
1916 spin_unlock_bh(&ifp->lock);
1917
1894 addrconf_dad_start(ifp, 0); 1918 addrconf_dad_start(ifp, 0);
1895 in6_ifa_put(ifp); 1919 in6_ifa_put(ifp);
1920 addrconf_verify(0);
1896 return 0; 1921 return 0;
1897 } 1922 }
1898 1923
@@ -1945,7 +1970,8 @@ int addrconf_add_ifaddr(void __user *arg)
1945 return -EFAULT; 1970 return -EFAULT;
1946 1971
1947 rtnl_lock(); 1972 rtnl_lock();
1948 err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); 1973 err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen,
1974 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1949 rtnl_unlock(); 1975 rtnl_unlock();
1950 return err; 1976 return err;
1951} 1977}
@@ -2771,12 +2797,16 @@ restart:
2771 ifp->idev->nd_parms->retrans_time / HZ; 2797 ifp->idev->nd_parms->retrans_time / HZ;
2772#endif 2798#endif
2773 2799
2774 if (age >= ifp->valid_lft) { 2800 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
2801 age >= ifp->valid_lft) {
2775 spin_unlock(&ifp->lock); 2802 spin_unlock(&ifp->lock);
2776 in6_ifa_hold(ifp); 2803 in6_ifa_hold(ifp);
2777 read_unlock(&addrconf_hash_lock); 2804 read_unlock(&addrconf_hash_lock);
2778 ipv6_del_addr(ifp); 2805 ipv6_del_addr(ifp);
2779 goto restart; 2806 goto restart;
2807 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
2808 spin_unlock(&ifp->lock);
2809 continue;
2780 } else if (age >= ifp->prefered_lft) { 2810 } else if (age >= ifp->prefered_lft) {
2781 /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */ 2811 /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */
2782 int deprecate = 0; 2812 int deprecate = 0;
@@ -2853,7 +2883,8 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2853 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2883 pfx = RTA_DATA(rta[IFA_ADDRESS-1]);
2854 } 2884 }
2855 if (rta[IFA_LOCAL-1]) { 2885 if (rta[IFA_LOCAL-1]) {
2856 if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2886 if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) ||
2887 (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))))
2857 return -EINVAL; 2888 return -EINVAL;
2858 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2889 pfx = RTA_DATA(rta[IFA_LOCAL-1]);
2859 } 2890 }
@@ -2864,11 +2895,61 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2864} 2895}
2865 2896
2866static int 2897static int
2898inet6_addr_modify(int ifindex, struct in6_addr *pfx,
2899 __u32 prefered_lft, __u32 valid_lft)
2900{
2901 struct inet6_ifaddr *ifp = NULL;
2902 struct net_device *dev;
2903 int ifa_flags = 0;
2904
2905 if ((dev = __dev_get_by_index(ifindex)) == NULL)
2906 return -ENODEV;
2907
2908 if (!(dev->flags&IFF_UP))
2909 return -ENETDOWN;
2910
2911 if (!valid_lft || (prefered_lft > valid_lft))
2912 return -EINVAL;
2913
2914 ifp = ipv6_get_ifaddr(pfx, dev, 1);
2915 if (ifp == NULL)
2916 return -ENOENT;
2917
2918 if (valid_lft == INFINITY_LIFE_TIME)
2919 ifa_flags = IFA_F_PERMANENT;
2920 else if (valid_lft >= 0x7FFFFFFF/HZ)
2921 valid_lft = 0x7FFFFFFF/HZ;
2922
2923 if (prefered_lft == 0)
2924 ifa_flags = IFA_F_DEPRECATED;
2925 else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
2926 (prefered_lft != INFINITY_LIFE_TIME))
2927 prefered_lft = 0x7FFFFFFF/HZ;
2928
2929 spin_lock_bh(&ifp->lock);
2930 ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED|IFA_F_PERMANENT)) | ifa_flags;
2931
2932 ifp->tstamp = jiffies;
2933 ifp->valid_lft = valid_lft;
2934 ifp->prefered_lft = prefered_lft;
2935
2936 spin_unlock_bh(&ifp->lock);
2937 if (!(ifp->flags&IFA_F_TENTATIVE))
2938 ipv6_ifa_notify(0, ifp);
2939 in6_ifa_put(ifp);
2940
2941 addrconf_verify(0);
2942
2943 return 0;
2944}
2945
2946static int
2867inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2947inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2868{ 2948{
2869 struct rtattr **rta = arg; 2949 struct rtattr **rta = arg;
2870 struct ifaddrmsg *ifm = NLMSG_DATA(nlh); 2950 struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
2871 struct in6_addr *pfx; 2951 struct in6_addr *pfx;
2952 __u32 valid_lft = INFINITY_LIFE_TIME, prefered_lft = INFINITY_LIFE_TIME;
2872 2953
2873 pfx = NULL; 2954 pfx = NULL;
2874 if (rta[IFA_ADDRESS-1]) { 2955 if (rta[IFA_ADDRESS-1]) {
@@ -2877,14 +2958,34 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2877 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2958 pfx = RTA_DATA(rta[IFA_ADDRESS-1]);
2878 } 2959 }
2879 if (rta[IFA_LOCAL-1]) { 2960 if (rta[IFA_LOCAL-1]) {
2880 if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2961 if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) ||
2962 (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))))
2881 return -EINVAL; 2963 return -EINVAL;
2882 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2964 pfx = RTA_DATA(rta[IFA_LOCAL-1]);
2883 } 2965 }
2884 if (pfx == NULL) 2966 if (pfx == NULL)
2885 return -EINVAL; 2967 return -EINVAL;
2886 2968
2887 return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen); 2969 if (rta[IFA_CACHEINFO-1]) {
2970 struct ifa_cacheinfo *ci;
2971 if (RTA_PAYLOAD(rta[IFA_CACHEINFO-1]) < sizeof(*ci))
2972 return -EINVAL;
2973 ci = RTA_DATA(rta[IFA_CACHEINFO-1]);
2974 valid_lft = ci->ifa_valid;
2975 prefered_lft = ci->ifa_prefered;
2976 }
2977
2978 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
2979 int ret;
2980 ret = inet6_addr_modify(ifm->ifa_index, pfx,
2981 prefered_lft, valid_lft);
2982 if (ret == 0 || !(nlh->nlmsg_flags & NLM_F_CREATE))
2983 return ret;
2984 }
2985
2986 return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen,
2987 prefered_lft, valid_lft);
2988
2888} 2989}
2889 2990
2890/* Maximum length of ifa_cacheinfo attributes */ 2991/* Maximum length of ifa_cacheinfo attributes */
@@ -3121,6 +3222,62 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
3121 return inet6_dump_addr(skb, cb, type); 3222 return inet6_dump_addr(skb, cb, type);
3122} 3223}
3123 3224
3225static int inet6_rtm_getaddr(struct sk_buff *in_skb,
3226 struct nlmsghdr* nlh, void *arg)
3227{
3228 struct rtattr **rta = arg;
3229 struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
3230 struct in6_addr *addr = NULL;
3231 struct net_device *dev = NULL;
3232 struct inet6_ifaddr *ifa;
3233 struct sk_buff *skb;
3234 int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + INET6_IFADDR_RTA_SPACE);
3235 int err;
3236
3237 if (rta[IFA_ADDRESS-1]) {
3238 if (RTA_PAYLOAD(rta[IFA_ADDRESS-1]) < sizeof(*addr))
3239 return -EINVAL;
3240 addr = RTA_DATA(rta[IFA_ADDRESS-1]);
3241 }
3242 if (rta[IFA_LOCAL-1]) {
3243 if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*addr) ||
3244 (addr && memcmp(addr, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*addr))))
3245 return -EINVAL;
3246 addr = RTA_DATA(rta[IFA_LOCAL-1]);
3247 }
3248 if (addr == NULL)
3249 return -EINVAL;
3250
3251 if (ifm->ifa_index)
3252 dev = __dev_get_by_index(ifm->ifa_index);
3253
3254 if ((ifa = ipv6_get_ifaddr(addr, dev, 1)) == NULL)
3255 return -EADDRNOTAVAIL;
3256
3257 if ((skb = alloc_skb(size, GFP_KERNEL)) == NULL) {
3258 err = -ENOBUFS;
3259 goto out;
3260 }
3261
3262 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
3263 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
3264 nlh->nlmsg_seq, RTM_NEWADDR, 0);
3265 if (err < 0) {
3266 err = -EMSGSIZE;
3267 goto out_free;
3268 }
3269
3270 err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
3271 if (err > 0)
3272 err = 0;
3273out:
3274 in6_ifa_put(ifa);
3275 return err;
3276out_free:
3277 kfree_skb(skb);
3278 goto out;
3279}
3280
3124static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) 3281static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3125{ 3282{
3126 struct sk_buff *skb; 3283 struct sk_buff *skb;
@@ -3363,7 +3520,8 @@ static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = {
3363 [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, 3520 [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, },
3364 [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, }, 3521 [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, },
3365 [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, 3522 [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, },
3366 [RTM_GETADDR - RTM_BASE] = { .dumpit = inet6_dump_ifaddr, }, 3523 [RTM_GETADDR - RTM_BASE] = { .doit = inet6_rtm_getaddr,
3524 .dumpit = inet6_dump_ifaddr, },
3367 [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, }, 3525 [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, },
3368 [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, }, 3526 [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, },
3369 [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, }, 3527 [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, },
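
The addrconf changes above repeat the same lifetime-to-flags mapping in inet6_addr_add() and inet6_addr_modify(): an infinite valid lifetime marks the address IFA_F_PERMANENT, a preferred lifetime of zero marks it IFA_F_DEPRECATED, and finite lifetimes are clamped to 0x7FFFFFFF/HZ so that a later lifetime * HZ conversion stays within a 32-bit range. A sketch of that mapping factored into one helper (illustration only; the helper name is hypothetical and not part of the patch):

static u32 ipv6_addr_lft_to_flags(__u32 *valid_lft, __u32 *prefered_lft)
{
	u32 ifa_flags = 0;

	/* Infinite valid lifetime => permanent address. */
	if (*valid_lft == INFINITY_LIFE_TIME)
		ifa_flags |= IFA_F_PERMANENT;
	else if (*valid_lft >= 0x7FFFFFFF/HZ)
		*valid_lft = 0x7FFFFFFF/HZ;	/* keep lft * HZ representable */

	/* Zero preferred lifetime => address starts out deprecated. */
	if (*prefered_lft == 0)
		ifa_flags |= IFA_F_DEPRECATED;
	else if (*prefered_lft >= 0x7FFFFFFF/HZ &&
		 *prefered_lft != INFINITY_LIFE_TIME)
		*prefered_lft = 0x7FFFFFFF/HZ;

	return ifa_flags;
}
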
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 5a0ba58b86cc..ac85e9c532c2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -658,7 +658,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
658 return err; 658 return err;
659 } 659 }
660 660
661 ip6_dst_store(sk, dst, NULL); 661 __ip6_dst_store(sk, dst, NULL);
662 } 662 }
663 663
664 return 0; 664 return 0;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 9d0ee7f0eeb5..86dac106873b 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -635,14 +635,17 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
635 struct ipv6_txoptions *opt2; 635 struct ipv6_txoptions *opt2;
636 int err; 636 int err;
637 637
638 if (newtype != IPV6_HOPOPTS && opt->hopopt) 638 if (opt) {
639 tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt)); 639 if (newtype != IPV6_HOPOPTS && opt->hopopt)
640 if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt) 640 tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
641 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt)); 641 if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
642 if (newtype != IPV6_RTHDR && opt->srcrt) 642 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
643 tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt)); 643 if (newtype != IPV6_RTHDR && opt->srcrt)
644 if (newtype != IPV6_DSTOPTS && opt->dst1opt) 644 tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
645 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); 645 if (newtype != IPV6_DSTOPTS && opt->dst1opt)
646 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
647 }
648
646 if (newopt && newoptlen) 649 if (newopt && newoptlen)
647 tot_len += CMSG_ALIGN(newoptlen); 650 tot_len += CMSG_ALIGN(newoptlen);
648 651
@@ -659,25 +662,25 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
 	opt2->tot_len = tot_len;
 	p = (char *)(opt2 + 1);
 
-	err = ipv6_renew_option(opt->hopopt, newopt, newoptlen,
+	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
 				newtype != IPV6_HOPOPTS,
 				&opt2->hopopt, &p);
 	if (err)
 		goto out;
 
-	err = ipv6_renew_option(opt->dst0opt, newopt, newoptlen,
+	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
 				newtype != IPV6_RTHDRDSTOPTS,
 				&opt2->dst0opt, &p);
 	if (err)
 		goto out;
 
-	err = ipv6_renew_option(opt->srcrt, newopt, newoptlen,
+	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
 				newtype != IPV6_RTHDR,
-				(struct ipv6_opt_hdr **)opt2->srcrt, &p);
+				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
 	if (err)
 		goto out;
 
-	err = ipv6_renew_option(opt->dst1opt, newopt, newoptlen,
+	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
 				newtype != IPV6_DSTOPTS,
 				&opt2->dst1opt, &p);
 	if (err)
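Both hunks above guard against ipv6_renew_options() being handed opt == NULL: the length accumulation is wrapped in a NULL check and every later dereference becomes "opt ? opt->field : NULL". A minimal out-of-tree sketch of the same guard, using hypothetical stand-in types rather than the real ipv6_txoptions API:

#include <stddef.h>

/* Hypothetical stand-ins for ipv6_txoptions and ipv6_optlen(). */
struct ext_hdr { unsigned char hdrlen; };
struct tx_opts { struct ext_hdr *hopopt, *dst0opt, *srcrt, *dst1opt; };

static size_t hdr_len(const struct ext_hdr *h)
{
	return (h->hdrlen + 1) * 8u;	/* length field counts 8-octet units */
}

static size_t total_opt_len(const struct tx_opts *opt)
{
	size_t tot = 0;

	if (opt) {			/* guard the whole block, as the patch does */
		if (opt->hopopt)
			tot += hdr_len(opt->hopopt);
		if (opt->dst0opt)
			tot += hdr_len(opt->dst0opt);
		if (opt->srcrt)
			tot += hdr_len(opt->srcrt);
		if (opt->dst1opt)
			tot += hdr_len(opt->dst1opt);
	}
	return tot;
}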
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1044b6fce0d5..3d6e9a351150 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -712,6 +712,11 @@ discard_it:
 	return 0;
 }
 
+/*
+ * Special lock-class for __icmpv6_socket:
+ */
+static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
+
 int __init icmpv6_init(struct net_proto_family *ops)
 {
 	struct sock *sk;
@@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops)
 
 		sk = per_cpu(__icmpv6_socket, i)->sk;
 		sk->sk_allocation = GFP_ATOMIC;
+		/*
+		 * Split off their lock-class, because sk->sk_dst_lock
+		 * gets used from softirqs, which is safe for
+		 * __icmpv6_socket (because those never get directly used
+		 * via userspace syscalls), but unsafe for normal sockets.
+		 */
+		lockdep_set_class(&sk->sk_dst_lock,
+				  &icmpv6_socket_sk_dst_lock_key);
 
 		/* Enough space for 2 64K ICMP packets, including
 		 * sk_buff struct overhead.
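The hunk above is the standard lockdep recipe for a lock instance that is legitimately used in a context its siblings never see: declare one static struct lock_class_key and re-class the affected instances. A hedged, generic sketch (my_dev_lock and its setup function are made-up names, not a kernel API):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key my_dev_lock_key;	/* one key == one lockdep class */
static DEFINE_SPINLOCK(my_dev_lock);

static void my_dev_lock_setup(void)
{
	/* Move this instance into its own class so that taking it from
	 * softirq context is not reported as a conflict with every other
	 * lock of the same structural type. */
	lockdep_set_class(&my_dev_lock, &my_dev_lock_key);
}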
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 5c950cc79d80..bf491077b822 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
 			return err;
 		}
 
-		ip6_dst_store(sk, dst, NULL);
+		__ip6_dst_store(sk, dst, NULL);
 	}
 
 	skb->dst = dst_clone(dst);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3bc74ce78800..4fb47a252913 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -356,6 +356,7 @@ int ip6_forward(struct sk_buff *skb)
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
 			    0, skb->dev);
+		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
 
 		kfree_skb(skb);
 		return -ETIMEDOUT;
@@ -595,6 +596,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		}
 
 		err = output(skb);
+		if(!err)
+			IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+
 		if (err || !frag)
 			break;
 
@@ -706,12 +710,11 @@ slow_path:
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-
-		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
-
 		err = output(frag);
 		if (err)
 			goto fail;
+
+		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
 	}
 	kfree_skb(skb);
 	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
@@ -723,48 +726,51 @@ fail:
723 return err; 726 return err;
724} 727}
725 728
726int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 729static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
730 struct dst_entry *dst,
731 struct flowi *fl)
727{ 732{
728 int err = 0; 733 struct ipv6_pinfo *np = inet6_sk(sk);
734 struct rt6_info *rt = (struct rt6_info *)dst;
729 735
730 *dst = NULL; 736 if (!dst)
731 if (sk) { 737 goto out;
732 struct ipv6_pinfo *np = inet6_sk(sk); 738
733 739 /* Yes, checking route validity in not connected
734 *dst = sk_dst_check(sk, np->dst_cookie); 740 * case is not very simple. Take into account,
735 if (*dst) { 741 * that we do not support routing by source, TOS,
736 struct rt6_info *rt = (struct rt6_info*)*dst; 742 * and MSG_DONTROUTE --ANK (980726)
737 743 *
738 /* Yes, checking route validity in not connected 744 * 1. If route was host route, check that
739 * case is not very simple. Take into account, 745 * cached destination is current.
740 * that we do not support routing by source, TOS, 746 * If it is network route, we still may
741 * and MSG_DONTROUTE --ANK (980726) 747 * check its validity using saved pointer
742 * 748 * to the last used address: daddr_cache.
743 * 1. If route was host route, check that 749 * We do not want to save whole address now,
744 * cached destination is current. 750 * (because main consumer of this service
745 * If it is network route, we still may 751 * is tcp, which has not this problem),
746 * check its validity using saved pointer 752 * so that the last trick works only on connected
747 * to the last used address: daddr_cache. 753 * sockets.
748 * We do not want to save whole address now, 754 * 2. oif also should be the same.
749 * (because main consumer of this service 755 */
750 * is tcp, which has not this problem), 756 if (((rt->rt6i_dst.plen != 128 ||
751 * so that the last trick works only on connected 757 !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr))
752 * sockets. 758 && (np->daddr_cache == NULL ||
753 * 2. oif also should be the same. 759 !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache)))
754 */ 760 || (fl->oif && fl->oif != dst->dev->ifindex)) {
755 if (((rt->rt6i_dst.plen != 128 || 761 dst_release(dst);
756 !ipv6_addr_equal(&fl->fl6_dst, 762 dst = NULL;
757 &rt->rt6i_dst.addr))
758 && (np->daddr_cache == NULL ||
759 !ipv6_addr_equal(&fl->fl6_dst,
760 np->daddr_cache)))
761 || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
762 dst_release(*dst);
763 *dst = NULL;
764 }
765 }
766 } 763 }
767 764
765out:
766 return dst;
767}
768
769static int ip6_dst_lookup_tail(struct sock *sk,
770 struct dst_entry **dst, struct flowi *fl)
771{
772 int err;
773
768 if (*dst == NULL) 774 if (*dst == NULL)
769 *dst = ip6_route_output(sk, fl); 775 *dst = ip6_route_output(sk, fl);
770 776
@@ -773,7 +779,6 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
773 779
774 if (ipv6_addr_any(&fl->fl6_src)) { 780 if (ipv6_addr_any(&fl->fl6_src)) {
775 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); 781 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
776
777 if (err) 782 if (err)
778 goto out_err_release; 783 goto out_err_release;
779 } 784 }
@@ -786,8 +791,48 @@ out_err_release:
786 return err; 791 return err;
787} 792}
788 793
794/**
795 * ip6_dst_lookup - perform route lookup on flow
796 * @sk: socket which provides route info
797 * @dst: pointer to dst_entry * for result
798 * @fl: flow to lookup
799 *
800 * This function performs a route lookup on the given flow.
801 *
802 * It returns zero on success, or a standard errno code on error.
803 */
804int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
805{
806 *dst = NULL;
807 return ip6_dst_lookup_tail(sk, dst, fl);
808}
789EXPORT_SYMBOL_GPL(ip6_dst_lookup); 809EXPORT_SYMBOL_GPL(ip6_dst_lookup);
790 810
811/**
812 * ip6_sk_dst_lookup - perform socket cached route lookup on flow
813 * @sk: socket which provides the dst cache and route info
814 * @dst: pointer to dst_entry * for result
815 * @fl: flow to lookup
816 *
817 * This function performs a route lookup on the given flow with the
818 * possibility of using the cached route in the socket if it is valid.
819 * It will take the socket dst lock when operating on the dst cache.
820 * As a result, this function can only be used in process context.
821 *
822 * It returns zero on success, or a standard errno code on error.
823 */
824int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
825{
826 *dst = NULL;
827 if (sk) {
828 *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
829 *dst = ip6_sk_dst_check(sk, *dst, fl);
830 }
831
832 return ip6_dst_lookup_tail(sk, dst, fl);
833}
834EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
835
791static inline int ip6_ufo_append_data(struct sock *sk, 836static inline int ip6_ufo_append_data(struct sock *sk,
792 int getfrag(void *from, char *to, int offset, int len, 837 int getfrag(void *from, char *to, int offset, int len,
793 int odd, struct sk_buff *skb), 838 int odd, struct sk_buff *skb),
@@ -1050,7 +1095,7 @@ alloc_new_skb:
 					skb_prev->csum = csum_sub(skb_prev->csum,
 								  skb->csum);
 					data += fraggap;
-					skb_trim(skb_prev, maxfraglen);
+					pskb_trim_unique(skb_prev, maxfraglen);
 				}
 				copy = datalen - transhdrlen - fraggap;
 				if (copy < 0) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9d697d4dcffc..639eb20c9f1f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
 	if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) {
 		struct inet6_dev *idev = in6_dev_get(dev);
 
+		(void) ip6_mc_leave_src(sk, mc_lst, idev);
 		if (idev) {
-			(void) ip6_mc_leave_src(sk,mc_lst,idev);
 			__ipv6_dev_mc_dec(idev, &mc_lst->addr);
 			in6_dev_put(idev);
 		}
 		dev_put(dev);
-	}
+	} else
+		(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 	sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 	return 0;
 }
@@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk)
 		if (dev) {
 			struct inet6_dev *idev = in6_dev_get(dev);
 
+			(void) ip6_mc_leave_src(sk, mc_lst, idev);
 			if (idev) {
-				(void) ip6_mc_leave_src(sk, mc_lst, idev);
 				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
 				in6_dev_put(idev);
 			}
 			dev_put(dev);
-		}
+		} else
+			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 
 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index f26898b00347..c9d6b23cd3f7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1398,23 +1398,39 @@ static int __init ip6_tables_init(void)
 {
 	int ret;
 
-	xt_proto_init(AF_INET6);
+	ret = xt_proto_init(AF_INET6);
+	if (ret < 0)
+		goto err1;
 
 	/* Noone else will be downing sem now, so we won't sleep */
-	xt_register_target(&ip6t_standard_target);
-	xt_register_target(&ip6t_error_target);
-	xt_register_match(&icmp6_matchstruct);
+	ret = xt_register_target(&ip6t_standard_target);
+	if (ret < 0)
+		goto err2;
+	ret = xt_register_target(&ip6t_error_target);
+	if (ret < 0)
+		goto err3;
+	ret = xt_register_match(&icmp6_matchstruct);
+	if (ret < 0)
+		goto err4;
 
 	/* Register setsockopt */
 	ret = nf_register_sockopt(&ip6t_sockopts);
-	if (ret < 0) {
-		duprintf("Unable to register sockopts.\n");
-		xt_proto_fini(AF_INET6);
-		return ret;
-	}
+	if (ret < 0)
+		goto err5;
 
 	printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
 	return 0;
+
+err5:
+	xt_unregister_match(&icmp6_matchstruct);
+err4:
+	xt_unregister_target(&ip6t_error_target);
+err3:
+	xt_unregister_target(&ip6t_standard_target);
+err2:
+	xt_proto_fini(AF_INET6);
+err1:
+	return ret;
 }
 
 static void __exit ip6_tables_fini(void)
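The rewritten ip6_tables_init() is the usual kernel idiom for multi-step initialisation: each step that can fail jumps to a label that unwinds, in reverse order, only the steps that already succeeded. A stand-alone sketch with hypothetical acquire/release pairs standing in for xt_proto_init()/xt_register_*():

static int  acquire_a(void) { return 0; }
static int  acquire_b(void) { return 0; }
static int  acquire_c(void) { return 0; }
static void release_b(void) { }
static void release_a(void) { }

static int init_all(void)
{
	int ret;

	ret = acquire_a();
	if (ret < 0)
		goto err1;
	ret = acquire_b();
	if (ret < 0)
		goto err2;
	ret = acquire_c();
	if (ret < 0)
		goto err3;
	return 0;

err3:				/* unwind in reverse order of acquisition */
	release_b();
err2:
	release_a();
err1:
	return ret;
}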
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 87c39c978cd0..d9baca062d24 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -53,6 +53,7 @@
 #include <linux/rtnetlink.h>
 #include <net/dst.h>
 #include <net/xfrm.h>
+#include <net/netevent.h>
 
 #include <asm/uaccess.h>
 
@@ -742,6 +743,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 			dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
 		}
 		dst->metrics[RTAX_MTU-1] = mtu;
+		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
 	}
 }
 
@@ -1155,6 +1157,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
 	struct rt6_info *rt, *nrt = NULL;
 	int strict;
 	struct fib6_node *fn;
+	struct netevent_redirect netevent;
 
 	/*
 	 * Get the "current" route for this destination and
@@ -1252,6 +1255,10 @@ restart:
 	if (ip6_ins_rt(nrt, NULL, NULL, NULL))
 		goto out;
 
+	netevent.old = &rt->u.dst;
+	netevent.new = &nrt->u.dst;
+	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
+
 	if (rt->rt6i_flags&RTF_CACHE) {
 		ip6_del_rt(rt, NULL, NULL, NULL);
 		return;
@@ -1525,6 +1532,10 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
 
 static int ip6_pkt_discard(struct sk_buff *skb)
 {
+	int type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
+	if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED)
+		IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS);
+
 	IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
 	kfree_skb(skb);
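With these hunks, IPv6 raises netevents on PMTU updates and redirects, mirroring what the diffstat's new net/core/netevent.c provides. A hedged sketch of how another subsystem might consume them (NETEVENT_* values and the notifier struct layout are taken from this diff; the callback and init names are made up):

#include <linux/notifier.h>
#include <net/netevent.h>

static int my_netevent_cb(struct notifier_block *nb,
			  unsigned long event, void *ctx)
{
	switch (event) {
	case NETEVENT_PMTU_UPDATE:	/* ctx is the struct dst_entry * */
		/* re-sync any cached MTU derived from this dst */
		break;
	case NETEVENT_REDIRECT:		/* ctx is a struct netevent_redirect * */
		/* move references from netevent->old to netevent->new */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netevent_nb = {
	.notifier_call = my_netevent_cb,
};

static int __init my_init(void)
{
	return register_netevent_notifier(&my_netevent_nb);
}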
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 923989d0520d..802a1a6b1037 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -270,7 +270,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		inet->rcv_saddr = LOOPBACK4_IPV6;
 
 	sk->sk_gso_type = SKB_GSO_TCPV6;
-	ip6_dst_store(sk, dst, NULL);
+	__ip6_dst_store(sk, dst, NULL);
 
 	icsk->icsk_ext_hdr_len = 0;
 	if (np->opt)
@@ -427,7 +427,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	case TCP_SYN_RECV:  /* Cannot happen.
 			       It can, it SYNs are crossed. --ANK */
 		if (!sock_owned_by_user(sk)) {
-			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
 			sk->sk_err = err;
 			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */
 
@@ -831,7 +830,6 @@ drop:
 	if (req)
 		reqsk_free(req);
 
-	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
 	return 0; /* don't send reset */
 }
 
@@ -946,8 +944,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	 * comment in that function for the gory details. -acme
 	 */
 
-	sk->sk_gso_type = SKB_GSO_TCPV6;
-	ip6_dst_store(newsk, dst, NULL);
+	newsk->sk_gso_type = SKB_GSO_TCPV6;
+	__ip6_dst_store(newsk, dst, NULL);
 
 	newtcp6sk = (struct tcp6_sock *)newsk;
 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ccc57f434cd3..3d54f246411e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -782,7 +782,7 @@ do_udp_sendmsg:
 		connected = 0;
 	}
 
-	err = ip6_dst_lookup(sk, &dst, fl);
+	err = ip6_sk_dst_lookup(sk, &dst, fl);
 	if (err)
 		goto out;
 	if (final_p)
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 0eea60ea9ebc..c8c8b44a0f58 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -125,7 +125,7 @@ static int xfrm6_output_finish(struct sk_buff *skb)
 	if (!skb_is_gso(skb))
 		return xfrm6_output_finish2(skb);
 
-	skb->protocol = htons(ETH_P_IP);
+	skb->protocol = htons(ETH_P_IPV6);
 	segs = skb_gso_segment(skb, 0);
 	kfree_skb(skb);
 	if (unlikely(IS_ERR(segs)))
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index aa34ff4b707c..bef3f61569f7 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
 		goto out;
 
-	ipx = ipx_hdr(skb);
-	ipx_pktsize = ntohs(ipx->ipx_pktsize);
+	if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
+		goto drop;
+
+	ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
 
 	/* Too small or invalid header? */
-	if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len)
+	if (ipx_pktsize < sizeof(struct ipxhdr) ||
+	    !pskb_may_pull(skb, ipx_pktsize))
 		goto drop;
 
+	ipx = ipx_hdr(skb);
 	if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
 	    ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
 		goto drop;
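The IPX fix is an instance of a general rule for packet parsers: make the header bytes linear with pskb_may_pull() before reading them, and re-derive the header pointer afterwards because the pull may move skb->data. A hedged sketch for a hypothetical protocol (myproto_hdr and myproto_rcv are invented names):

#include <linux/skbuff.h>

struct myproto_hdr {
	__be16 len;
	__be16 csum;
};

static int myproto_rcv(struct sk_buff *skb)
{
	const struct myproto_hdr *hdr;
	unsigned int pktlen;

	/* Make sure the fixed header is in the linear area first. */
	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	pktlen = ntohs(((const struct myproto_hdr *)skb->data)->len);
	if (pktlen < sizeof(*hdr) || !pskb_may_pull(skb, pktlen))
		goto drop;

	/* Re-derive the pointer: pskb_may_pull() may have reallocated data. */
	hdr = (const struct myproto_hdr *)skb->data;
	/* ... validate hdr->csum, hand off to the upper layer ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}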
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index d504eed416f6..7e6bc41eeb21 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -238,11 +238,13 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
 		goto out_put;
 
 	if (lapb->state == LAPB_STATE_0) {
-		if (((parms->mode & LAPB_EXTENDED) &&
-		     (parms->window < 1 || parms->window > 127)) ||
-		    (parms->window < 1 || parms->window > 7))
-			goto out_put;
-
+		if (parms->mode & LAPB_EXTENDED) {
+			if (parms->window < 1 || parms->window > 127)
+				goto out_put;
+		} else {
+			if (parms->window < 1 || parms->window > 7)
+				goto out_put;
+		}
 		lapb->mode    = parms->mode;
 		lapb->window  = parms->window;
 	}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index d6cfe84d521b..2652ead96c64 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -784,24 +784,20 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
784 copied += used; 784 copied += used;
785 len -= used; 785 len -= used;
786 786
787 if (used + offset < skb->len)
788 continue;
789
790 if (!(flags & MSG_PEEK)) { 787 if (!(flags & MSG_PEEK)) {
791 sk_eat_skb(sk, skb, 0); 788 sk_eat_skb(sk, skb, 0);
792 *seq = 0; 789 *seq = 0;
793 } 790 }
791
792 /* For non stream protcols we get one packet per recvmsg call */
793 if (sk->sk_type != SOCK_STREAM)
794 goto copy_uaddr;
795
796 /* Partial read */
797 if (used + offset < skb->len)
798 continue;
794 } while (len > 0); 799 } while (len > 0);
795 800
796 /*
797 * According to UNIX98, msg_name/msg_namelen are ignored
798 * on connected socket. -ANK
799 * But... af_llc still doesn't have separate sets of methods for
800 * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will
801 * eventually fix this tho :-) -acme
802 */
803 if (sk->sk_type == SOCK_DGRAM)
804 goto copy_uaddr;
805out: 801out:
806 release_sock(sk); 802 release_sock(sk);
807 return copied; 803 return copied;
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 20c4eb5c1ac6..61cb8cf7d153 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -51,10 +51,10 @@ void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim)
51{ 51{
52 struct sockaddr_llc *addr; 52 struct sockaddr_llc *addr;
53 53
54 if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */
55 return;
56 /* save primitive for use by the user. */ 54 /* save primitive for use by the user. */
57 addr = llc_ui_skb_cb(skb); 55 addr = llc_ui_skb_cb(skb);
56
57 memset(addr, 0, sizeof(*addr));
58 addr->sllc_family = sk->sk_family; 58 addr->sllc_family = sk->sk_family;
59 addr->sllc_arphrd = skb->dev->type; 59 addr->sllc_arphrd = skb->dev->type;
60 addr->sllc_test = prim == LLC_TEST_PRIM; 60 addr->sllc_test = prim == LLC_TEST_PRIM;
@@ -330,6 +330,9 @@ static void llc_sap_mcast(struct llc_sap *sap,
330 if (llc->laddr.lsap != laddr->lsap) 330 if (llc->laddr.lsap != laddr->lsap)
331 continue; 331 continue;
332 332
333 if (llc->dev != skb->dev)
334 continue;
335
333 skb1 = skb_clone(skb, GFP_ATOMIC); 336 skb1 = skb_clone(skb, GFP_ATOMIC);
334 if (!skb1) 337 if (!skb1)
335 break; 338 break;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index af4845971f70..6527d4e048d8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
429 cb->args[0], *id); 429 cb->args[0], *id);
430 430
431 read_lock_bh(&nf_conntrack_lock); 431 read_lock_bh(&nf_conntrack_lock);
432 last = (struct nf_conn *)cb->args[1];
432 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 433 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
433restart: 434restart:
434 last = (struct nf_conn *)cb->args[1];
435 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { 435 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
436 h = (struct nf_conntrack_tuple_hash *) i; 436 h = (struct nf_conntrack_tuple_hash *) i;
437 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) 437 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
@@ -442,13 +442,10 @@ restart:
442 * then dump everything. */ 442 * then dump everything. */
443 if (l3proto && L3PROTO(ct) != l3proto) 443 if (l3proto && L3PROTO(ct) != l3proto)
444 continue; 444 continue;
445 if (last != NULL) { 445 if (cb->args[1]) {
446 if (ct == last) { 446 if (ct != last)
447 nf_ct_put(last);
448 cb->args[1] = 0;
449 last = NULL;
450 } else
451 continue; 447 continue;
448 cb->args[1] = 0;
452 } 449 }
453 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 450 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
454 cb->nlh->nlmsg_seq, 451 cb->nlh->nlmsg_seq,
@@ -459,17 +456,17 @@ restart:
459 goto out; 456 goto out;
460 } 457 }
461 } 458 }
462 if (last != NULL) { 459 if (cb->args[1]) {
463 nf_ct_put(last);
464 cb->args[1] = 0; 460 cb->args[1] = 0;
465 goto restart; 461 goto restart;
466 } 462 }
467 } 463 }
468out: 464out:
469 read_unlock_bh(&nf_conntrack_lock); 465 read_unlock_bh(&nf_conntrack_lock);
466 if (last)
467 nf_ct_put(last);
470 468
471 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); 469 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
472
473 return skb->len; 470 return skb->len;
474} 471}
475 472
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 61cdda4e5d3b..b59d3b2bde21 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -366,6 +366,9 @@ __nfulnl_send(struct nfulnl_instance *inst)
 	if (timer_pending(&inst->timer))
 		del_timer(&inst->timer);
 
+	if (!inst->skb)
+		return 0;
+
 	if (inst->qlen > 1)
 		inst->lastnlh->nlmsg_type = NLMSG_DONE;
 
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index c2ce9c4011cc..de9537ad9a7c 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -57,6 +57,8 @@ static int checkentry_selinux(struct xt_secmark_target_info *info)
 {
 	int err;
 	struct xt_secmark_target_selinux_info *sel = &info->u.sel;
+
+	sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0';
 
 	err = selinux_string_to_sid(sel->selctx, &sel->selsid);
 	if (err) {
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index a9f4f6f3c628..63a965467465 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -10,6 +10,7 @@
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter/xt_physdev.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_bridge.h>
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 0ebb6ac2c8c7..275330fcdaaa 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -37,7 +37,7 @@ static int match(const struct sk_buff *skb,
 
 	return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
 			     conf->to_offset, conf->config, &state)
-			     != UINT_MAX) && !conf->invert;
+			     != UINT_MAX) ^ conf->invert;
 }
 
 #define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m)
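The "^ conf->invert" change matters because the old "&& !conf->invert" could never report a match once invert was set; XOR flips the result instead. A small illustration in plain C, outside the kernel:

#include <stdbool.h>
#include <stdio.h>

static bool match(bool found, bool invert)
{
	/* Report a match when the pattern was found and not inverted,
	 * or when it was not found and the rule is inverted. */
	return found ^ invert;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       match(true, false),	/* 1 */
	       match(false, false),	/* 0 */
	       match(true, true),	/* 0 */
	       match(false, true));	/* 1: the case "&& !invert" got wrong */
	return 0;
}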
@@ -55,7 +55,10 @@ static int checkentry(const char *tablename,
 	/* Damn, can't handle this case properly with iptables... */
 	if (conf->from_offset > conf->to_offset)
 		return 0;
-
+	if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
+		return 0;
+	if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
+		return 0;
 	ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
 				     GFP_KERNEL, TS_AUTOLOAD);
 	if (IS_ERR(ts_conf))
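Both this hunk and the xt_SECMARK one apply the same checkentry rule: data copied verbatim from userspace must be validated before use, strings must be proven (or forced) NUL-terminated, and embedded lengths bounded against their arrays. A hedged sketch with invented names, not the x_tables API:

#include <string.h>

#define NAME_MAX_LEN	16
#define PATTERN_MAX_LEN	128

struct match_cfg {
	char name[NAME_MAX_LEN];
	unsigned int patlen;
	char pattern[PATTERN_MAX_LEN];
};

/* Validate a config blob copied as-is from an untrusted source. */
static int cfg_check(const struct match_cfg *cfg)
{
	/* Reject names that are not NUL-terminated inside the buffer. */
	if (cfg->name[NAME_MAX_LEN - 1] != '\0')
		return -1;
	/* Reject lengths that would index past the embedded array. */
	if (cfg->patlen > PATTERN_MAX_LEN)
		return -1;
	return 0;
}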
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b85c1f9f1288..8b85036ba8e3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1273,8 +1273,7 @@ netlink_kernel_create(int unit, unsigned int groups,
1273 struct netlink_sock *nlk; 1273 struct netlink_sock *nlk;
1274 unsigned long *listeners = NULL; 1274 unsigned long *listeners = NULL;
1275 1275
1276 if (!nl_table) 1276 BUG_ON(!nl_table);
1277 return NULL;
1278 1277
1279 if (unit<0 || unit>=MAX_LINKS) 1278 if (unit<0 || unit>=MAX_LINKS)
1280 return NULL; 1279 return NULL;
@@ -1745,11 +1744,8 @@ static int __init netlink_proto_init(void)
1745 netlink_skb_parms_too_large(); 1744 netlink_skb_parms_too_large();
1746 1745
1747 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); 1746 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
1748 if (!nl_table) { 1747 if (!nl_table)
1749enomem: 1748 goto panic;
1750 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
1751 return -ENOMEM;
1752 }
1753 1749
1754 if (num_physpages >= (128 * 1024)) 1750 if (num_physpages >= (128 * 1024))
1755 max = num_physpages >> (21 - PAGE_SHIFT); 1751 max = num_physpages >> (21 - PAGE_SHIFT);
@@ -1769,7 +1765,7 @@ enomem:
1769 nl_pid_hash_free(nl_table[i].hash.table, 1765 nl_pid_hash_free(nl_table[i].hash.table,
1770 1 * sizeof(*hash->table)); 1766 1 * sizeof(*hash->table));
1771 kfree(nl_table); 1767 kfree(nl_table);
1772 goto enomem; 1768 goto panic;
1773 } 1769 }
1774 memset(hash->table, 0, 1 * sizeof(*hash->table)); 1770 memset(hash->table, 0, 1 * sizeof(*hash->table));
1775 hash->max_shift = order; 1771 hash->max_shift = order;
@@ -1786,6 +1782,8 @@ enomem:
1786 rtnetlink_init(); 1782 rtnetlink_init();
1787out: 1783out:
1788 return err; 1784 return err;
1785panic:
1786 panic("netlink_init: Cannot allocate nl_table\n");
1789} 1787}
1790 1788
1791core_initcall(netlink_proto_init); 1789core_initcall(netlink_proto_init);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index eea366966740..0a6cfa0005be 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -796,7 +796,7 @@ static int __init init_u32(void)
 {
 	printk("u32 classifier\n");
 #ifdef CONFIG_CLS_U32_PERF
-	printk("    Perfomance counters on\n");
+	printk("    Performance counters on\n");
 #endif
 #ifdef CONFIG_NET_CLS_POLICE
 	printk("    OLD policer on \n");
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c7844bacbbcb..a19eff12cf78 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -430,7 +430,7 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 	}
 #endif
 
-	err = -EINVAL;
+	err = -ENOENT;
 	if (ops == NULL)
 		goto err_out;
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4f11f5858209..17b509282cf2 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -806,38 +806,26 @@ no_mem:
806 806
807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ 807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, 808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
809 const struct sctp_chunk *chunk, 809 const struct msghdr *msg,
810 const struct msghdr *msg) 810 size_t paylen)
811{ 811{
812 struct sctp_chunk *retval; 812 struct sctp_chunk *retval;
813 void *payload = NULL, *payoff; 813 void *payload = NULL;
814 size_t paylen = 0; 814 int err;
815 struct iovec *iov = NULL;
816 int iovlen = 0;
817
818 if (msg) {
819 iov = msg->msg_iov;
820 iovlen = msg->msg_iovlen;
821 paylen = get_user_iov_size(iov, iovlen);
822 }
823 815
824 retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); 816 retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
825 if (!retval) 817 if (!retval)
826 goto err_chunk; 818 goto err_chunk;
827 819
828 if (paylen) { 820 if (paylen) {
829 /* Put the msg_iov together into payload. */ 821 /* Put the msg_iov together into payload. */
830 payload = kmalloc(paylen, GFP_ATOMIC); 822 payload = kmalloc(paylen, GFP_KERNEL);
831 if (!payload) 823 if (!payload)
832 goto err_payload; 824 goto err_payload;
833 payoff = payload;
834 825
835 for (; iovlen > 0; --iovlen) { 826 err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
836 if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) 827 if (err < 0)
837 goto err_copy; 828 goto err_copy;
838 payoff += iov->iov_len;
839 iov++;
840 }
841 } 829 }
842 830
843 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); 831 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index ead3f1b0ea3d..5b5ae7958322 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4031,18 +4031,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
4031 * from its upper layer, but retransmits data to the far end 4031 * from its upper layer, but retransmits data to the far end
4032 * if necessary to fill gaps. 4032 * if necessary to fill gaps.
4033 */ 4033 */
4034 struct msghdr *msg = arg; 4034 struct sctp_chunk *abort = arg;
4035 struct sctp_chunk *abort;
4036 sctp_disposition_t retval; 4035 sctp_disposition_t retval;
4037 4036
4038 retval = SCTP_DISPOSITION_CONSUME; 4037 retval = SCTP_DISPOSITION_CONSUME;
4039 4038
4040 /* Generate ABORT chunk to send the peer. */ 4039 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4041 abort = sctp_make_abort_user(asoc, NULL, msg);
4042 if (!abort)
4043 retval = SCTP_DISPOSITION_NOMEM;
4044 else
4045 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4046 4040
4047 /* Even if we can't send the ABORT due to low memory delete the 4041 /* Even if we can't send the ABORT due to low memory delete the
4048 * TCB. This is a departure from our typical NOMEM handling. 4042 * TCB. This is a departure from our typical NOMEM handling.
@@ -4166,8 +4160,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4166 void *arg, 4160 void *arg,
4167 sctp_cmd_seq_t *commands) 4161 sctp_cmd_seq_t *commands)
4168{ 4162{
4169 struct msghdr *msg = arg; 4163 struct sctp_chunk *abort = arg;
4170 struct sctp_chunk *abort;
4171 sctp_disposition_t retval; 4164 sctp_disposition_t retval;
4172 4165
4173 /* Stop T1-init timer */ 4166 /* Stop T1-init timer */
@@ -4175,12 +4168,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4175 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 4168 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
4176 retval = SCTP_DISPOSITION_CONSUME; 4169 retval = SCTP_DISPOSITION_CONSUME;
4177 4170
4178 /* Generate ABORT chunk to send the peer */ 4171 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4179 abort = sctp_make_abort_user(asoc, NULL, msg);
4180 if (!abort)
4181 retval = SCTP_DISPOSITION_NOMEM;
4182 else
4183 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4184 4172
4185 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 4173 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
4186 SCTP_STATE(SCTP_STATE_CLOSED)); 4174 SCTP_STATE(SCTP_STATE_CLOSED));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 54722e622e6d..dab15949958e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1289,9 +1289,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1289 } 1289 }
1290 } 1290 }
1291 1291
1292 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) 1292 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1293 sctp_primitive_ABORT(asoc, NULL); 1293 struct sctp_chunk *chunk;
1294 else 1294
1295 chunk = sctp_make_abort_user(asoc, NULL, 0);
1296 if (chunk)
1297 sctp_primitive_ABORT(asoc, chunk);
1298 } else
1295 sctp_primitive_SHUTDOWN(asoc, NULL); 1299 sctp_primitive_SHUTDOWN(asoc, NULL);
1296 } 1300 }
1297 1301
@@ -1520,8 +1524,16 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1520 goto out_unlock; 1524 goto out_unlock;
1521 } 1525 }
1522 if (sinfo_flags & SCTP_ABORT) { 1526 if (sinfo_flags & SCTP_ABORT) {
1527 struct sctp_chunk *chunk;
1528
1529 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1530 if (!chunk) {
1531 err = -ENOMEM;
1532 goto out_unlock;
1533 }
1534
1523 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); 1535 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
1524 sctp_primitive_ABORT(asoc, msg); 1536 sctp_primitive_ABORT(asoc, chunk);
1525 err = 0; 1537 err = 0;
1526 goto out_unlock; 1538 goto out_unlock;
1527 } 1539 }
diff --git a/net/socket.c b/net/socket.c
index b4848ce0d6ac..6d261bf206fc 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1178,7 +1178,8 @@ static int __sock_create(int family, int type, int protocol, struct socket **res
 	 */
 
 	if (!(sock = sock_alloc())) {
-		printk(KERN_WARNING "socket: no more sockets\n");
+		if (net_ratelimit())
+			printk(KERN_WARNING "socket: no more sockets\n");
 		err = -ENFILE;		/* Not exactly a match, but its the
 					   closest posix thing */
 		goto out;
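Rate-limiting a warning that unprivileged userspace can trigger at will is a recurring pattern; the same guard can wrap any networking printk. A hedged sketch (report_exhaustion is an invented name):

#include <linux/kernel.h>
#include <linux/net.h>

static void report_exhaustion(void)
{
	/* net_ratelimit() returns nonzero only while the shared printk
	 * budget for networking warnings has not been spent, so a flood
	 * of failures cannot fill the log. */
	if (net_ratelimit())
		printk(KERN_WARNING "myproto: out of resources\n");
}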
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4a9aa9393b97..ef1cf5b476c8 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -718,8 +718,7 @@ gss_destroy(struct rpc_auth *auth)
 		auth, auth->au_flavor);
 
 	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
-	rpc_unlink(gss_auth->path);
-	dput(gss_auth->dentry);
+	rpc_unlink(gss_auth->dentry);
 	gss_auth->dentry = NULL;
 	gss_mech_put(gss_auth->mech);
 
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7026b0866b7b..00cb388ece03 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -71,7 +71,12 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
71 new = detail->alloc(); 71 new = detail->alloc();
72 if (!new) 72 if (!new)
73 return NULL; 73 return NULL;
74 /* must fully initialise 'new', else
75 * we might get lose if we need to
76 * cache_put it soon.
77 */
74 cache_init(new); 78 cache_init(new);
79 detail->init(new, key);
75 80
76 write_lock(&detail->hash_lock); 81 write_lock(&detail->hash_lock);
77 82
@@ -85,7 +90,6 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
85 return tmp; 90 return tmp;
86 } 91 }
87 } 92 }
88 detail->init(new, key);
89 new->next = *head; 93 new->next = *head;
90 *head = new; 94 *head = new;
91 detail->entries++; 95 detail->entries++;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 4ba271f892c8..3e19d321067a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -183,8 +183,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
183 183
184out_no_auth: 184out_no_auth:
185 if (!IS_ERR(clnt->cl_dentry)) { 185 if (!IS_ERR(clnt->cl_dentry)) {
186 rpc_rmdir(clnt->cl_pathname); 186 rpc_rmdir(clnt->cl_dentry);
187 dput(clnt->cl_dentry);
188 rpc_put_mount(); 187 rpc_put_mount();
189 } 188 }
190out_no_path: 189out_no_path:
@@ -251,10 +250,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
251 new->cl_autobind = 0; 250 new->cl_autobind = 0;
252 new->cl_oneshot = 0; 251 new->cl_oneshot = 0;
253 new->cl_dead = 0; 252 new->cl_dead = 0;
254 if (!IS_ERR(new->cl_dentry)) { 253 if (!IS_ERR(new->cl_dentry))
255 dget(new->cl_dentry); 254 dget(new->cl_dentry);
256 rpc_get_mount();
257 }
258 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 255 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
259 if (new->cl_auth) 256 if (new->cl_auth)
260 atomic_inc(&new->cl_auth->au_count); 257 atomic_inc(&new->cl_auth->au_count);
@@ -317,11 +314,15 @@ rpc_destroy_client(struct rpc_clnt *clnt)
317 clnt->cl_auth = NULL; 314 clnt->cl_auth = NULL;
318 } 315 }
319 if (clnt->cl_parent != clnt) { 316 if (clnt->cl_parent != clnt) {
317 if (!IS_ERR(clnt->cl_dentry))
318 dput(clnt->cl_dentry);
320 rpc_destroy_client(clnt->cl_parent); 319 rpc_destroy_client(clnt->cl_parent);
321 goto out_free; 320 goto out_free;
322 } 321 }
323 if (clnt->cl_pathname[0]) 322 if (!IS_ERR(clnt->cl_dentry)) {
324 rpc_rmdir(clnt->cl_pathname); 323 rpc_rmdir(clnt->cl_dentry);
324 rpc_put_mount();
325 }
325 if (clnt->cl_xprt) { 326 if (clnt->cl_xprt) {
326 xprt_destroy(clnt->cl_xprt); 327 xprt_destroy(clnt->cl_xprt);
327 clnt->cl_xprt = NULL; 328 clnt->cl_xprt = NULL;
@@ -331,10 +332,6 @@ rpc_destroy_client(struct rpc_clnt *clnt)
331out_free: 332out_free:
332 rpc_free_iostats(clnt->cl_metrics); 333 rpc_free_iostats(clnt->cl_metrics);
333 clnt->cl_metrics = NULL; 334 clnt->cl_metrics = NULL;
334 if (!IS_ERR(clnt->cl_dentry)) {
335 dput(clnt->cl_dentry);
336 rpc_put_mount();
337 }
338 kfree(clnt); 335 kfree(clnt);
339 return 0; 336 return 0;
340} 337}
@@ -921,26 +918,43 @@ call_transmit(struct rpc_task *task)
921 task->tk_status = xprt_prepare_transmit(task); 918 task->tk_status = xprt_prepare_transmit(task);
922 if (task->tk_status != 0) 919 if (task->tk_status != 0)
923 return; 920 return;
921 task->tk_action = call_transmit_status;
924 /* Encode here so that rpcsec_gss can use correct sequence number. */ 922 /* Encode here so that rpcsec_gss can use correct sequence number. */
925 if (rpc_task_need_encode(task)) { 923 if (rpc_task_need_encode(task)) {
926 task->tk_rqstp->rq_bytes_sent = 0; 924 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
927 call_encode(task); 925 call_encode(task);
928 /* Did the encode result in an error condition? */ 926 /* Did the encode result in an error condition? */
929 if (task->tk_status != 0) 927 if (task->tk_status != 0)
930 goto out_nosend; 928 return;
931 } 929 }
932 task->tk_action = call_transmit_status;
933 xprt_transmit(task); 930 xprt_transmit(task);
934 if (task->tk_status < 0) 931 if (task->tk_status < 0)
935 return; 932 return;
936 if (!task->tk_msg.rpc_proc->p_decode) { 933 /*
937 task->tk_action = rpc_exit_task; 934 * On success, ensure that we call xprt_end_transmit() before sleeping
938 rpc_wake_up_task(task); 935 * in order to allow access to the socket to other RPC requests.
939 } 936 */
940 return; 937 call_transmit_status(task);
941out_nosend: 938 if (task->tk_msg.rpc_proc->p_decode != NULL)
942 /* release socket write lock before attempting to handle error */ 939 return;
943 xprt_abort_transmit(task); 940 task->tk_action = rpc_exit_task;
941 rpc_wake_up_task(task);
942}
943
944/*
945 * 5a. Handle cleanup after a transmission
946 */
947static void
948call_transmit_status(struct rpc_task *task)
949{
950 task->tk_action = call_status;
951 /*
952 * Special case: if we've been waiting on the socket's write_space()
953 * callback, then don't call xprt_end_transmit().
954 */
955 if (task->tk_status == -EAGAIN)
956 return;
957 xprt_end_transmit(task);
944 rpc_task_force_reencode(task); 958 rpc_task_force_reencode(task);
945} 959}
946 960
@@ -992,18 +1006,7 @@ call_status(struct rpc_task *task)
992} 1006}
993 1007
994/* 1008/*
995 * 6a. Handle transmission errors. 1009 * 6a. Handle RPC timeout
996 */
997static void
998call_transmit_status(struct rpc_task *task)
999{
1000 if (task->tk_status != -EAGAIN)
1001 rpc_task_force_reencode(task);
1002 call_status(task);
1003}
1004
1005/*
1006 * 6b. Handle RPC timeout
1007 * We do not release the request slot, so we keep using the 1010 * We do not release the request slot, so we keep using the
1008 * same XID for all retransmits. 1011 * same XID for all retransmits.
1009 */ 1012 */
@@ -1178,6 +1181,17 @@ call_verify(struct rpc_task *task)
1178 u32 *p = iov->iov_base, n; 1181 u32 *p = iov->iov_base, n;
1179 int error = -EACCES; 1182 int error = -EACCES;
1180 1183
1184 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1185 /* RFC-1014 says that the representation of XDR data must be a
1186 * multiple of four bytes
1187 * - if it isn't pointer subtraction in the NFS client may give
1188 * undefined results
1189 */
1190 printk(KERN_WARNING
1191 "call_verify: XDR representation not a multiple of"
1192 " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len);
1193 goto out_eio;
1194 }
1181 if ((len -= 3) < 0) 1195 if ((len -= 3) < 0)
1182 goto out_overflow; 1196 goto out_overflow;
1183 p += 1; /* skip XID */ 1197 p += 1; /* skip XID */
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index dc6cb93c8830..0b1a1ac8a4bc 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -539,6 +539,7 @@ repeat:
539 rpc_close_pipes(dentry->d_inode); 539 rpc_close_pipes(dentry->d_inode);
540 simple_unlink(dir, dentry); 540 simple_unlink(dir, dentry);
541 } 541 }
542 inode_dir_notify(dir, DN_DELETE);
542 dput(dentry); 543 dput(dentry);
543 } while (n); 544 } while (n);
544 goto repeat; 545 goto repeat;
@@ -610,8 +611,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
610 int error; 611 int error;
611 612
612 shrink_dcache_parent(dentry); 613 shrink_dcache_parent(dentry);
613 if (dentry->d_inode) 614 if (d_unhashed(dentry))
614 rpc_close_pipes(dentry->d_inode); 615 return 0;
615 if ((error = simple_rmdir(dir, dentry)) != 0) 616 if ((error = simple_rmdir(dir, dentry)) != 0)
616 return error; 617 return error;
617 if (!error) { 618 if (!error) {
@@ -667,10 +668,11 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
667 RPCAUTH_info, RPCAUTH_EOF); 668 RPCAUTH_info, RPCAUTH_EOF);
668 if (error) 669 if (error)
669 goto err_depopulate; 670 goto err_depopulate;
671 dget(dentry);
670out: 672out:
671 mutex_unlock(&dir->i_mutex); 673 mutex_unlock(&dir->i_mutex);
672 rpc_release_path(&nd); 674 rpc_release_path(&nd);
673 return dget(dentry); 675 return dentry;
674err_depopulate: 676err_depopulate:
675 rpc_depopulate(dentry); 677 rpc_depopulate(dentry);
676 __rpc_rmdir(dir, dentry); 678 __rpc_rmdir(dir, dentry);
@@ -683,28 +685,20 @@ err_dput:
683} 685}
684 686
685int 687int
686rpc_rmdir(char *path) 688rpc_rmdir(struct dentry *dentry)
687{ 689{
688 struct nameidata nd; 690 struct dentry *parent;
689 struct dentry *dentry;
690 struct inode *dir; 691 struct inode *dir;
691 int error; 692 int error;
692 693
693 if ((error = rpc_lookup_parent(path, &nd)) != 0) 694 parent = dget_parent(dentry);
694 return error; 695 dir = parent->d_inode;
695 dir = nd.dentry->d_inode;
696 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 696 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
697 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
698 if (IS_ERR(dentry)) {
699 error = PTR_ERR(dentry);
700 goto out_release;
701 }
702 rpc_depopulate(dentry); 697 rpc_depopulate(dentry);
703 error = __rpc_rmdir(dir, dentry); 698 error = __rpc_rmdir(dir, dentry);
704 dput(dentry); 699 dput(dentry);
705out_release:
706 mutex_unlock(&dir->i_mutex); 700 mutex_unlock(&dir->i_mutex);
707 rpc_release_path(&nd); 701 dput(parent);
708 return error; 702 return error;
709} 703}
710 704
@@ -731,10 +725,11 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
731 rpci->flags = flags; 725 rpci->flags = flags;
732 rpci->ops = ops; 726 rpci->ops = ops;
733 inode_dir_notify(dir, DN_CREATE); 727 inode_dir_notify(dir, DN_CREATE);
728 dget(dentry);
734out: 729out:
735 mutex_unlock(&dir->i_mutex); 730 mutex_unlock(&dir->i_mutex);
736 rpc_release_path(&nd); 731 rpc_release_path(&nd);
737 return dget(dentry); 732 return dentry;
738err_dput: 733err_dput:
739 dput(dentry); 734 dput(dentry);
740 dentry = ERR_PTR(-ENOMEM); 735 dentry = ERR_PTR(-ENOMEM);
@@ -744,32 +739,26 @@ err_dput:
744} 739}
745 740
746int 741int
747rpc_unlink(char *path) 742rpc_unlink(struct dentry *dentry)
748{ 743{
749 struct nameidata nd; 744 struct dentry *parent;
750 struct dentry *dentry;
751 struct inode *dir; 745 struct inode *dir;
752 int error; 746 int error = 0;
753 747
754 if ((error = rpc_lookup_parent(path, &nd)) != 0) 748 parent = dget_parent(dentry);
755 return error; 749 dir = parent->d_inode;
756 dir = nd.dentry->d_inode;
757 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 750 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
758 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); 751 if (!d_unhashed(dentry)) {
759 if (IS_ERR(dentry)) { 752 d_drop(dentry);
760 error = PTR_ERR(dentry); 753 if (dentry->d_inode) {
761 goto out_release; 754 rpc_close_pipes(dentry->d_inode);
762 } 755 error = simple_unlink(dir, dentry);
763 d_drop(dentry); 756 }
764 if (dentry->d_inode) { 757 inode_dir_notify(dir, DN_DELETE);
765 rpc_close_pipes(dentry->d_inode);
766 error = simple_unlink(dir, dentry);
767 } 758 }
768 dput(dentry); 759 dput(dentry);
769 inode_dir_notify(dir, DN_DELETE);
770out_release:
771 mutex_unlock(&dir->i_mutex); 760 mutex_unlock(&dir->i_mutex);
772 rpc_release_path(&nd); 761 dput(parent);
773 return error; 762 return error;
774} 763}
775 764
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 313b68d892c6..e8c2bc4977f3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -707,12 +707,9 @@ out_unlock:
707 return err; 707 return err;
708} 708}
709 709
710void 710void xprt_end_transmit(struct rpc_task *task)
711xprt_abort_transmit(struct rpc_task *task)
712{ 711{
713 struct rpc_xprt *xprt = task->tk_xprt; 712 xprt_release_write(task->tk_xprt, task);
714
715 xprt_release_write(xprt, task);
716} 713}
717 714
718/** 715/**
@@ -761,8 +758,6 @@ void xprt_transmit(struct rpc_task *task)
761 task->tk_status = -ENOTCONN; 758 task->tk_status = -ENOTCONN;
762 else if (!req->rq_received) 759 else if (!req->rq_received)
763 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); 760 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
764
765 xprt->ops->release_xprt(xprt, task);
766 spin_unlock_bh(&xprt->transport_lock); 761 spin_unlock_bh(&xprt->transport_lock);
767 return; 762 return;
768 } 763 }
@@ -772,18 +767,8 @@ void xprt_transmit(struct rpc_task *task)
772 * schedq, and being picked up by a parallel run of rpciod(). 767 * schedq, and being picked up by a parallel run of rpciod().
773 */ 768 */
774 task->tk_status = status; 769 task->tk_status = status;
775 770 if (status == -ECONNREFUSED)
776 switch (status) {
777 case -ECONNREFUSED:
778 rpc_sleep_on(&xprt->sending, task, NULL, NULL); 771 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
779 case -EAGAIN:
780 case -ENOTCONN:
781 return;
782 default:
783 break;
784 }
785 xprt_release_write(xprt, task);
786 return;
787} 772}
788 773
789static inline void do_xprt_reserve(struct rpc_task *task) 774static inline void do_xprt_reserve(struct rpc_task *task)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ee678ed13b6f..441bd53f5eca 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -414,6 +414,33 @@ static int xs_tcp_send_request(struct rpc_task *task)
414} 414}
415 415
416/** 416/**
417 * xs_tcp_release_xprt - clean up after a tcp transmission
418 * @xprt: transport
419 * @task: rpc task
420 *
421 * This cleans up if an error causes us to abort the transmission of a request.
422 * In this case, the socket may need to be reset in order to avoid confusing
423 * the server.
424 */
425static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
426{
427 struct rpc_rqst *req;
428
429 if (task != xprt->snd_task)
430 return;
431 if (task == NULL)
432 goto out_release;
433 req = task->tk_rqstp;
434 if (req->rq_bytes_sent == 0)
435 goto out_release;
436 if (req->rq_bytes_sent == req->rq_snd_buf.len)
437 goto out_release;
438 set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
439out_release:
440 xprt_release_xprt(xprt, task);
441}
442
443/**
417 * xs_close - close a socket 444 * xs_close - close a socket
418 * @xprt: transport 445 * @xprt: transport
419 * 446 *
@@ -1250,7 +1277,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
1250 1277
1251static struct rpc_xprt_ops xs_tcp_ops = { 1278static struct rpc_xprt_ops xs_tcp_ops = {
1252 .reserve_xprt = xprt_reserve_xprt, 1279 .reserve_xprt = xprt_reserve_xprt,
1253 .release_xprt = xprt_release_xprt, 1280 .release_xprt = xs_tcp_release_xprt,
1254 .set_port = xs_set_port, 1281 .set_port = xs_set_port,
1255 .connect = xs_connect, 1282 .connect = xs_connect,
1256 .buf_alloc = rpc_malloc, 1283 .buf_alloc = rpc_malloc,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 6f2909279268..de6ec519272e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -128,23 +128,17 @@ static atomic_t unix_nr_socks = ATOMIC_INIT(0);
128#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 128#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
129 129
130#ifdef CONFIG_SECURITY_NETWORK 130#ifdef CONFIG_SECURITY_NETWORK
131static void unix_get_peersec_dgram(struct sk_buff *skb) 131static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
132{ 132{
133 int err; 133 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
134
135 err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb),
136 UNIXSECLEN(skb));
137 if (err)
138 *(UNIXSECDATA(skb)) = NULL;
139} 134}
140 135
141static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 136static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
142{ 137{
143 scm->secdata = *UNIXSECDATA(skb); 138 scm->secid = *UNIXSID(skb);
144 scm->seclen = *UNIXSECLEN(skb);
145} 139}
146#else 140#else
147static inline void unix_get_peersec_dgram(struct sk_buff *skb) 141static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
148{ } 142{ }
149 143
150static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 144static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
@@ -1322,8 +1316,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1322 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1316 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1323 if (siocb->scm->fp) 1317 if (siocb->scm->fp)
1324 unix_attach_fds(siocb->scm, skb); 1318 unix_attach_fds(siocb->scm, skb);
1325 1319 unix_get_secdata(siocb->scm, skb);
1326 unix_get_peersec_dgram(skb);
1327 1320
1328 skb->h.raw = skb->data; 1321 skb->h.raw = skb->data;
1329 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); 1322 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f35bc676128c..3da67ca2c3ce 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1134,12 +1134,33 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1134} 1134}
1135EXPORT_SYMBOL(__xfrm_route_forward); 1135EXPORT_SYMBOL(__xfrm_route_forward);
1136 1136
1137/* Optimize later using cookies and generation ids. */
1138
1137static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 1139static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
1138{ 1140{
1139 /* If it is marked obsolete, which is how we even get here, 1141 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
1140 * then we have purged it from the policy bundle list and we 1142 * to "-1" to force all XFRM destinations to get validated by
1141 * did that for a good reason. 1143 * dst_ops->check on every use. We do this because when a
1144 * normal route referenced by an XFRM dst is obsoleted we do
1145 * not go looking around for all parent referencing XFRM dsts
1146 * so that we can invalidate them. It is just too much work.
1147 * Instead we make the checks here on every use. For example:
1148 *
1149 * XFRM dst A --> IPv4 dst X
1150 *
1151 * X is the "xdst->route" of A (X is also the "dst->path" of A
1152 * in this example). If X is marked obsolete, "A" will not
1153 * notice. That's what we are validating here via the
1154 * stale_bundle() check.
1155 *
1156 * When a policy's bundle is pruned, we dst_free() the XFRM
1157 * dst which causes it's ->obsolete field to be set to a
1158 * positive non-zero integer. If an XFRM dst has been pruned
1159 * like this, we want to force a new route lookup.
1142 */ 1160 */
1161 if (dst->obsolete < 0 && !stale_bundle(dst))
1162 return dst;
1163
1143 return NULL; 1164 return NULL;
1144} 1165}
1145 1166