author     Dmitry Torokhov <dtor@insightbb.com>  2006-09-19 01:56:44 -0400
committer  Dmitry Torokhov <dtor@insightbb.com>  2006-09-19 01:56:44 -0400
commit     0612ec48762bf8712db1925b2e67246d2237ebab (patch)
tree       01b0d69c9c9915015c0f23ad4263646dd5413e99 /net
parent     4263cf0fac28122c8381b6f4f9441a43cd93c81f (diff)
parent     47a5c6fa0e204a2b63309c648bb2fde36836c826 (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'net')
 net/8021q/vlan.c | 11
 net/Kconfig | 2
 net/appletalk/ddp.c | 6
 net/atm/br2684.c | 3
 net/atm/clip.c | 18
 net/atm/ipcommon.c | 17
 net/atm/lec.c | 3
 net/atm/mpc.c | 3
 net/atm/pppoatm.c | 3
 net/atm/proc.c | 2
 net/atm/resources.c | 3
 net/ax25/af_ax25.c | 17
 net/ax25/ax25_dev.c | 4
 net/ax25/ax25_ds_subr.c | 8
 net/ax25/ax25_ds_timer.c | 4
 net/ax25/ax25_iface.c | 18
 net/ax25/ax25_in.c | 2
 net/ax25/sysctl_net_ax25.c | 4
 net/bluetooth/cmtp/capi.c | 4
 net/bluetooth/cmtp/core.c | 3
 net/bluetooth/hci_core.c | 7
 net/bluetooth/hidp/Kconfig | 3
 net/bluetooth/hidp/core.c | 3
 net/bluetooth/l2cap.c | 18
 net/bluetooth/rfcomm/core.c | 28
 net/bluetooth/rfcomm/tty.c | 3
 net/bluetooth/sco.c | 12
 net/bridge/br_forward.c | 12
 net/bridge/br_if.c | 7
 net/bridge/br_ioctl.c | 7
 net/bridge/br_netfilter.c | 7
 net/bridge/br_netlink.c | 2
 net/bridge/br_stp_bpdu.c | 2
 net/bridge/netfilter/ebt_ulog.c | 3
 net/core/Makefile | 2
 net/core/dev.c | 78
 net/core/dst.c | 3
 net/core/ethtool.c | 2
 net/core/neighbour.c | 17
 net/core/netevent.c | 69
 net/core/pktgen.c | 4
 net/core/rtnetlink.c | 15
 net/core/skbuff.c | 132
 net/core/stream.c | 16
 net/core/user_dma.c | 1
 net/core/utils.c | 7
 net/core/wireless.c | 24
 net/dccp/ccids/ccid3.c | 153
 net/dccp/ccids/ccid3.h | 9
 net/dccp/ccids/lib/loss_interval.c | 36
 net/dccp/ccids/lib/loss_interval.h | 9
 net/dccp/ccids/lib/packet_history.c | 168
 net/dccp/ccids/lib/packet_history.h | 17
 net/dccp/ccids/lib/tfrc.h | 2
 net/dccp/ccids/lib/tfrc_equation.c | 2
 net/dccp/dccp.h | 10
 net/dccp/feat.h | 2
 net/dccp/ipv4.c | 3
 net/dccp/ipv6.c | 8
 net/dccp/options.c | 4
 net/dccp/proto.c | 4
 net/decnet/dn_dev.c | 9
 net/decnet/dn_fib.c | 3
 net/decnet/dn_neigh.c | 3
 net/decnet/dn_route.c | 9
 net/decnet/dn_rules.c | 6
 net/decnet/dn_table.c | 11
 net/econet/af_econet.c | 3
 net/ieee80211/Kconfig | 1
 net/ieee80211/ieee80211_crypt.c | 3
 net/ieee80211/ieee80211_crypt_ccmp.c | 3
 net/ieee80211/ieee80211_crypt_wep.c | 3
 net/ieee80211/ieee80211_rx.c | 4
 net/ieee80211/ieee80211_tx.c | 15
 net/ieee80211/ieee80211_wx.c | 7
 net/ieee80211/softmac/ieee80211softmac_assoc.c | 31
 net/ieee80211/softmac/ieee80211softmac_auth.c | 32
 net/ieee80211/softmac/ieee80211softmac_io.c | 6
 net/ieee80211/softmac/ieee80211softmac_wx.c | 36
 net/ipv4/Kconfig | 10
 net/ipv4/Makefile | 1
 net/ipv4/af_inet.c | 36
 net/ipv4/ah4.c | 4
 net/ipv4/arp.c | 3
 net/ipv4/devinet.c | 6
 net/ipv4/esp4.c | 4
 net/ipv4/fib_hash.c | 6
 net/ipv4/fib_rules.c | 7
 net/ipv4/fib_semantics.c | 27
 net/ipv4/fib_trie.c | 2
 net/ipv4/igmp.c | 50
 net/ipv4/inet_diag.c | 3
 net/ipv4/inetpeer.c | 2
 net/ipv4/ip_gre.c | 1
 net/ipv4/ip_input.c | 3
 net/ipv4/ip_options.c | 1
 net/ipv4/ip_output.c | 16
 net/ipv4/ip_sockglue.c | 9
 net/ipv4/ipcomp.c | 6
 net/ipv4/ipip.c | 1
 net/ipv4/ipmr.c | 21
 net/ipv4/ipvs/ip_vs_ctl.c | 10
 net/ipv4/ipvs/ip_vs_est.c | 3
 net/ipv4/ipvs/ip_vs_ftp.c | 27
 net/ipv4/netfilter/arp_tables.c | 30
 net/ipv4/netfilter/ip_conntrack_helper_h323.c | 2
 net/ipv4/netfilter/ip_conntrack_netlink.c | 17
 net/ipv4/netfilter/ip_conntrack_sip.c | 2
 net/ipv4/netfilter/ip_conntrack_standalone.c | 4
 net/ipv4/netfilter/ip_nat_snmp_basic.c | 4
 net/ipv4/netfilter/ip_tables.c | 36
 net/ipv4/netfilter/ipt_CLUSTERIP.c | 3
 net/ipv4/netfilter/ipt_ULOG.c | 5
 net/ipv4/netfilter/ipt_hashlimit.c | 14
 net/ipv4/raw.c | 1
 net/ipv4/route.c | 10
 net/ipv4/tcp.c | 5
 net/ipv4/tcp_compound.c | 448
 net/ipv4/tcp_cong.c | 2
 net/ipv4/tcp_highspeed.c | 13
 net/ipv4/tcp_input.c | 14
 net/ipv4/tcp_ipv4.c | 23
 net/ipv4/tcp_lp.c | 35
 net/ipv4/tcp_minisocks.c | 4
 net/ipv4/tcp_output.c | 13
 net/ipv4/tcp_probe.c | 5
 net/ipv4/udp.c | 3
 net/ipv4/xfrm4_mode_tunnel.c | 1
 net/ipv4/xfrm4_output.c | 2
 net/ipv6/addrconf.c | 203
 net/ipv6/af_inet6.c | 2
 net/ipv6/datagram.c | 2
 net/ipv6/exthdrs.c | 29
 net/ipv6/icmp.c | 17
 net/ipv6/inet6_connection_sock.c | 2
 net/ipv6/ip6_input.c | 2
 net/ipv6/ip6_output.c | 135
 net/ipv6/ip6_tunnel.c | 3
 net/ipv6/ipcomp6.c | 3
 net/ipv6/ipv6_sockglue.c | 93
 net/ipv6/mcast.c | 10
 net/ipv6/netfilter/ip6_tables.c | 34
 net/ipv6/raw.c | 3
 net/ipv6/route.c | 11
 net/ipv6/sit.c | 1
 net/ipv6/tcp_ipv6.c | 27
 net/ipv6/udp.c | 2
 net/ipv6/xfrm6_output.c | 4
 net/ipv6/xfrm6_tunnel.c | 140
 net/ipx/af_ipx.c | 10
 net/irda/af_irda.c | 2
 net/irda/ircomm/ircomm_core.c | 4
 net/irda/ircomm/ircomm_lmp.c | 4
 net/irda/ircomm/ircomm_param.c | 2
 net/irda/ircomm/ircomm_tty.c | 8
 net/irda/irda_device.c | 4
 net/irda/iriap.c | 9
 net/irda/iriap_event.c | 2
 net/irda/irias_object.c | 24
 net/irda/irlan/irlan_common.c | 16
 net/irda/irlan/irlan_provider.c | 2
 net/irda/irlap.c | 8
 net/irda/irlap_frame.c | 19
 net/irda/irlmp.c | 11
 net/irda/irnet/irnet_ppp.c | 3
 net/irda/irttp.c | 20
 net/lapb/lapb_iface.c | 16
 net/llc/af_llc.c | 20
 net/llc/llc_core.c | 3
 net/llc/llc_sap.c | 7
 net/netfilter/Kconfig | 4
 net/netfilter/nf_conntrack_netlink.c | 17
 net/netfilter/nf_conntrack_standalone.c | 4
 net/netfilter/nf_queue.c | 9
 net/netfilter/nfnetlink_log.c | 3
 net/netfilter/xt_SECMARK.c | 2
 net/netfilter/xt_physdev.c | 16
 net/netfilter/xt_pkttype.c | 12
 net/netfilter/xt_string.c | 7
 net/netlink/af_netlink.c | 27
 net/netrom/af_netrom.c | 25
 net/netrom/nr_timer.c | 2
 net/packet/af_packet.c | 4
 net/rose/af_rose.c | 12
 net/rxrpc/connection.c | 6
 net/rxrpc/peer.c | 3
 net/rxrpc/transport.c | 6
 net/sched/act_api.c | 31
 net/sched/act_pedit.c | 3
 net/sched/act_police.c | 6
 net/sched/cls_basic.c | 6
 net/sched/cls_fw.c | 6
 net/sched/cls_route.c | 9
 net/sched/cls_rsvp.h | 9
 net/sched/cls_tcindex.c | 12
 net/sched/cls_u32.c | 17
 net/sched/em_meta.c | 3
 net/sched/ematch.c | 3
 net/sched/estimator.c | 3
 net/sched/sch_api.c | 2
 net/sched/sch_cbq.c | 3
 net/sched/sch_generic.c | 5
 net/sched/sch_gred.c | 3
 net/sched/sch_hfsc.c | 3
 net/sched/sch_htb.c | 7
 net/sched/sch_netem.c | 4
 net/sctp/associola.c | 27
 net/sctp/bind_addr.c | 8
 net/sctp/endpointola.c | 11
 net/sctp/ipv6.c | 3
 net/sctp/outqueue.c | 9
 net/sctp/protocol.c | 7
 net/sctp/sm_make_chunk.c | 44
 net/sctp/sm_sideeffect.c | 12
 net/sctp/sm_statefuns.c | 28
 net/sctp/socket.c | 94
 net/sctp/transport.c | 9
 net/socket.c | 3
 net/sunrpc/auth_gss/auth_gss.c | 12
 net/sunrpc/auth_gss/gss_krb5_mech.c | 3
 net/sunrpc/auth_gss/gss_mech_switch.c | 3
 net/sunrpc/auth_gss/gss_spkm3_mech.c | 3
 net/sunrpc/auth_gss/gss_spkm3_token.c | 3
 net/sunrpc/cache.c | 6
 net/sunrpc/clnt.c | 85
 net/sunrpc/rpc_pipe.c | 61
 net/sunrpc/stats.c | 7
 net/sunrpc/svc.c | 6
 net/sunrpc/svcsock.c | 3
 net/sunrpc/xdr.c | 3
 net/sunrpc/xprt.c | 24
 net/sunrpc/xprtsock.c | 35
 net/tipc/bearer.c | 6
 net/tipc/cluster.c | 8
 net/tipc/discover.c | 2
 net/tipc/link.c | 3
 net/tipc/name_table.c | 16
 net/tipc/net.c | 5
 net/tipc/port.c | 5
 net/tipc/ref.c | 2
 net/tipc/subscr.c | 3
 net/tipc/user_reg.c | 3
 net/tipc/zone.c | 3
 net/unix/af_unix.c | 20
 net/wanrouter/af_wanpipe.c | 9
 net/wanrouter/wanmain.c | 9
 net/xfrm/xfrm_policy.c | 30
 net/xfrm/xfrm_state.c | 3
 248 files changed, 2092 insertions(+), 1941 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 458031bfff55..18fcb9fa518d 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -67,10 +67,6 @@ static struct packet_type vlan_packet_type = {
 	.func = vlan_skb_recv, /* VLAN receive method */
 };
 
-/* Bits of netdev state that are propagated from real device to virtual */
-#define VLAN_LINK_STATE_MASK \
-	((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER)|(1<<__LINK_STATE_DORMANT))
-
 /* End of global variables definitions. */
 
 /*
@@ -479,7 +475,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 	new_dev->flags = real_dev->flags;
 	new_dev->flags &= ~IFF_UP;
 
-	new_dev->state = real_dev->state & ~(1<<__LINK_STATE_START);
+	new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
+					     (1<<__LINK_STATE_DORMANT))) |
+			 (1<<__LINK_STATE_PRESENT);
 
 	/* need 4 bytes for extra VLAN header info,
 	 * hope the underlying device can handle it.
@@ -542,12 +540,11 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 	 * so it cannot "appear" on us.
 	 */
 	if (!grp) { /* need to add a new group */
-		grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL);
+		grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
 		if (!grp)
 			goto out_free_unregister;
 
 		/* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
-		memset(grp, 0, sizeof(struct vlan_group));
 		grp->real_dev_ifindex = real_dev->ifindex;
 
 		hlist_add_head_rcu(&grp->hlist,
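
Most of the allocation changes in this merge follow the mechanical pattern seen above: a kmalloc() immediately followed by a memset() of the whole object becomes a single kzalloc() call. A minimal before/after sketch, using a hypothetical struct foo:

	/* Before: allocate, then zero by hand. */
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;
	memset(f, 0, sizeof(*f));

	/* After: kzalloc() returns already-zeroed memory, so the
	 * memset() cannot be forgotten or given the wrong size. */
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

The same substitution repeats in most of the files below.
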
diff --git a/net/Kconfig b/net/Kconfig
index c6cec5aa5486..4959a4e1e0fe 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -177,7 +177,7 @@ source "net/lapb/Kconfig"
 
 config NET_DIVERT
 	bool "Frame Diverter (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && BROKEN
 	---help---
 	  The Frame Diverter allows you to divert packets from the
 	  network, that are not aimed at the interface receiving it (in
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 5ee96d4b40e9..96dc6bb52d14 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -227,12 +227,11 @@ static void atif_drop_device(struct net_device *dev)
 static struct atalk_iface *atif_add_device(struct net_device *dev,
 					   struct atalk_addr *sa)
 {
-	struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL);
+	struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL);
 
 	if (!iface)
 		goto out;
 
-	memset(iface, 0, sizeof(*iface));
 	dev_hold(dev);
 	iface->dev = dev;
 	dev->atalk_ptr = iface;
@@ -559,12 +558,11 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
 	}
 
 	if (!rt) {
-		rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
+		rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
 
 		retval = -ENOBUFS;
 		if (!rt)
 			goto out_unlock;
-		memset(rt, 0, sizeof(*rt));
 
 		rt->next = atalk_routes;
 		atalk_routes = rt;
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index a487233dc466..d00cca97eb33 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -508,10 +508,9 @@ Note: we do not have explicit unassign, but look at _push()
 
 	if (copy_from_user(&be, arg, sizeof be))
 		return -EFAULT;
-	brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
+	brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
 	if (!brvcc)
 		return -ENOMEM;
-	memset(brvcc, 0, sizeof(struct br2684_vcc));
 	write_lock_irq(&devs_lock);
 	net_dev = br2684_find_dev(&be.ifspec);
 	if (net_dev == NULL) {
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 121bf6f49148..7af2c411da82 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -500,9 +500,11 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
 	} else {
 		unsigned int len = skb->len;
 
+		skb_get(skb);
 		clip_push(vcc, skb);
 		PRIV(skb->dev)->stats.rx_packets--;
 		PRIV(skb->dev)->stats.rx_bytes -= len;
+		kfree_skb(skb);
 	}
 	return 0;
 }
@@ -929,12 +931,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
 	struct seq_file *seq;
 	int rc = -EAGAIN;
 
-	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
 	if (!state) {
 		rc = -ENOMEM;
 		goto out_kfree;
 	}
-	memset(state, 0, sizeof(*state));
 	state->ns.neigh_sub_iter = clip_seq_sub_iter;
 
 	rc = seq_open(file, &arp_seq_ops);
@@ -962,7 +963,6 @@ static struct file_operations arp_seq_fops = {
 
 static int __init atm_clip_init(void)
 {
-	struct proc_dir_entry *p;
 	neigh_table_init_no_netlink(&clip_tbl);
 
 	clip_tbl_hook = &clip_tbl;
@@ -972,9 +972,15 @@ static int __init atm_clip_init(void)
 
 	setup_timer(&idle_timer, idle_timer_check, 0);
 
-	p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
-	if (p)
-		p->proc_fops = &arp_seq_fops;
+#ifdef CONFIG_PROC_FS
+	{
+		struct proc_dir_entry *p;
+
+		p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
+		if (p)
+			p->proc_fops = &arp_seq_fops;
+	}
+#endif
 
 	return 0;
 }
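
The clip_mkip() hunk above fixes a use-after-free: clip_push() ultimately hands the skb to the stack, which may free it, yet the old code still dereferenced skb->dev afterwards to adjust statistics. Taking a reference first keeps the buffer alive across the call; a sketch of the pattern, with a hypothetical consuming callee:

	skb_get(skb);                /* refcount 1 -> 2                  */
	consume_skb_somehow(skb);    /* callee drops one reference       */
	touch_stats(skb->dev);       /* still safe: we hold a reference  */
	kfree_skb(skb);              /* drop ours; now it can really die */
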
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 4b1faca5013f..1d3de42fada0 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -25,22 +25,27 @@
 /*
  * skb_migrate appends the list at "from" to "to", emptying "from" in the
  * process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so beware
- * of potential deadlocks.
+ * "from" and "to". Note that it locks both lists at the same time, so to deal
+ * with the lock ordering, the locks are taken in address order.
  *
  * This function should live in skbuff.c or skbuff.h.
  */
 
 
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
 {
 	unsigned long flags;
 	struct sk_buff *skb_from = (struct sk_buff *) from;
 	struct sk_buff *skb_to = (struct sk_buff *) to;
 	struct sk_buff *prev;
 
-	spin_lock_irqsave(&from->lock,flags);
-	spin_lock(&to->lock);
+	if ((unsigned long) from < (unsigned long) to) {
+		spin_lock_irqsave(&from->lock, flags);
+		spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irqsave(&to->lock, flags);
+		spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+	}
 	prev = from->prev;
 	from->next->prev = to->prev;
 	prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 	from->prev = skb_from;
 	from->next = skb_from;
 	from->qlen = 0;
-	spin_unlock_irqrestore(&from->lock,flags);
+	spin_unlock_irqrestore(&from->lock, flags);
 }
 
 
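
Taking two locks of the same class is deadlock-prone: if one CPU locks A then B while another locks B then A, both spin forever. The rewritten skb_migrate() imposes a global order by comparing the locks' addresses, and uses spin_lock_nested() with SINGLE_DEPTH_NESTING so lockdep knows the second acquisition of the same lock class is deliberate. The idiom in isolation (a sketch; the helper name is hypothetical):

	/* Lock two locks of the same class without ABBA deadlock:
	 * always take the lower-addressed lock first. */
	static void double_lock(spinlock_t *a, spinlock_t *b)
	{
		if (a < b) {
			spin_lock(a);
			spin_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock(b);
			spin_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}
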
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 4b68a18171cf..b4aa489849df 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1811,12 +1811,11 @@ make_entry(struct lec_priv *priv, unsigned char *mac_addr)
 {
 	struct lec_arp_table *to_return;
 
-	to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
+	to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
 	if (!to_return) {
 		printk("LEC: Arp entry kmalloc failed\n");
 		return NULL;
 	}
-	memset(to_return, 0, sizeof(struct lec_arp_table));
 	memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
 	init_timer(&to_return->timer);
 	to_return->timer.function = lec_arp_expire_arp;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 9aafe1e2f048..00704661e83f 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -258,10 +258,9 @@ static struct mpoa_client *alloc_mpc(void)
 {
 	struct mpoa_client *mpc;
 
-	mpc = kmalloc(sizeof (struct mpoa_client), GFP_KERNEL);
+	mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL);
 	if (mpc == NULL)
 		return NULL;
-	memset(mpc, 0, sizeof(struct mpoa_client));
 	rwlock_init(&mpc->ingress_lock);
 	rwlock_init(&mpc->egress_lock);
 	mpc->next = mpcs;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 76a7d8ff6c0e..19d5dfc0702f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -287,10 +287,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
 	if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
 	    be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
 		return -EINVAL;
-	pvcc = kmalloc(sizeof(*pvcc), GFP_KERNEL);
+	pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
 	if (pvcc == NULL)
 		return -ENOMEM;
-	memset(pvcc, 0, sizeof(*pvcc));
 	pvcc->atmvcc = atmvcc;
 	pvcc->old_push = atmvcc->push;
 	pvcc->old_pop = atmvcc->pop;
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 3f95b0886a6a..91fe5f53ff11 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -507,7 +507,7 @@ err_out:
 	goto out;
 }
 
-void __exit atm_proc_exit(void)
+void atm_proc_exit(void)
 {
 	atm_proc_dirs_remove();
 }
diff --git a/net/atm/resources.c b/net/atm/resources.c
index de25c6408b04..529f7e64aa2c 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -33,10 +33,9 @@ static struct atm_dev *__alloc_atm_dev(const char *type)
 {
 	struct atm_dev *dev;
 
-	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return NULL;
-	memset(dev, 0, sizeof(*dev));
 	dev->type = type;
 	dev->signal = ATM_PHY_SIG_UNKNOWN;
 	dev->link_rate = ATM_OC3_PCR;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 10a3c0aa8398..000695c48583 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -145,7 +145,7 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
 	ax25_cb *s;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
 			continue;
@@ -154,12 +154,12 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
 		/* If device is null we match any device */
 		if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
 			sock_hold(s->sk);
-			spin_unlock_bh(&ax25_list_lock);
+			spin_unlock(&ax25_list_lock);
 			return s->sk;
 		}
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return NULL;
 }
@@ -174,7 +174,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
 	ax25_cb *s;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
 		    !ax25cmp(&s->dest_addr, dest_addr) &&
@@ -185,7 +185,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
 		}
 	}
 
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return sk;
 }
@@ -235,7 +235,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 	struct sk_buff *copy;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
 		    s->sk->sk_type == SOCK_RAW &&
@@ -248,7 +248,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 			kfree_skb(copy);
 		}
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 }
 
 /*
@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void)
 {
 	ax25_cb *ax25;
 
-	if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
+	if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
 		return NULL;
 
-	memset(ax25, 0x00, sizeof(*ax25));
 	atomic_set(&ax25->refcount, 1);
 
 	skb_queue_head_init(&ax25->write_queue);
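
As I read the ax25_list_lock hunks, these functions run only in softirq (receive and timer) context, where bottom halves are already disabled, so the plain spin_lock() suffices and the _bh variant was pure overhead. The ax25_iface.c hunks further down go the opposite way: protocol registration happens in process context, so its lock users gain the _bh suffix to keep a softirq on the same CPU from deadlocking against them. A sketch of the rule, with a hypothetical lock:

	static DEFINE_SPINLOCK(example_lock);

	void called_from_process_context(void)
	{
		spin_lock_bh(&example_lock);	/* must mask softirqs */
		/* ... */
		spin_unlock_bh(&example_lock);
	}

	void called_from_softirq_context(void)
	{
		spin_lock(&example_lock);	/* BHs already off here */
		/* ... */
		spin_unlock(&example_lock);
	}
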
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 47e6e790bd67..b787678220ff 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev)
 {
 	ax25_dev *ax25_dev;
 
-	if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
+	if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
 		printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
 		return;
 	}
 
 	ax25_unregister_sysctl();
 
-	memset(ax25_dev, 0x00, sizeof(*ax25_dev));
-
 	dev->ax25_ptr = ax25_dev;
 	ax25_dev->dev = dev;
 	dev_hold(dev);
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 1d4ab641f82b..4d22d4430ec8 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -80,7 +80,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
 	ax25_start_t3timer(ax25);
 	ax25_ds_set_timer(ax25->ax25_dev);
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(ax25o, node, &ax25_list) {
 		if (ax25o == ax25)
 			continue;
@@ -106,7 +106,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
 		if (ax25o->state != AX25_STATE_0)
 			ax25_start_t3timer(ax25o);
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 }
 
 void ax25_ds_establish_data_link(ax25_cb *ax25)
@@ -162,13 +162,13 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
 	int res = 0;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(ax25, node, &ax25_list)
 		if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
 			res = 1;
 			break;
 		}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return res;
 }
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 5961459935eb..4f44185955c7 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -85,7 +85,7 @@ static void ax25_ds_timeout(unsigned long arg)
 		return;
 	}
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(ax25, node, &ax25_list) {
 		if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
 			continue;
@@ -93,7 +93,7 @@ static void ax25_ds_timeout(unsigned long arg)
 		ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
 		ax25_disconnect(ax25, ETIMEDOUT);
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	ax25_dev_dama_off(ax25_dev);
 }
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 77ba07c67682..07ac0207eb69 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -66,10 +66,10 @@ int ax25_protocol_register(unsigned int pid,
 	protocol->pid = pid;
 	protocol->func = func;
 
-	write_lock(&protocol_list_lock);
+	write_lock_bh(&protocol_list_lock);
 	protocol->next = protocol_list;
 	protocol_list = protocol;
-	write_unlock(&protocol_list_lock);
+	write_unlock_bh(&protocol_list_lock);
 
 	return 1;
 }
@@ -80,16 +80,16 @@ void ax25_protocol_release(unsigned int pid)
 {
 	struct protocol_struct *s, *protocol;
 
-	write_lock(&protocol_list_lock);
+	write_lock_bh(&protocol_list_lock);
 	protocol = protocol_list;
 	if (protocol == NULL) {
-		write_unlock(&protocol_list_lock);
+		write_unlock_bh(&protocol_list_lock);
 		return;
 	}
 
 	if (protocol->pid == pid) {
 		protocol_list = protocol->next;
-		write_unlock(&protocol_list_lock);
+		write_unlock_bh(&protocol_list_lock);
 		kfree(protocol);
 		return;
 	}
@@ -98,14 +98,14 @@ void ax25_protocol_release(unsigned int pid)
 		if (protocol->next->pid == pid) {
 			s = protocol->next;
 			protocol->next = protocol->next->next;
-			write_unlock(&protocol_list_lock);
+			write_unlock_bh(&protocol_list_lock);
 			kfree(s);
 			return;
 		}
 
 		protocol = protocol->next;
 	}
-	write_unlock(&protocol_list_lock);
+	write_unlock_bh(&protocol_list_lock);
 }
 
 EXPORT_SYMBOL(ax25_protocol_release);
@@ -266,13 +266,13 @@ int ax25_protocol_is_registered(unsigned int pid)
 	struct protocol_struct *protocol;
 	int res = 0;
 
-	read_lock(&protocol_list_lock);
+	read_lock_bh(&protocol_list_lock);
 	for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
 		if (protocol->pid == pid) {
 			res = 1;
 			break;
 		}
-	read_unlock(&protocol_list_lock);
+	read_unlock_bh(&protocol_list_lock);
 
 	return res;
 }
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 4cf87540fb3a..e9d94291581e 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -102,8 +102,8 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
 int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
 {
 	int (*func)(struct sk_buff *, ax25_cb *);
-	volatile int queued = 0;
 	unsigned char pid;
+	int queued = 0;
 
 	if (skb == NULL) return 0;
 
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 369a75b160f2..867d42537979 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -203,13 +203,11 @@ void ax25_register_sysctl(void)
 	for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
 		ax25_table_size += sizeof(ctl_table);
 
-	if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
+	if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
 		spin_unlock_bh(&ax25_dev_lock);
 		return;
 	}
 
-	memset(ax25_table, 0x00, ax25_table_size);
-
 	for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
 		ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
 		if (!child) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 6fb47e00e188..be04e9fb11f6 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -75,15 +75,13 @@
 
 static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl)
 {
-	struct cmtp_application *app = kmalloc(sizeof(*app), GFP_KERNEL);
+	struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL);
 
 	BT_DBG("session %p application %p appl %d", session, app, appl);
 
 	if (!app)
 		return NULL;
 
-	memset(app, 0, sizeof(*app));
-
 	app->state = BT_OPEN;
 	app->appl = appl;
 
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 182254a580e2..b81a01c64aea 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -335,10 +335,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 	baswap(&src, &bt_sk(sock->sk)->src);
 	baswap(&dst, &bt_sk(sock->sk)->dst);
 
-	session = kmalloc(sizeof(struct cmtp_session), GFP_KERNEL);
+	session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
 	if (!session)
 		return -ENOMEM;
-	memset(session, 0, sizeof(struct cmtp_session));
 
 	down_write(&cmtp_session_sem);
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 54e8e5ea2154..5ed474277903 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -336,9 +336,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
 
 	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
 		/* Entry not in the cache. Add new one. */
-		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
+		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
 			return;
-		memset(e, 0, sizeof(struct inquiry_entry));
 		e->next = cache->list;
 		cache->list = e;
 	}
@@ -800,12 +799,10 @@ struct hci_dev *hci_alloc_dev(void)
 {
 	struct hci_dev *hdev;
 
-	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
+	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
 	if (!hdev)
 		return NULL;
 
-	memset(hdev, 0, sizeof(struct hci_dev));
-
 	skb_queue_head_init(&hdev->driver_init);
 
 	return hdev;
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index edfea772fb67..c6abf2a5a932 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,7 +1,6 @@
 config BT_HIDP
 	tristate "HIDP protocol support"
-	depends on BT && BT_L2CAP && (BROKEN || !S390)
-	select INPUT
+	depends on BT && BT_L2CAP && INPUT
 	help
 	  HIDP (Human Interface Device Protocol) is a transport layer
 	  for HID reports. HIDP is required for the Bluetooth Human
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b9c24a55425c..c6e3a2c27c6e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -582,10 +582,9 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 	    bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
 		return -ENOTUNIQ;
 
-	session = kmalloc(sizeof(struct hidp_session), GFP_KERNEL);
+	session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
 	if (!session)
 		return -ENOMEM;
-	memset(session, 0, sizeof(struct hidp_session));
 
 	session->input = input_allocate_device();
 	if (!session->input) {
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index eaaad658d11d..d56f60b392ac 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -185,7 +185,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
 {
 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
 
-	write_lock(&l->lock);
+	write_lock_bh(&l->lock);
 	if (sk == l->head)
 		l->head = next;
 
@@ -193,7 +193,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
 		l2cap_pi(next)->prev_c = prev;
 	if (prev)
 		l2cap_pi(prev)->next_c = next;
-	write_unlock(&l->lock);
+	write_unlock_bh(&l->lock);
 
 	__sock_put(sk);
 }
@@ -313,9 +313,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
 {
 	struct l2cap_chan_list *l = &conn->chan_list;
-	write_lock(&l->lock);
+	write_lock_bh(&l->lock);
 	__l2cap_chan_add(conn, sk, parent);
-	write_unlock(&l->lock);
+	write_unlock_bh(&l->lock);
 }
 
 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -328,14 +328,14 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
 	 *  200 - 254 are used by utilities like l2ping, etc.
 	 */
 
-	spin_lock(&conn->lock);
+	spin_lock_bh(&conn->lock);
 
 	if (++conn->tx_ident > 128)
 		conn->tx_ident = 1;
 
 	id = conn->tx_ident;
 
-	spin_unlock(&conn->lock);
+	spin_unlock_bh(&conn->lock);
 
 	return id;
 }
@@ -1416,11 +1416,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	if (!sk)
 		goto response;
 
-	write_lock(&list->lock);
+	write_lock_bh(&list->lock);
 
 	/* Check if we already have channel with that dcid */
 	if (__l2cap_get_chan_by_dcid(list, scid)) {
-		write_unlock(&list->lock);
+		write_unlock_bh(&list->lock);
 		sock_set_flag(sk, SOCK_ZAPPED);
 		l2cap_sock_kill(sk);
 		goto response;
@@ -1458,7 +1458,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	result = status = 0;
 
 done:
-	write_unlock(&list->lock);
+	write_unlock_bh(&list->lock);
 
 response:
 	bh_unlock_sock(parent);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 155a2b93760e..332dd8f436ea 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -55,6 +55,7 @@
 #define VERSION "1.8"
 
 static int disable_cfc = 0;
+static int channel_mtu = -1;
 static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
 
 static struct task_struct *rfcomm_thread;
@@ -273,10 +274,10 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
 
 struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
 {
-	struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio);
+	struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio);
+
 	if (!d)
 		return NULL;
-	memset(d, 0, sizeof(*d));
 
 	init_timer(&d->timer);
 	d->timer.function = rfcomm_dlc_timeout;
@@ -289,6 +290,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
 	rfcomm_dlc_clear_state(d);
 
 	BT_DBG("%p", d);
+
 	return d;
 }
 
@@ -522,10 +524,10 @@ int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig)
 /* ---- RFCOMM sessions ---- */
 static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
 {
-	struct rfcomm_session *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL);
+
 	if (!s)
 		return NULL;
-	memset(s, 0, sizeof(*s));
 
 	BT_DBG("session %p sock %p", s, sock);
 
@@ -811,7 +813,10 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
 		pn->credits = 0;
 	}
 
-	pn->mtu = htobs(d->mtu);
+	if (cr && channel_mtu >= 0)
+		pn->mtu = htobs(channel_mtu);
+	else
+		pn->mtu = htobs(d->mtu);
 
 	*ptr = __fcs(buf); ptr++;
 
@@ -1242,7 +1247,10 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
 
 	d->priority = pn->priority;
 
-	d->mtu = s->mtu = btohs(pn->mtu);
+	d->mtu = btohs(pn->mtu);
+
+	if (cr && d->mtu > s->mtu)
+		d->mtu = s->mtu;
 
 	return 0;
 }
@@ -1769,6 +1777,11 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
 	s = rfcomm_session_add(nsock, BT_OPEN);
 	if (s) {
 		rfcomm_session_hold(s);
+
+		/* We should adjust MTU on incoming sessions.
+		 * L2CAP MTU minus UIH header and FCS. */
+		s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5;
+
 		rfcomm_schedule(RFCOMM_SCHED_RX);
 	} else
 		sock_release(nsock);
@@ -2086,6 +2099,9 @@ module_exit(rfcomm_exit);
 module_param(disable_cfc, bool, 0644);
 MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
 
+module_param(channel_mtu, int, 0644);
+MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");
+
 module_param(l2cap_mtu, uint, 0644);
 MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
 
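
The new channel_mtu knob is a standard module parameter: settable at load time (e.g. modprobe rfcomm channel_mtu=667) and, with mode 0644, readable and writable at runtime through /sys/module/rfcomm/parameters/channel_mtu. Per the rfcomm_send_pn() hunk above it only overrides the MTU advertised in outgoing parameter negotiation (the cr branch); -1 keeps the per-dlc value. The declaration pattern in isolation (sketch):

	static int channel_mtu = -1;	/* -1 = no override */
	module_param(channel_mtu, int, 0644);
	MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");
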
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2ff2d5b87c93..bd8d671a0ba6 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -169,10 +169,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
 
 	BT_DBG("id %d channel %d", req->dev_id, req->channel);
 
-	dev = kmalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
-	memset(dev, 0, sizeof(struct rfcomm_dev));
 
 	write_lock_bh(&rfcomm_dev_lock);
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 85defccc0287..7714a2ec3854 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -108,17 +108,14 @@ static void sco_sock_init_timer(struct sock *sk)
 static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
 {
 	struct hci_dev *hdev = hcon->hdev;
-	struct sco_conn *conn;
-
-	if ((conn = hcon->sco_data))
-		return conn;
+	struct sco_conn *conn = hcon->sco_data;
 
-	if (status)
+	if (conn || status)
 		return conn;
 
-	if (!(conn = kmalloc(sizeof(struct sco_conn), GFP_ATOMIC)))
+	conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
+	if (!conn)
 		return NULL;
-	memset(conn, 0, sizeof(struct sco_conn));
 
 	spin_lock_init(&conn->lock);
 
@@ -134,6 +131,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
 	conn->mtu = 60;
 
 	BT_DBG("hcon %p conn %p", hcon, conn);
+
 	return conn;
 }
 
139 137
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 8be9f2123e54..864fbbc7b24d 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,16 +35,20 @@ static inline unsigned packet_length(const struct sk_buff *skb)
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
 	/* drop mtu oversized packets except gso */
-	if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
 		kfree_skb(skb);
 	else {
 #ifdef CONFIG_BRIDGE_NETFILTER
 		/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
-		nf_bridge_maybe_copy_header(skb);
+		if (nf_bridge_maybe_copy_header(skb))
+			kfree_skb(skb);
+		else
 #endif
-		skb_push(skb, ETH_HLEN);
+		{
+			skb_push(skb, ETH_HLEN);
 
-		dev_queue_xmit(skb);
+			dev_queue_xmit(skb);
+		}
 	}
 
 	return 0;
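
Several hunks in this merge replace open-coded reads of skb_shinfo(skb)->gso_size with the skb_is_gso() helper. To the best of my recollection the 2.6.18-era helper is just a readability wrapper over that same field:

	/* Sketch of the helper's shape in linux/skbuff.h. */
	static inline int skb_is_gso(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size;
	}

A non-zero gso_size marks a super-packet that will be segmented later, which is why it is allowed to exceed the device MTU here.
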
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f55ef682ef84..b1211d5342f6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br)
 			checksum = 0;
 
 		if (feature & NETIF_F_GSO)
-			feature |= NETIF_F_TSO;
+			feature |= NETIF_F_GSO_SOFTWARE;
 		feature |= NETIF_F_GSO;
 
 		features &= feature;
 	}
 
+	if (!(checksum & NETIF_F_ALL_CSUM))
+		features &= ~NETIF_F_SG;
+	if (!(features & NETIF_F_SG))
+		features &= ~NETIF_F_GSO_MASK;
+
 	br->dev->features = features | checksum | NETIF_F_LLTX |
 			    NETIF_F_GSO_ROBUST;
 }
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 159fb8409824..4e4119a12139 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -162,12 +162,10 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		if (num > BR_MAX_PORTS)
 			num = BR_MAX_PORTS;
 
-		indices = kmalloc(num*sizeof(int), GFP_KERNEL);
+		indices = kcalloc(num, sizeof(int), GFP_KERNEL);
 		if (indices == NULL)
 			return -ENOMEM;
 
-		memset(indices, 0, num*sizeof(int));
-
 		get_port_ifindices(br, indices, num);
 		if (copy_to_user((void __user *)args[1], indices, num*sizeof(int)))
 			num = -EFAULT;
@@ -327,11 +325,10 @@ static int old_deviceless(void __user *uarg)
 
 		if (args[2] >= 2048)
 			return -ENOMEM;
-		indices = kmalloc(args[2]*sizeof(int), GFP_KERNEL);
+		indices = kcalloc(args[2], sizeof(int), GFP_KERNEL);
 		if (indices == NULL)
 			return -ENOMEM;
 
-		memset(indices, 0, args[2]*sizeof(int));
 		args[2] = get_bridge_ifindices(indices, args[2]);
 
 		ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
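
For arrays the merge reaches for kcalloc() rather than kzalloc(): besides returning zeroed memory, kcalloc(n, size, flags) fails cleanly when n * size would overflow, which the open-coded kmalloc(num*sizeof(int)) silently would not. A sketch, with a hypothetical count variable:

	int *indices = kcalloc(num_ports, sizeof(*indices), GFP_KERNEL);
	if (!indices)
		return -ENOMEM;	/* allocation failed, or num_ports * size overflowed */
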
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8298a5179aef..05b3de888243 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -61,6 +61,9 @@ static int brnf_filter_vlan_tagged = 1;
 #define brnf_filter_vlan_tagged 1
 #endif
 
+int brnf_deferred_hooks;
+EXPORT_SYMBOL_GPL(brnf_deferred_hooks);
+
 static __be16 inline vlan_proto(const struct sk_buff *skb)
 {
 	return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -761,7 +764,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	if (skb->protocol == htons(ETH_P_IP) &&
 	    skb->len > skb->dev->mtu &&
-	    !skb_shinfo(skb)->gso_size)
+	    !skb_is_gso(skb))
 		return ip_fragment(skb, br_dev_queue_push_xmit);
 	else
 		return br_dev_queue_push_xmit(skb);
@@ -890,6 +893,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
 		return NF_ACCEPT;
 	else if (ip->version == 6 && !brnf_call_ip6tables)
 		return NF_ACCEPT;
+	else if (!brnf_deferred_hooks)
+		return NF_ACCEPT;
 #endif
 	if (hook == NF_IP_POST_ROUTING)
 		return NF_ACCEPT;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 06abb6634f5b..53086fb75089 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -85,7 +85,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
 		goto err_out;
 
 	err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0);
-	if (err)
+	if (err < 0)
 		goto err_kfree;
 
 	NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index a7ba0cce0b46..068d8afbf0a7 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -121,7 +121,7 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 	buf[1] = 0;
 	buf[2] = 0;
 	buf[3] = BPDU_TYPE_TCN;
-	br_send_bpdu(p, buf, 7);
+	br_send_bpdu(p, buf, 4);
 }
 
 /*
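
For context: a topology change notification BPDU is only four octets on the wire (two bytes of protocol identifier, one of version, one of BPDU type), so the old length of 7 sent three uninitialized bytes past the end of the message; the new length matches what the buffer actually holds.
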
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 02693a230dc1..9f950db3b76f 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -74,6 +74,9 @@ static void ulog_send(unsigned int nlgroup)
 	if (timer_pending(&ub->timer))
 		del_timer(&ub->timer);
 
+	if (!ub->skb)
+		return;
+
 	/* last nlmsg needs NLMSG_DONE */
 	if (ub->qlen > 1)
 		ub->lastnlh->nlmsg_type = NLMSG_DONE;
diff --git a/net/core/Makefile b/net/core/Makefile
index e9bd2467d5a9..2645ba428d48 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
-obj-y		     += dev.o ethtool.o dev_mcast.o dst.o \
+obj-y		     += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o
 
 obj-$(CONFIG_XFRM) += flow.o
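
netevent.o is new in this merge (net/core/netevent.c, 69 lines in the diffstat above): an atomic notifier chain that lets interested parties hear about routing events such as neighbour updates. A hedged sketch of a consumer, based on the API this merge introduces; the handler body and names are hypothetical:

	#include <net/netevent.h>

	static int my_netevent_cb(struct notifier_block *nb,
				  unsigned long event, void *ctx)
	{
		if (event == NETEVENT_NEIGH_UPDATE) {
			/* ctx points at the updated struct neighbour */
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_netevent_cb,
	};

	/* register_netevent_notifier(&my_nb) at init time,
	 * unregister_netevent_notifier(&my_nb) on teardown. */
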
diff --git a/net/core/dev.c b/net/core/dev.c
index 066a60a75280..d4a1ec3bded5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -116,6 +116,7 @@
 #include <linux/audit.h>
 #include <linux/dmaengine.h>
 #include <linux/err.h>
+#include <linux/ctype.h>
 
 /*
  * The list of packet types we will receive (as opposed to discard)
@@ -632,14 +633,22 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
  *	@name: name string
  *
  *	Network device names need to be valid file names to
- *	to allow sysfs to work
+ *	to allow sysfs to work. We also disallow any kind of
+ *	whitespace.
  */
 int dev_valid_name(const char *name)
 {
-	return !(*name == '\0'
-		 || !strcmp(name, ".")
-		 || !strcmp(name, "..")
-		 || strchr(name, '/'));
+	if (*name == '\0')
+		return 0;
+	if (!strcmp(name, ".") || !strcmp(name, ".."))
+		return 0;
+
+	while (*name) {
+		if (*name == '/' || isspace(*name))
+			return 0;
+		name++;
+	}
+	return 1;
 }
 
 /**
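
Illustrative results for the rewritten predicate (my own examples, not from the patch):

	dev_valid_name("eth0");		/* 1: ordinary name              */
	dev_valid_name("");		/* 0: empty string               */
	dev_valid_name("..");		/* 0: reserved directory name    */
	dev_valid_name("vlan/0");	/* 0: '/' would break sysfs      */
	dev_valid_name("eth 0");	/* 0: whitespace is now rejected */
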
@@ -1162,9 +1171,12 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 	unsigned int csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
-	if (inward) {
-		skb->ip_summed = CHECKSUM_NONE;
-		goto out;
+	if (inward)
+		goto out_set_summed;
+
+	if (unlikely(skb_shinfo(skb)->gso_size)) {
+		/* Let GSO fix up the checksum. */
+		goto out_set_summed;
 	}
 
 	if (skb_cloned(skb)) {
@@ -1181,6 +1193,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 	BUG_ON(skb->csum + 2 > offset);
 
 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+
+out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
 out:
 	return ret;
@@ -1201,17 +1215,30 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
     struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
     struct packet_type *ptype;
     int type = skb->protocol;
+    int err;
 
     BUG_ON(skb_shinfo(skb)->frag_list);
-    BUG_ON(skb->ip_summed != CHECKSUM_HW);
 
     skb->mac.raw = skb->data;
     skb->mac_len = skb->nh.raw - skb->data;
     __skb_pull(skb, skb->mac_len);
 
+    if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+        if (skb_header_cloned(skb) &&
+            (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+            return ERR_PTR(err);
+    }
+
     rcu_read_lock();
     list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
         if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+            if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+                err = ptype->gso_send_check(skb);
+                segs = ERR_PTR(err);
+                if (err || skb_gso_ok(skb, features))
+                    break;
+                __skb_push(skb, skb->data - skb->nh.raw);
+            }
             segs = ptype->gso_segment(skb, features);
             break;
         }
@@ -1601,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
     struct net_device *dev = skb->dev;
 
     if (dev->master) {
-        /*
-         * On bonding slaves other than the currently active
-         * slave, suppress duplicates except for 802.3ad
-         * ETH_P_SLOW and alb non-mcast/bcast.
-         */
-        if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-            if (dev->master->priv_flags & IFF_MASTER_ALB) {
-                if (skb->pkt_type != PACKET_BROADCAST &&
-                    skb->pkt_type != PACKET_MULTICAST)
-                    goto keep;
-            }
-
-            if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-                skb->protocol == __constant_htons(ETH_P_SLOW))
-                goto keep;
-
+        if (skb_bond_should_drop(skb)) {
             kfree_skb(skb);
             return NULL;
         }
-keep:
         skb->dev = dev->master;
     }
 
@@ -1727,7 +1738,7 @@ static int ing_filter(struct sk_buff *skb)
     if (dev->qdisc_ingress) {
         __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
         if (MAX_RED_LOOP < ttl++) {
-            printk("Redir loop detected Dropping packet (%s->%s)\n",
+            printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n",
                 skb->input_dev->name, skb->dev->name);
             return TC_ACT_SHOT;
         }
@@ -2922,7 +2933,7 @@ int register_netdevice(struct net_device *dev)
     /* Fix illegal SG+CSUM combinations. */
     if ((dev->features & NETIF_F_SG) &&
         !(dev->features & NETIF_F_ALL_CSUM)) {
-        printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+        printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
                dev->name);
         dev->features &= ~NETIF_F_SG;
     }
@@ -2930,7 +2941,7 @@ int register_netdevice(struct net_device *dev)
     /* TSO requires that SG is present as well. */
     if ((dev->features & NETIF_F_TSO) &&
         !(dev->features & NETIF_F_SG)) {
-        printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+        printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
                dev->name);
         dev->features &= ~NETIF_F_TSO;
     }
@@ -3401,12 +3412,9 @@ static void net_dma_rebalance(void)
     unsigned int cpu, i, n;
     struct dma_chan *chan;
 
-    lock_cpu_hotplug();
-
     if (net_dma_count == 0) {
         for_each_online_cpu(cpu)
-            rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
-        unlock_cpu_hotplug();
+            rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
         return;
     }
 
@@ -3419,15 +3427,13 @@ static void net_dma_rebalance(void)
           + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
 
         while(n) {
-            per_cpu(softnet_data.net_dma, cpu) = chan;
+            per_cpu(softnet_data, cpu).net_dma = chan;
             cpu = next_cpu(cpu, cpu_online_map);
             n--;
         }
         i++;
     }
     rcu_read_unlock();
-
-    unlock_cpu_hotplug();
 }
 
 /**
diff --git a/net/core/dst.c b/net/core/dst.c
index 470c05bc4cb2..1a5e49da0e77 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy)
         dst_gc_timer_inc = DST_GC_INC;
         dst_gc_timer_expires = DST_GC_MIN;
     }
-    dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
 #if RT_CACHE_DEBUG >= 2
     printk("dst_total: %d/%d %ld\n",
            atomic_read(&dst_total), delayed, dst_gc_timer_expires);
 #endif
-    add_timer(&dst_gc_timer);
+    mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
 
 out:
     spin_unlock(&dst_lock);
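
The dst.c change above is a common cleanup: mod_timer(timer, expires) replaces the set-expires-then-add_timer pair and behaves correctly whether or not the timer is already pending. A hedged sketch of the equivalence:

    #include <linux/timer.h>

    /* What mod_timer() replaces. The open-coded form is unsafe if the
     * timer can already be pending (add_timer() BUGs on a pending timer). */
    void rearm_open_coded(struct timer_list *t, unsigned long expires)
    {
        t->expires = expires;    /* old dst_run_gc() did this ... */
        add_timer(t);            /* ... then this */
    }

    void rearm_safe(struct timer_list *t, unsigned long expires)
    {
        mod_timer(t, expires);   /* deletes-if-pending, then re-adds */
    }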
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 27ce1683caf5..2797e2815418 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -437,7 +437,7 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_pauseparam pauseparam;
 
-    if (!dev->ethtool_ops->get_pauseparam)
+    if (!dev->ethtool_ops->set_pauseparam)
         return -EOPNOTSUPP;
 
     if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7ad681f5e712..fe2113f54e2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -29,6 +29,7 @@
 #include <net/neighbour.h>
 #include <net/dst.h>
 #include <net/sock.h>
+#include <net/netevent.h>
 #include <linux/rtnetlink.h>
 #include <linux/random.h>
 #include <linux/string.h>
@@ -754,6 +755,7 @@ static void neigh_timer_handler(unsigned long arg)
             neigh->nud_state = NUD_STALE;
             neigh->updated = jiffies;
             neigh_suspect(neigh);
+            notify = 1;
         }
     } else if (state & NUD_DELAY) {
         if (time_before_eq(now,
@@ -762,6 +764,7 @@ static void neigh_timer_handler(unsigned long arg)
             neigh->nud_state = NUD_REACHABLE;
             neigh->updated = jiffies;
             neigh_connect(neigh);
+            notify = 1;
             next = neigh->confirmed + neigh->parms->reachable_time;
         } else {
             NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
@@ -819,6 +822,8 @@ static void neigh_timer_handler(unsigned long arg)
 out:
         write_unlock(&neigh->lock);
     }
+    if (notify)
+        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 
 #ifdef CONFIG_ARPD
     if (notify && neigh->parms->app_probes)
@@ -926,9 +931,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 {
     u8 old;
     int err;
-#ifdef CONFIG_ARPD
     int notify = 0;
-#endif
     struct net_device *dev;
     int update_isrouter = 0;
 
@@ -948,9 +951,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
         neigh_suspect(neigh);
         neigh->nud_state = new;
         err = 0;
-#ifdef CONFIG_ARPD
         notify = old & NUD_VALID;
-#endif
         goto out;
     }
 
@@ -1022,9 +1023,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
         if (!(new & NUD_CONNECTED))
             neigh->confirmed = jiffies -
                       (neigh->parms->base_reachable_time << 1);
-#ifdef CONFIG_ARPD
         notify = 1;
-#endif
     }
     if (new == old)
         goto out;
@@ -1056,6 +1055,9 @@ out:
             (neigh->flags & ~NTF_ROUTER);
     }
     write_unlock_bh(&neigh->lock);
+
+    if (notify)
+        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 #ifdef CONFIG_ARPD
     if (notify && neigh->parms->app_probes)
         neigh_app_notify(neigh);
@@ -1430,6 +1432,9 @@ int neigh_table_clear(struct neigh_table *tbl)
     kfree(tbl->phash_buckets);
     tbl->phash_buckets = NULL;
 
+    free_percpu(tbl->stats);
+    tbl->stats = NULL;
+
     return 0;
 }
 
diff --git a/net/core/netevent.c b/net/core/netevent.c
new file mode 100644
index 000000000000..35d02c38554e
--- /dev/null
+++ b/net/core/netevent.c
@@ -0,0 +1,69 @@
+/*
+ *    Network event notifiers
+ *
+ *    Authors:
+ *    Tom Tucker             <tom@opengridcomputing.com>
+ *    Steve Wise             <swise@opengridcomputing.com>
+ *
+ *    This program is free software; you can redistribute it and/or
+ *    modify it under the terms of the GNU General Public License
+ *    as published by the Free Software Foundation; either version
+ *    2 of the License, or (at your option) any later version.
+ *
+ *    Fixes:
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+
+static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
+
+/**
+ *    register_netevent_notifier - register a netevent notifier block
+ *    @nb: notifier
+ *
+ *    Register a notifier to be called when a netevent occurs.
+ *    The notifier passed is linked into the kernel structures and must
+ *    not be reused until it has been unregistered. A negative errno code
+ *    is returned on a failure.
+ */
+int register_netevent_notifier(struct notifier_block *nb)
+{
+    int err;
+
+    err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
+    return err;
+}
+
+/**
+ *    unregister_netevent_notifier - unregister a netevent notifier block
+ *    @nb: notifier
+ *
+ *    Unregister a notifier previously registered by
+ *    register_netevent_notifier(). The notifier is unlinked from the
+ *    kernel structures and may then be reused. A negative errno code
+ *    is returned on a failure.
+ */
+
+int unregister_netevent_notifier(struct notifier_block *nb)
+{
+    return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
+}
+
+/**
+ *    call_netevent_notifiers - call all netevent notifier blocks
+ *    @val: value passed unmodified to notifier function
+ *    @v: pointer passed unmodified to notifier function
+ *
+ *    Call all netevent notifier blocks. Parameters and return value
+ *    are as for notifier_call_chain().
+ */
+
+int call_netevent_notifiers(unsigned long val, void *v)
+{
+    return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
+}
+
+EXPORT_SYMBOL_GPL(register_netevent_notifier);
+EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
+EXPORT_SYMBOL_GPL(call_netevent_notifiers);
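
The in-tree producer of this chain is the neighbour code earlier in this diff (NETEVENT_NEIGH_UPDATE). A hedged sketch of how a consumer would subscribe; the callback name and the "hardware refresh" are hypothetical, the notifier API calls are the ones exported above:

    #include <linux/notifier.h>
    #include <net/netevent.h>
    #include <net/neighbour.h>

    static int my_netevent_cb(struct notifier_block *nb,
                              unsigned long event, void *ctx)
    {
        if (event == NETEVENT_NEIGH_UPDATE) {
            struct neighbour *neigh = ctx;
            /* e.g. refresh a hardware L2 entry for neigh->primary_key */
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block my_nb = {
        .notifier_call = my_netevent_cb,
    };

    /* module init/exit would then call:
     *     register_netevent_notifier(&my_nb);
     *     unregister_netevent_notifier(&my_nb);
     */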
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 67ed14ddabd2..6a7320b39ed0 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
     skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
     skb->dev = odev;
     skb->pkt_type = PACKET_HOST;
+    skb->nh.iph = iph;
+    skb->h.uh = udph;
 
     if (pkt_dev->nfrags <= 0)
         pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
     skb->protocol = protocol;
     skb->dev = odev;
     skb->pkt_type = PACKET_HOST;
+    skb->nh.ipv6h = iph;
+    skb->h.uh = udph;
 
     if (pkt_dev->nfrags <= 0)
         pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 20e5bb73f147..30cc1ba6ed5c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
     }
 
     if (ida[IFLA_ADDRESS - 1]) {
+        struct sockaddr *sa;
+        int len;
+
         if (!dev->set_mac_address) {
             err = -EOPNOTSUPP;
             goto out;
@@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
         if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
             goto out;
 
-        err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
+        len = sizeof(sa_family_t) + dev->addr_len;
+        sa = kmalloc(len, GFP_KERNEL);
+        if (!sa) {
+            err = -ENOMEM;
+            goto out;
+        }
+        sa->sa_family = dev->type;
+        memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]),
+               dev->addr_len);
+        err = dev->set_mac_address(dev, sa);
+        kfree(sa);
         if (err)
             goto out;
         send_addr_notify = 1;
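
The fix matters because dev->set_mac_address() implementations expect a struct sockaddr, not the bare attribute bytes the old code passed; do_setlink() now builds one with sa_family set to dev->type. A sketch of the consuming side, following the common ethernet pattern (the driver and function names are hypothetical):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static int my_set_mac_address(struct net_device *dev, void *p)
    {
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
            return -EADDRNOTAVAIL;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
    }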
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 44f6a181a754..c54f3664bce5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,13 +71,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly;
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 /*
- * lockdep: lock class key used by skb_queue_head_init():
- */
-struct lock_class_key skb_queue_lock_key;
-
-EXPORT_SYMBOL(skb_queue_lock_key);
-
-/*
  * Keep out-of-line to prevent kernel bloat.
  * __builtin_return_address is not used because it is not always
  * reliable.
@@ -256,12 +249,37 @@ nodata:
     goto out;
 }
 
+/**
+ *    __netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *    @dev: network device to receive on
+ *    @length: length to allocate
+ *    @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *    Allocate a new &sk_buff and assign it a usage count of one. The
+ *    buffer has unspecified headroom built in. Users should allocate
+ *    the headroom they think they need without accounting for the
+ *    built in space. The built in space is used for optimisations.
+ *
+ *    %NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+        unsigned int length, gfp_t gfp_mask)
+{
+    struct sk_buff *skb;
 
-static void skb_drop_fraglist(struct sk_buff *skb)
+    skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+    if (likely(skb)) {
+        skb_reserve(skb, NET_SKB_PAD);
+        skb->dev = dev;
+    }
+    return skb;
+}
+
+static void skb_drop_list(struct sk_buff **listp)
 {
-    struct sk_buff *list = skb_shinfo(skb)->frag_list;
+    struct sk_buff *list = *listp;
 
-    skb_shinfo(skb)->frag_list = NULL;
+    *listp = NULL;
 
     do {
         struct sk_buff *this = list;
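
__netdev_alloc_skb() above reserves NET_SKB_PAD bytes of headroom and stamps skb->dev before the caller sees the buffer. A hedged usage sketch for a receive path; the handler, pkt_len and rx_buf are hypothetical:

    /* Headroom beyond NET_SKB_PAD is still the caller's business,
     * e.g. the classic 2-byte reserve to align the IP header. */
    static int my_rx(struct net_device *dev, const void *rx_buf,
                     unsigned int pkt_len)
    {
        struct sk_buff *skb = __netdev_alloc_skb(dev, pkt_len + 2, GFP_ATOMIC);

        if (!skb)
            return -ENOMEM;
        skb_reserve(skb, 2);                     /* align the IP header */
        memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
        return 0;
    }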
@@ -270,6 +288,11 @@ static void skb_drop_fraglist(struct sk_buff *skb)
     } while (list);
 }
 
+static inline void skb_drop_fraglist(struct sk_buff *skb)
+{
+    skb_drop_list(&skb_shinfo(skb)->frag_list);
+}
+
 static void skb_clone_fraglist(struct sk_buff *skb)
 {
     struct sk_buff *list;
@@ -830,41 +853,81 @@ free_skb:
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 {
+    struct sk_buff **fragp;
+    struct sk_buff *frag;
     int offset = skb_headlen(skb);
     int nfrags = skb_shinfo(skb)->nr_frags;
     int i;
+    int err;
+
+    if (skb_cloned(skb) &&
+        unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
+        return err;
+
+    i = 0;
+    if (offset >= len)
+        goto drop_pages;
 
-    for (i = 0; i < nfrags; i++) {
+    for (; i < nfrags; i++) {
         int end = offset + skb_shinfo(skb)->frags[i].size;
-        if (end > len) {
-            if (skb_cloned(skb)) {
-                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-                    return -ENOMEM;
-            }
-            if (len <= offset) {
-                put_page(skb_shinfo(skb)->frags[i].page);
-                skb_shinfo(skb)->nr_frags--;
-            } else {
-                skb_shinfo(skb)->frags[i].size = len - offset;
-            }
+
+        if (end < len) {
+            offset = end;
+            continue;
         }
-        offset = end;
+
+        skb_shinfo(skb)->frags[i++].size = len - offset;
+
+drop_pages:
+        skb_shinfo(skb)->nr_frags = i;
+
+        for (; i < nfrags; i++)
+            put_page(skb_shinfo(skb)->frags[i].page);
+
+        if (skb_shinfo(skb)->frag_list)
+            skb_drop_fraglist(skb);
+        goto done;
     }
 
-    if (offset < len) {
+    for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
+         fragp = &frag->next) {
+        int end = offset + frag->len;
+
+        if (skb_shared(frag)) {
+            struct sk_buff *nfrag;
+
+            nfrag = skb_clone(frag, GFP_ATOMIC);
+            if (unlikely(!nfrag))
+                return -ENOMEM;
+
+            nfrag->next = frag->next;
+            kfree_skb(frag);
+            frag = nfrag;
+            *fragp = frag;
+        }
+
+        if (end < len) {
+            offset = end;
+            continue;
+        }
+
+        if (end > len &&
+            unlikely((err = pskb_trim(frag, len - offset))))
+            return err;
+
+        if (frag->next)
+            skb_drop_list(&frag->next);
+        break;
+    }
+
+done:
+    if (len > skb_headlen(skb)) {
         skb->data_len -= skb->len - len;
         skb->len = len;
     } else {
-        if (len <= skb_headlen(skb)) {
-            skb->len = len;
-            skb->data_len = 0;
-            skb->tail = skb->data + len;
-            if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
-                skb_drop_fraglist(skb);
-        } else {
-            skb->data_len -= skb->len - len;
-            skb->len = len;
-        }
+        skb->len = len;
+        skb->data_len = 0;
+        skb->tail = skb->data + len;
     }
 
     return 0;
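
A worked trace of the rewritten ___pskb_trim(), under assumed inputs: a cloned skb with headlen 100, two page frags of 50 bytes each, and len = 120. The clone forces pskb_expand_head() up front; offset (100) is below len, so the frag loop runs; frag 0 ends at 150 >= 120, so its size becomes 120 - 100 = 20 and i advances to 1; drop_pages then sets nr_frags = 1, releases frag 1's page, and drops any frag_list; finally, since len (120) > headlen (100), only data_len shrinks by skb->len - 120. The second loop, new in this version, handles trimming inside a frag_list, cloning any shared fragment before modifying it.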
@@ -2003,6 +2066,7 @@ EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(__alloc_skb);
+EXPORT_SYMBOL(__netdev_alloc_skb);
 EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);
diff --git a/net/core/stream.c b/net/core/stream.c
index e9489696f694..d1d7decf70b0 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -196,15 +196,13 @@ EXPORT_SYMBOL(sk_stream_error);
 
 void __sk_stream_mem_reclaim(struct sock *sk)
 {
-    if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) {
-        atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
-               sk->sk_prot->memory_allocated);
-        sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
-        if (*sk->sk_prot->memory_pressure &&
-            (atomic_read(sk->sk_prot->memory_allocated) <
-             sk->sk_prot->sysctl_mem[0]))
-            *sk->sk_prot->memory_pressure = 0;
-    }
+    atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
+           sk->sk_prot->memory_allocated);
+    sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
+    if (*sk->sk_prot->memory_pressure &&
+        (atomic_read(sk->sk_prot->memory_allocated) <
+         sk->sk_prot->sysctl_mem[0]))
+        *sk->sk_prot->memory_pressure = 0;
 }
 
 EXPORT_SYMBOL(__sk_stream_mem_reclaim);
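
The guard that was removed made reclaim a no-op until at least one full quantum had accumulated; the unconditional form always releases whole quanta and keeps only the sub-quantum remainder. Assuming the usual SK_STREAM_MEM_QUANTUM of PAGE_SIZE (4096 bytes), sk_forward_alloc = 10000 returns 10000 / 4096 = 2 quanta to memory_allocated and retains 10000 & 4095 = 1808 bytes.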
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index b7c98dbcdb81..248a6b666aff 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -29,6 +29,7 @@
 #include <linux/socket.h>
 #include <linux/rtnetlink.h> /* for BUG_TRAP */
 #include <net/tcp.h>
+#include <net/netdma.h>
 
 #define NET_DMA_DEFAULT_COPYBREAK 4096
 
diff --git a/net/core/utils.c b/net/core/utils.c
index 4f96f389243d..e31c90e05594 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -130,12 +130,13 @@ void __init net_random_init(void)
 static int net_random_reseed(void)
 {
     int i;
-    unsigned long seed[NR_CPUS];
+    unsigned long seed;
 
-    get_random_bytes(seed, sizeof(seed));
     for_each_possible_cpu(i) {
         struct nrnd_state *state = &per_cpu(net_rand_state,i);
-        __net_srandom(state, seed[i]);
+
+        get_random_bytes(&seed, sizeof(seed));
+        __net_srandom(state, seed);
     }
     return 0;
 }
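
Two improvements in one hunk: the on-stack seed array could be enormous (with NR_CPUS = 1024 and 8-byte longs, 1024 x 8 = 8 KiB, a large bite out of a kernel stack), and get_random_bytes() is now called once per possible CPU inside the loop, so each per-cpu PRNG still receives an independent seed while only 8 bytes of stack are used.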
diff --git a/net/core/wireless.c b/net/core/wireless.c
index d2bc72d318f7..de0bde4b51dd 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -82,6 +82,7 @@
 #include <linux/init.h>            /* for __init */
 #include <linux/if_arp.h>          /* ARPHRD_ETHER */
 #include <linux/etherdevice.h>     /* compare_ether_addr */
+#include <linux/interrupt.h>
 
 #include <linux/wireless.h>        /* Pretty obvious */
 #include <net/iw_handler.h>        /* New driver API */
@@ -1842,6 +1843,18 @@ int wireless_rtnetlink_set(struct net_device * dev,
  */
 
 #ifdef WE_EVENT_RTNETLINK
+static struct sk_buff_head wireless_nlevent_queue;
+
+static void wireless_nlevent_process(unsigned long data)
+{
+    struct sk_buff *skb;
+
+    while ((skb = skb_dequeue(&wireless_nlevent_queue)))
+        netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
+}
+
+static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0);
+
 /* ---------------------------------------------------------------- */
 /*
  * Fill a rtnetlink message with our event data.
@@ -1904,8 +1917,17 @@ static inline void rtmsg_iwinfo(struct net_device * dev,
         return;
     }
     NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
-    netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
+    skb_queue_tail(&wireless_nlevent_queue, skb);
+    tasklet_schedule(&wireless_nlevent_tasklet);
+}
+
+static int __init wireless_nlevent_init(void)
+{
+    skb_queue_head_init(&wireless_nlevent_queue);
+    return 0;
 }
+
+subsys_initcall(wireless_nlevent_init);
 #endif    /* WE_EVENT_RTNETLINK */
 
 /* ---------------------------------------------------------------- */
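
Queueing the event and letting a tasklet do the netlink_broadcast() defers the send to softirq context; presumably this makes rtmsg_iwinfo() safe to call from the interrupt-context paths wireless drivers use. The shape of the pattern as a sketch (the queue name and deliver() consumer are hypothetical):

    /* Producers on any context feed a queue; a tasklet drains it. */
    static struct sk_buff_head evq;

    static void evq_drain(unsigned long data)
    {
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&evq)))
            deliver(skb);    /* hypothetical consumer */
    }
    static DECLARE_TASKLET(evq_tasklet, evq_drain, 0);

    /* producer side:
     *     skb_queue_tail(&evq, skb);
     *     tasklet_schedule(&evq_tasklet);
     */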
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index c39bff706cfc..090bc39e8199 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -2,7 +2,7 @@
  *  net/dccp/ccids/ccid3.c
  *
  *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- *  Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com>
+ *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
  *
  *  An implementation of the DCCP protocol
  *
@@ -342,6 +342,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
         new_packet->dccphtx_ccval =
             DCCP_SKB_CB(skb)->dccpd_ccval =
                 hctx->ccid3hctx_last_win_count;
+        timeval_add_usecs(&hctx->ccid3hctx_t_nom,
+                          hctx->ccid3hctx_t_ipi);
     }
 out:
     return rc;
@@ -413,7 +415,8 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
     case TFRC_SSTATE_NO_FBACK:
     case TFRC_SSTATE_FBACK:
         if (len > 0) {
-            hctx->ccid3hctx_t_nom = now;
+            timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
+                              hctx->ccid3hctx_t_ipi);
             ccid3_calc_new_t_ipi(hctx);
             ccid3_calc_new_delta(hctx);
             timeval_add_usecs(&hctx->ccid3hctx_t_nom,
@@ -757,8 +760,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
     }
 
     hcrx->ccid3hcrx_tstamp_last_feedback = now;
-    hcrx->ccid3hcrx_last_counter         = packet->dccphrx_ccval;
-    hcrx->ccid3hcrx_seqno_last_counter   = packet->dccphrx_seqno;
+    hcrx->ccid3hcrx_ccval_last_counter   = packet->dccphrx_ccval;
     hcrx->ccid3hcrx_bytes_recv           = 0;
 
     /* Convert to multiples of 10us */
@@ -782,7 +784,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
     if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
         return 0;
 
-    DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter;
+    DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;
 
     if (dccp_packet_without_ack(skb))
         return 0;
@@ -854,6 +856,11 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
         interval = 1;
     }
 found:
+    if (!tail) {
+        LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n",
+                       __FUNCTION__);
+        return ~0;
+    }
     rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
     ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
                    dccp_role(sk), sk, rtt);
@@ -864,9 +871,20 @@ found:
     delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
     x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
 
+    if (x_recv == 0)
+        x_recv = hcrx->ccid3hcrx_x_recv;
+
     tmp1 = (u64)x_recv * (u64)rtt;
     do_div(tmp1,10000000);
     tmp2 = (u32)tmp1;
+
+    if (!tmp2) {
+        LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 "
+                       "%s: x_recv = %u, rtt =%u\n",
+                       __FUNCTION__, x_recv, rtt);
+        return ~0;
+    }
+
     fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
     /* do not alter order above or you will get overflow on 32 bit */
     p = tfrc_calc_x_reverse_lookup(fval);
@@ -882,31 +900,101 @@ found:
 static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
 {
     struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
+    struct dccp_li_hist_entry *next, *head;
+    u64 seq_temp;
 
-    if (seq_loss != DCCP_MAX_SEQNO + 1 &&
-        list_empty(&hcrx->ccid3hcrx_li_hist)) {
-        struct dccp_li_hist_entry *li_tail;
+    if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
+        if (!dccp_li_hist_interval_new(ccid3_li_hist,
+            &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
+            return;
 
-        li_tail = dccp_li_hist_interval_new(ccid3_li_hist,
-                                            &hcrx->ccid3hcrx_li_hist,
-                                            seq_loss, win_loss);
-        if (li_tail == NULL)
+        next = (struct dccp_li_hist_entry *)
+            hcrx->ccid3hcrx_li_hist.next;
+        next->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
+    } else {
+        struct dccp_li_hist_entry *entry;
+        struct list_head *tail;
+
+        head = (struct dccp_li_hist_entry *)
+            hcrx->ccid3hcrx_li_hist.next;
+        /* FIXME win count check removed as was wrong */
+        /* should make this check with receive history */
+        /* and compare there as per section 10.2 of RFC4342 */
+
+        /* new loss event detected */
+        /* calculate last interval length */
+        seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
+        entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+
+        if (entry == NULL) {
+            printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__);
+            dump_stack();
             return;
-        li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
-    } else
-            LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of "
-                           "interval\n", __FUNCTION__);
+        }
+
+        list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);
+
+        tail = hcrx->ccid3hcrx_li_hist.prev;
+        list_del(tail);
+        kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);
+
+        /* Create the newest interval */
+        entry->dccplih_seqno     = seq_loss;
+        entry->dccplih_interval  = seq_temp;
+        entry->dccplih_win_count = win_loss;
+    }
 }
 
-static void ccid3_hc_rx_detect_loss(struct sock *sk)
+static int ccid3_hc_rx_detect_loss(struct sock *sk,
+    struct dccp_rx_hist_entry *packet)
 {
     struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
-    u8 win_loss;
-    const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist,
-                                                  &hcrx->ccid3hcrx_li_hist,
-                                                  &win_loss);
+    struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
+    u64 seqno = packet->dccphrx_seqno;
+    u64 tmp_seqno;
+    int loss = 0;
+    u8 ccval;
+
+
+    tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
+
+    if (!rx_hist ||
+        follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
+        hcrx->ccid3hcrx_seqno_nonloss = seqno;
+        hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
+        goto detect_out;
+    }
 
-    ccid3_hc_rx_update_li(sk, seq_loss, win_loss);
+    while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
+        > TFRC_RECV_NUM_LATE_LOSS) {
+        loss = 1;
+        ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
+            hcrx->ccid3hcrx_ccval_nonloss);
+        tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
+        dccp_inc_seqno(&tmp_seqno);
+        hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
+        dccp_inc_seqno(&tmp_seqno);
+        while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
+            tmp_seqno, &ccval)) {
+            hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
+            hcrx->ccid3hcrx_ccval_nonloss = ccval;
+            dccp_inc_seqno(&tmp_seqno);
+        }
+    }
+
+    /* FIXME - this code could be simplified with above while */
+    /* but works at moment */
+    if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
+        hcrx->ccid3hcrx_seqno_nonloss = seqno;
+        hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
+    }
+
+detect_out:
+    dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
+                            &hcrx->ccid3hcrx_li_hist, packet,
+                            hcrx->ccid3hcrx_seqno_nonloss);
+    return loss;
 }
 
 static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -916,8 +1004,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
     struct dccp_rx_hist_entry *packet;
     struct timeval now;
     u8 win_count;
-    u32 p_prev, r_sample, t_elapsed;
-    int ins;
+    u32 p_prev, rtt_prev, r_sample, t_elapsed;
+    int loss;
 
     BUG_ON(hcrx == NULL ||
            !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
@@ -932,7 +1020,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
     case DCCP_PKT_DATAACK:
         if (opt_recv->dccpor_timestamp_echo == 0)
             break;
-        p_prev = hcrx->ccid3hcrx_rtt;
+        rtt_prev = hcrx->ccid3hcrx_rtt;
         dccp_timestamp(sk, &now);
         timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
         r_sample = timeval_usecs(&now);
@@ -951,8 +1039,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
             hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
                                   r_sample / 10;
 
-        if (p_prev != hcrx->ccid3hcrx_rtt)
-            ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n",
+        if (rtt_prev != hcrx->ccid3hcrx_rtt)
+            ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
                            dccp_role(sk), hcrx->ccid3hcrx_rtt,
                            opt_recv->dccpor_elapsed_time);
         break;
@@ -973,8 +1061,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 
     win_count = packet->dccphrx_ccval;
 
-    ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
-                                  &hcrx->ccid3hcrx_li_hist, packet);
+    loss = ccid3_hc_rx_detect_loss(sk, packet);
 
     if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
         return;
@@ -991,7 +1078,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
     case TFRC_RSTATE_DATA:
         hcrx->ccid3hcrx_bytes_recv += skb->len -
                                       dccp_hdr(skb)->dccph_doff * 4;
-        if (ins != 0)
+        if (loss)
             break;
 
         dccp_timestamp(sk, &now);
@@ -1012,7 +1099,6 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
     ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
                    dccp_role(sk), sk, dccp_state_name(sk->sk_state));
 
-    ccid3_hc_rx_detect_loss(sk);
     p_prev = hcrx->ccid3hcrx_p;
 
     /* Calculate loss event rate */
@@ -1022,6 +1108,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
         /* Scaling up by 1000000 as fixed decimal */
         if (i_mean != 0)
             hcrx->ccid3hcrx_p = 1000000 / i_mean;
+    } else {
+        printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__);
+        dump_stack();
     }
 
     if (hcrx->ccid3hcrx_p > p_prev) {
@@ -1230,7 +1319,7 @@ static __exit void ccid3_module_exit(void)
 }
 module_exit(ccid3_module_exit);
 
-MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, "
+MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
               "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
 MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
 MODULE_LICENSE("GPL");
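
A worked pass through the new receiver-side loss detection above, under stated assumptions (TFRC_RECV_NUM_LATE_LOSS = 3, the usual NDUPACK value): suppose ccid3hcrx_seqno_nonloss is 10 and packet 15 arrives. follows48(15, 10) fails, and dccp_delta_seqno(10, 15) = 5 > 3, so the while loop records a loss interval via ccid3_hc_rx_update_li() and advances the non-loss cursor, first to 11 (delta 4, still > 3, so a second interval) and then to 12, where delta(12, 15) = 3 stops the loop; dccp_rx_hist_find_entry() is used at each step to skate the cursor over sequence numbers that actually arrived. The packet is then added to the history and loss = 1 is returned, which ccid3_hc_rx_packet_recv() uses to branch into the data-loss reaction path instead of the routine feedback check.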
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 5ade4f668b22..0a2cb7536d26 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -1,13 +1,13 @@
 /*
  *  net/dccp/ccids/ccid3.h
  *
- *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
+ *  Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
  *
  *  An implementation of the DCCP protocol
  *
  *  This code has been developed by the University of Waikato WAND
  *  research group. For further information please see http://www.wand.net.nz/
- *  or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
+ *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
  *  authors:
@@ -120,9 +120,10 @@ struct ccid3_hc_rx_sock {
 #define ccid3hcrx_x_recv        ccid3hcrx_tfrc.tfrcrx_x_recv
 #define ccid3hcrx_rtt           ccid3hcrx_tfrc.tfrcrx_rtt
 #define ccid3hcrx_p             ccid3hcrx_tfrc.tfrcrx_p
-    u64            ccid3hcrx_seqno_last_counter:48,
+    u64            ccid3hcrx_seqno_nonloss:48,
+                   ccid3hcrx_ccval_nonloss:4,
                    ccid3hcrx_state:8,
-                   ccid3hcrx_last_counter:4;
+                   ccid3hcrx_ccval_last_counter:4;
     u32            ccid3hcrx_bytes_recv;
     struct timeval ccid3hcrx_tstamp_last_feedback;
     struct timeval ccid3hcrx_tstamp_last_ack;
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 5d7b7d864385..906c81ab9d4f 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -2,7 +2,7 @@
  *  net/dccp/ccids/lib/loss_interval.c
  *
  *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- *  Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -12,6 +12,7 @@
  */
 
 #include <linux/module.h>
+#include <net/sock.h>
 
 #include "loss_interval.h"
 
@@ -90,13 +91,13 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
     u32 w_tot  = 0;
 
     list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) {
-        if (i < DCCP_LI_HIST_IVAL_F_LENGTH) {
+        if (li_entry->dccplih_interval != ~0) {
             i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i];
             w_tot  += dccp_li_hist_w[i];
+            if (i != 0)
+                i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
         }
 
-        if (i != 0)
-            i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
 
         if (++i > DCCP_LI_HIST_IVAL_F_LENGTH)
             break;
@@ -107,37 +108,36 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
 
     i_tot = max(i_tot0, i_tot1);
 
-    /* FIXME: Why do we do this? -Ian McDonald */
-    if (i_tot * 4 < w_tot)
-        i_tot = w_tot * 4;
+    if (!w_tot) {
+        LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__);
+        return 1;
+    }
 
-    return i_tot * 4 / w_tot;
+    return i_tot / w_tot;
 }
 
 EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean);
 
-struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist,
-                                                     struct list_head *list,
-                                                     const u64 seq_loss,
-                                                     const u8 win_loss)
+int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
+    struct list_head *list, const u64 seq_loss, const u8 win_loss)
 {
-    struct dccp_li_hist_entry *tail = NULL, *entry;
+    struct dccp_li_hist_entry *entry;
     int i;
 
-    for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) {
+    for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
         entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
         if (entry == NULL) {
             dccp_li_hist_purge(hist, list);
-            return NULL;
+            dump_stack();
+            return 0;
         }
-        if (tail == NULL)
-            tail = entry;
+        entry->dccplih_interval = ~0;
         list_add(&entry->dccplih_node, list);
     }
 
     entry->dccplih_seqno     = seq_loss;
     entry->dccplih_win_count = win_loss;
-    return tail;
+    return 1;
 }
 
 EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new);
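
For reference, dccp_li_hist_calc_i_mean() computes the TFRC average loss interval of RFC 3448 section 5.4: with the most recent loss intervals I_0 .. I_{n-1} and the RFC's weights (1, 1, 1, 1, 0.8, 0.6, 0.4, 0.2, kept pre-scaled as integers in dccp_li_hist_w[]),

    I_tot0 = sum over i = 0 .. n-1 of ( w_i     * I_i )
    I_tot1 = sum over i = 1 .. n   of ( w_(i-1) * I_i )
    I_mean = max(I_tot0, I_tot1) / W_tot,  where W_tot = sum of w_i

The hunk above also makes the freshly allocated placeholder intervals (pre-filled with ~0 by dccp_li_hist_interval_new()) invisible to the sums, which is presumably why w_tot can now legitimately be zero and needs the explicit guard.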
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 43bf78269d1d..0ae85f0340b2 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -4,7 +4,7 @@
  *  net/dccp/ccids/lib/loss_interval.h
  *
  *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- *  Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ *  Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
  *  This program is free software; you can redistribute it and/or modify it
@@ -52,9 +52,6 @@ extern void dccp_li_hist_purge(struct dccp_li_hist *hist,
 
 extern u32 dccp_li_hist_calc_i_mean(struct list_head *list);
 
-extern struct dccp_li_hist_entry *
-    dccp_li_hist_interval_new(struct dccp_li_hist *hist,
-                              struct list_head *list,
-                              const u64 seq_loss,
-                              const u8 win_loss);
+extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
+    struct list_head *list, const u64 seq_loss, const u8 win_loss);
 #endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index ad98d6a322eb..b876c9c81c65 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -1,13 +1,13 @@
 /*
- *  net/dccp/packet_history.h
+ *  net/dccp/packet_history.c
  *
- *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
+ *  Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
  *
  *  An implementation of the DCCP protocol
  *
  *  This code has been developed by the University of Waikato WAND
  *  research group. For further information please see http://www.wand.net.nz/
- *  or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
+ *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
  *  authors:
@@ -112,64 +112,27 @@ struct dccp_rx_hist_entry *
 
 EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet);
 
-int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
-                            struct list_head *rx_list,
-                            struct list_head *li_list,
-                            struct dccp_rx_hist_entry *packet)
+void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
+                             struct list_head *rx_list,
+                             struct list_head *li_list,
+                             struct dccp_rx_hist_entry *packet,
+                             u64 nonloss_seqno)
 {
-    struct dccp_rx_hist_entry *entry, *next, *iter;
+    struct dccp_rx_hist_entry *entry, *next;
     u8 num_later = 0;
 
-    iter = dccp_rx_hist_head(rx_list);
-    if (iter == NULL)
-        dccp_rx_hist_add_entry(rx_list, packet);
-    else {
-        const u64 seqno = packet->dccphrx_seqno;
-
-        if (after48(seqno, iter->dccphrx_seqno))
-            dccp_rx_hist_add_entry(rx_list, packet);
-        else {
-            if (dccp_rx_hist_entry_data_packet(iter))
-                num_later = 1;
-
-            list_for_each_entry_continue(iter, rx_list,
-                                         dccphrx_node) {
-                if (after48(seqno, iter->dccphrx_seqno)) {
-                    dccp_rx_hist_add_entry(&iter->dccphrx_node,
-                                           packet);
-                    goto trim_history;
-                }
-
-                if (dccp_rx_hist_entry_data_packet(iter))
-                    num_later++;
-
-                if (num_later == TFRC_RECV_NUM_LATE_LOSS) {
-                    dccp_rx_hist_entry_delete(hist, packet);
-                    return 1;
-                }
-            }
-
-            if (num_later < TFRC_RECV_NUM_LATE_LOSS)
-                dccp_rx_hist_add_entry(rx_list, packet);
-            /*
-             * FIXME: else what? should we destroy the packet
-             * like above?
-             */
-        }
-    }
-
-trim_history:
-    /*
-     * Trim history (remove all packets after the NUM_LATE_LOSS + 1
-     * data packets)
-     */
+    list_add(&packet->dccphrx_node, rx_list);
+
     num_later = TFRC_RECV_NUM_LATE_LOSS + 1;
 
     if (!list_empty(li_list)) {
         list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
             if (num_later == 0) {
-                list_del_init(&entry->dccphrx_node);
-                dccp_rx_hist_entry_delete(hist, entry);
+                if (after48(nonloss_seqno,
+                    entry->dccphrx_seqno)) {
+                    list_del_init(&entry->dccphrx_node);
+                    dccp_rx_hist_entry_delete(hist, entry);
+                }
             } else if (dccp_rx_hist_entry_data_packet(entry))
                 --num_later;
         }
@@ -217,94 +180,10 @@ trim_history:
                 --num_later;
         }
     }
-
-    return 0;
 }
 
 EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
 
-u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
-                             struct list_head *li_list, u8 *win_loss)
-{
-    struct dccp_rx_hist_entry *entry, *next, *packet;
-    struct dccp_rx_hist_entry *a_loss = NULL;
-    struct dccp_rx_hist_entry *b_loss = NULL;
-    u64 seq_loss = DCCP_MAX_SEQNO + 1;
-    u8 num_later = TFRC_RECV_NUM_LATE_LOSS;
-
-    list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
-        if (num_later == 0) {
-            b_loss = entry;
-            break;
-        } else if (dccp_rx_hist_entry_data_packet(entry))
-            --num_later;
-    }
-
-    if (b_loss == NULL)
-        goto out;
-
-    num_later = 1;
-    list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
-        if (num_later == 0) {
-            a_loss = entry;
-            break;
-        } else if (dccp_rx_hist_entry_data_packet(entry))
-            --num_later;
-    }
-
-    if (a_loss == NULL) {
-        if (list_empty(li_list)) {
-            /* no loss event have occured yet */
-            LIMIT_NETDEBUG("%s: TODO: find a lost data packet by "
-                           "comparing to initial seqno\n",
-                           __FUNCTION__);
-            goto out;
-        } else {
-            LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!",
-                           __FUNCTION__);
-            goto out;
-        }
-    }
-
-    /* Locate a lost data packet */
-    entry = packet = b_loss;
-    list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
-        u64 delta = dccp_delta_seqno(entry->dccphrx_seqno,
-                                     packet->dccphrx_seqno);
-
-        if (delta != 0) {
-            if (dccp_rx_hist_entry_data_packet(packet))
-                --delta;
-            /*
-             * FIXME: check this, probably this % usage is because
-             * in earlier drafts the ndp count was just 8 bits
-             * long, but now it cam be up to 24 bits long.
-             */
-#if 0
-            if (delta % DCCP_NDP_LIMIT !=
-                (packet->dccphrx_ndp -
-                 entry->dccphrx_ndp) % DCCP_NDP_LIMIT)
-#endif
-            if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) {
-                seq_loss = entry->dccphrx_seqno;
-                dccp_inc_seqno(&seq_loss);
-            }
-        }
-        packet = entry;
-        if (packet == a_loss)
-            break;
-    }
-out:
-    if (seq_loss != DCCP_MAX_SEQNO + 1)
-        *win_loss = a_loss->dccphrx_ccval;
-    else
-        *win_loss = 0; /* Paranoia */
-
-    return seq_loss;
-}
-
-EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss);
-
 struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
 {
     struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
@@ -365,6 +244,25 @@ struct dccp_tx_hist_entry *
 
 EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
 
+int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
+                            u8 *ccval)
+{
+    struct dccp_rx_hist_entry *packet = NULL, *entry;
+
+    list_for_each_entry(entry, list, dccphrx_node)
+        if (entry->dccphrx_seqno == seq) {
+            packet = entry;
+            break;
+        }
+
+    if (packet)
+        *ccval = packet->dccphrx_ccval;
+
+    return packet != NULL;
+}
+
+EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
+
 void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
                               struct list_head *list,
                               struct dccp_tx_hist_entry *packet)
@@ -391,7 +289,7 @@ void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
 
 EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
 
-MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, "
+MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
               "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
 MODULE_DESCRIPTION("DCCP TFRC library");
 MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 673c209e4e85..067cf1c85a37 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -1,13 +1,13 @@
 /*
  *  net/dccp/packet_history.h
  *
- *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
+ *  Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
  *
  *  An implementation of the DCCP protocol
  *
  *  This code has been developed by the University of Waikato WAND
  *  research group. For further information please see http://www.wand.net.nz/
- *  or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
+ *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
  *  authors:
@@ -106,6 +106,8 @@ static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
 extern struct dccp_tx_hist_entry *
     dccp_tx_hist_find_entry(const struct list_head *list,
                             const u64 seq);
+extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
+                                   u8 *ccval);
 
 static inline void dccp_tx_hist_add_entry(struct list_head *list,
                                           struct dccp_tx_hist_entry *entry)
@@ -164,12 +166,6 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
 extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
                                struct list_head *list);
 
-static inline void dccp_rx_hist_add_entry(struct list_head *list,
-                                          struct dccp_rx_hist_entry *entry)
-{
-    list_add(&entry->dccphrx_node, list);
-}
-
 static inline struct dccp_rx_hist_entry *
     dccp_rx_hist_head(struct list_head *list)
 {
@@ -188,10 +184,11 @@ static inline int
            entry->dccphrx_type == DCCP_PKT_DATAACK;
 }
 
-extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
-                                   struct list_head *rx_list,
-                                   struct list_head *li_list,
-                                   struct dccp_rx_hist_entry *packet);
+extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
+                                    struct list_head *rx_list,
+                                    struct list_head *li_list,
+                                    struct dccp_rx_hist_entry *packet,
+                                    u64 nonloss_seqno);
 
 extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
                                     struct list_head *li_list, u8 *win_loss);
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 130c4c40cfe3..45f30f59ea2a 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -4,7 +4,7 @@
  *  net/dccp/ccids/lib/tfrc.h
  *
  *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- *  Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ *  Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
  *
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 4fd2ebebf5a0..44076e0c6591 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -2,7 +2,7 @@
  * net/dccp/ccids/lib/tfrc_equation.c
  *
  * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
  * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
  *
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index d00a2f4ee5dd..a5c5475724c0 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -5,7 +5,7 @@
  *
  * An implementation of the DCCP protocol
  * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as
@@ -81,6 +81,14 @@ static inline u64 max48(const u64 seq1, const u64 seq2)
 	return after48(seq1, seq2) ? seq1 : seq2;
 }
 
+/* is seq1 next seqno after seq2 */
+static inline int follows48(const u64 seq1, const u64 seq2)
+{
+	int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF);
+
+	return diff==1;
+}
+
 enum {
 	DCCP_MIB_NUM = 0,
 	DCCP_MIB_ACTIVEOPENS,			/* ActiveOpens */
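
The follows48() helper added above compares only the low 16 bits of the two 48-bit sequence numbers, so it recognises a direct successor inside a 16-bit window but does not detect succession across a 16-bit wrap. A standalone userspace sketch of the same comparison (illustrative only, not part of the patch):

#include <stdio.h>

/* Mirrors the patch's follows48(): true iff seq1 directly follows
 * seq2, judged on the low 16 bits of each sequence number only. */
static int follows48(unsigned long long seq1, unsigned long long seq2)
{
	int diff = (int)(seq1 & 0xFFFF) - (int)(seq2 & 0xFFFF);

	return diff == 1;
}

int main(void)
{
	printf("%d\n", follows48(101, 100));        /* 1: direct successor */
	printf("%d\n", follows48(0x10000, 0xFFFF)); /* 0: a 16-bit wrap is not seen */
	return 0;
}
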
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 6048373c7186..b44c45504fb6 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -26,4 +26,6 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk);
 extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk);
 extern int dccp_feat_init(struct dccp_minisock *dmsk);
 
+extern int dccp_feat_default_sequence_window;
+
 #endif /* _DCCP_FEAT_H */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index c3073e7e81d3..7f56f7e8f571 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -504,8 +504,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq = inet_rsk(req);
 	ireq->loc_addr = daddr;
 	ireq->rmt_addr = saddr;
-	req->rcv_wnd = 100; /* Fake, option parsing will get the
-			       right value */
+	req->rcv_wnd = dccp_feat_default_sequence_window;
 	ireq->opt = NULL;
 
 	/*
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ff42bc43263d..610c722ac27f 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -31,6 +31,7 @@
 
 #include "dccp.h"
 #include "ipv6.h"
+#include "feat.h"
 
 /* Socket used for sending RSTs and ACKs */
 static struct socket *dccp_v6_ctl_socket;
@@ -229,7 +230,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	ipv6_addr_copy(&np->saddr, saddr);
 	inet->rcv_saddr = LOOPBACK4_IPV6;
 
-	ip6_dst_store(sk, dst, NULL);
+	__ip6_dst_store(sk, dst, NULL);
 
 	icsk->icsk_ext_hdr_len = 0;
 	if (np->opt != NULL)
@@ -707,8 +708,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq = inet_rsk(req);
 	ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
 	ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
-	req->rcv_wnd = 100; /* Fake, option parsing will get the
-			       right value */
+	req->rcv_wnd = dccp_feat_default_sequence_window;
 	ireq6->pktopts = NULL;
 
 	if (ipv6_opt_accepted(sk, skb) ||
@@ -863,7 +863,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 	 * comment in that function for the gory details. -acme
 	 */
 
-	ip6_dst_store(newsk, dst, NULL);
+	__ip6_dst_store(newsk, dst, NULL);
 	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
 						      NETIF_F_TSO);
 	newdp6 = (struct dccp6_sock *)newsk;
diff --git a/net/dccp/options.c b/net/dccp/options.c
index c3cda1e39aa8..07a34696ac97 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -4,7 +4,7 @@
  * An implementation of the DCCP protocol
  * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
  * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -29,6 +29,8 @@ int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO;
 int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
 int dccp_feat_default_send_ndp_count  = DCCPF_INITIAL_SEND_NDP_COUNT;
 
+EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window);
+
 void dccp_minisock_init(struct dccp_minisock *dmsk)
 {
 	dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index f4f0627ea41c..6f14bb5a28d4 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -484,7 +484,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
 			err = -EINVAL;
 		else
 			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
-						     (struct dccp_so_feat *)
+						     (struct dccp_so_feat __user *)
 						     optval);
 		break;
 
@@ -493,7 +493,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
 			err = -EINVAL;
 		else
 			err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
-						     (struct dccp_so_feat *)
+						     (struct dccp_so_feat __user *)
 						     optval);
 		break;
 
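
The only change in the two hunks above is the __user qualifier on the cast: it marks optval as a user-space pointer so static checkers such as sparse can flag any direct dereference, while the generated code is unchanged. A userspace sketch of how the annotation works (the stub only mirrors the call shape; names are illustrative):

#include <stdio.h>
#include <string.h>

/* Under sparse (__CHECKER__) the kernel defines __user so that a
 * direct dereference is reported; for the compiler it is empty. */
#ifdef __CHECKER__
#define __user __attribute__((noderef, address_space(1)))
#else
#define __user
#endif

struct dccp_so_feat; /* opaque here; the real layout lives in kernel headers */

/* Same shape as dccp_setsockopt_change(): the callee must not touch
 * optval directly, only hand it to a checked copy routine. */
static int setsockopt_change(int type, struct dccp_so_feat __user *optval)
{
	(void)type;
	(void)optval;
	return 0;
}

int main(void)
{
	char buf[16];

	memset(buf, 0, sizeof(buf));
	/* The cast mirrors the patch: annotate before passing the pointer on. */
	return setsockopt_change(1, (struct dccp_so_feat __user *)buf);
}
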
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 98a25208440d..476455fbdb03 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -413,11 +413,7 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
 {
 	struct dn_ifaddr *ifa;
 
-	ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);
-
-	if (ifa) {
-		memset(ifa, 0, sizeof(*ifa));
-	}
+	ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
 
 	return ifa;
 }
@@ -1105,10 +1101,9 @@ struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 		return NULL;
 
 	*err = -ENOBUFS;
-	if ((dn_db = kmalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
+	if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
 		return NULL;
 
-	memset(dn_db, 0, sizeof(struct dn_dev));
 	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
 	smp_wmb();
 	dev->dn_ptr = dn_db;
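
The two hunks above are instances of a conversion repeated throughout this merge: kmalloc() followed by memset(..., 0, ...) becomes a single kzalloc() (or kcalloc() for arrays). That removes the window where a structure exists unzeroed and keeps the zeroed size in lockstep with the allocated size. A userspace sketch of the same idea, with calloc() standing in for kzalloc() (the struct is illustrative):

#include <stdlib.h>
#include <string.h>

struct ifaddr_demo {
	int flags;
	char label[16];
};

/* Before: allocate, then zero by hand; the memset size must be kept
 * in sync with the allocation by the programmer. */
static struct ifaddr_demo *alloc_ifa_old(void)
{
	struct ifaddr_demo *ifa = malloc(sizeof(*ifa));

	if (ifa)
		memset(ifa, 0, sizeof(*ifa));
	return ifa;
}

/* After: one call that allocates and zeroes, the userspace analogue
 * of the kzalloc()/kcalloc() conversions in this merge. */
static struct ifaddr_demo *alloc_ifa_new(void)
{
	return calloc(1, sizeof(struct ifaddr_demo));
}

int main(void)
{
	struct ifaddr_demo *a = alloc_ifa_old();
	struct ifaddr_demo *b = alloc_ifa_new();

	free(a);
	free(b);
	return 0;
}
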
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 0375077391b7..fa20e2efcfc1 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -283,11 +283,10 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
 		goto err_inval;
 	}
 
-	fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
+	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
 	err = -ENOBUFS;
 	if (fi == NULL)
 		goto failure;
-	memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct dn_fib_nh));
 
 	fi->fib_protocol = r->rtm_protocol;
 	fi->fib_nhs = nhs;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 5ce9c9e0565c..ff0ebe99137d 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -580,12 +580,11 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
 	int rc = -ENOMEM;
-	struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
 
 	if (!s)
 		goto out;
 
-	memset(s, 0, sizeof(*s));
 	rc = seq_open(file, &dn_neigh_seq_ops);
 	if (rc)
 		goto out_kfree;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1355614ec11b..743e9fcf7c5a 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -925,8 +925,13 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
 		for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
 			if (!dev_out->dn_ptr)
 				continue;
-			if (dn_dev_islocal(dev_out, oldflp->fld_src))
-				break;
+			if (!dn_dev_islocal(dev_out, oldflp->fld_src))
+				continue;
+			if ((dev_out->flags & IFF_LOOPBACK) &&
+			    oldflp->fld_dst &&
+			    !dn_dev_islocal(dev_out, oldflp->fld_dst))
+				continue;
+			break;
 		}
 		read_unlock(&dev_base_lock);
 		if (dev_out == NULL)
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 06e785fe5757..6986be754ef2 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -151,10 +151,9 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		}
 	}
 
-	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
+	new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
 	if (!new_r)
 		return -ENOMEM;
-	memset(new_r, 0, sizeof(*new_r));
 
 	if (rta[RTA_SRC-1])
 		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
@@ -399,9 +398,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
 		if (idx < s_idx)
-			continue;
+			goto next;
 		if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
 			break;
+next:
 		idx++;
 	}
 	rcu_read_unlock();
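
The second hunk fixes netlink dump resumption: the skip branch used continue, which bypassed idx++, so idx never advanced past s_idx and a resumed dump skipped every entry. Jumping to a label just before idx++ counts skipped entries too. A standalone model of the bug and the fix (names are illustrative):

#include <stdio.h>

/* When resuming a dump at s_idx, the index must advance for skipped
 * entries as well, or the resume point is never reached. */
static void dump(int n_entries, int s_idx, int buggy)
{
	int idx = 0;

	for (int i = 0; i < n_entries; i++) {
		if (idx < s_idx) {
			if (buggy)
				continue; /* old code: idx never advances */
			goto next;        /* new code: still count the entry */
		}
		printf("emit entry %d (idx %d)\n", i, idx);
next:
		idx++;
	}
}

int main(void)
{
	dump(4, 2, 0); /* correct: emits entries 2 and 3 */
	dump(4, 2, 1); /* buggy: idx stays 0, nothing is ever emitted */
	return 0;
}
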
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 37d9d0a1ac8c..e926c952e363 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -158,12 +158,10 @@ static void dn_rehash_zone(struct dn_zone *dz)
 			break;
 	}
 
-	ht = kmalloc(new_divisor*sizeof(struct dn_fib_node*), GFP_KERNEL);
-
+	ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
 	if (ht == NULL)
 		return;
 
-	memset(ht, 0, new_divisor*sizeof(struct dn_fib_node *));
 	write_lock_bh(&dn_fib_tables_lock);
 	old_ht = dz->dz_hash;
 	dz->dz_hash = ht;
@@ -184,11 +182,10 @@ static void dn_free_node(struct dn_fib_node *f)
 static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
 {
 	int i;
-	struct dn_zone *dz = kmalloc(sizeof(struct dn_zone), GFP_KERNEL);
+	struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
 	if (!dz)
 		return NULL;
 
-	memset(dz, 0, sizeof(struct dn_zone));
 	if (z) {
 		dz->dz_divisor = 16;
 		dz->dz_hashmask = 0x0F;
@@ -197,14 +194,12 @@ static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
 		dz->dz_hashmask = 0;
 	}
 
-	dz->dz_hash = kmalloc(dz->dz_divisor*sizeof(struct dn_fib_node *), GFP_KERNEL);
-
+	dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
 	if (!dz->dz_hash) {
 		kfree(dz);
 		return NULL;
 	}
 
-	memset(dz->dz_hash, 0, dz->dz_divisor*sizeof(struct dn_fib_node*));
 	dz->dz_order = z;
 	dz->dz_mask = dnet_make_mask(z);
 
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 309ae4c6549a..4d66aac13483 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -673,12 +673,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
 		edev = dev->ec_ptr;
 		if (edev == NULL) {
 			/* Magic up a new one. */
-			edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL);
+			edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
 			if (edev == NULL) {
 				err = -ENOMEM;
 				break;
 			}
-			memset(edev, 0, sizeof(struct ec_device));
 			dev->ec_ptr = edev;
 		} else
 			net2dev_map[edev->net] = NULL;
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index dbb08528ddf5..f7e84e9d13ad 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP
 	depends on IEEE80211 && NET_RADIO
 	select CRYPTO
 	select CRYPTO_MICHAEL_MIC
+	select CRC32
 	---help---
 	  Include software based cipher suites in support of IEEE 802.11i
 	  (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
index cb71d794a7d1..5ed0a98b2d76 100644
--- a/net/ieee80211/ieee80211_crypt.c
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -110,11 +110,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
 	unsigned long flags;
 	struct ieee80211_crypto_alg *alg;
 
-	alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+	alg = kzalloc(sizeof(*alg), GFP_KERNEL);
 	if (alg == NULL)
 		return -ENOMEM;
 
-	memset(alg, 0, sizeof(*alg));
 	alg->ops = ops;
 
 	spin_lock_irqsave(&ieee80211_crypto_lock, flags);
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 492647382ad0..ed90a8af1444 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -76,10 +76,9 @@ static void *ieee80211_ccmp_init(int key_idx)
 {
 	struct ieee80211_ccmp_data *priv;
 
-	priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
 	if (priv == NULL)
 		goto fail;
-	memset(priv, 0, sizeof(*priv));
 	priv->key_idx = key_idx;
 
 	priv->tfm = crypto_alloc_tfm("aes", 0);
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index c5a87724aabe..0ebf235f6939 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -39,10 +39,9 @@ static void *prism2_wep_init(int keyidx)
 {
 	struct prism2_wep_data *priv;
 
-	priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
 	if (priv == NULL)
 		goto fail;
-	memset(priv, 0, sizeof(*priv));
 	priv->key_idx = keyidx;
 
 	priv->tfm = crypto_alloc_tfm("arc4", 0);
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 47ccf159372c..72d4d4e04d42 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -368,6 +368,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 	/* Put this code here so that we avoid duplicating it in all
 	 * Rx paths. - Jean II */
+#ifdef CONFIG_WIRELESS_EXT
 #ifdef IW_WIRELESS_SPY		/* defined in iw_handler.h */
 	/* If spy monitoring on */
 	if (ieee->spy_data.spy_number > 0) {
@@ -396,15 +397,16 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 		wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
 	}
 #endif				/* IW_WIRELESS_SPY */
+#endif				/* CONFIG_WIRELESS_EXT */
 
 #ifdef NOT_YET
 	hostap_update_rx_stats(local->ap, hdr, rx_stats);
 #endif
 
 	if (ieee->iw_mode == IW_MODE_MONITOR) {
-		ieee80211_monitor_rx(ieee, skb, rx_stats);
 		stats->rx_packets++;
 		stats->rx_bytes += skb->len;
+		ieee80211_monitor_rx(ieee, skb, rx_stats);
 		return 1;
 	}
 
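
The last hunk above reorders the monitor path: ieee80211_monitor_rx() hands the skb up the stack, after which skb->len must not be read, so the statistics are updated first. A userspace sketch of the use-after-free the old order risked (types are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	size_t len;
};

/* Consumes the packet, as ieee80211_monitor_rx() consumes the skb. */
static void monitor_rx(struct pkt *p)
{
	free(p);
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	size_t rx_bytes = 0;

	if (!p)
		return 1;
	p->len = 1500;

	/* Patched order: read the length while the packet is still ours... */
	rx_bytes += p->len;
	/* ...then hand it off. Swapping these two lines reads freed memory. */
	monitor_rx(p);

	printf("rx_bytes = %zu\n", rx_bytes);
	return 0;
}
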
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index de148ae594f3..bf042139c7ab 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -562,10 +562,13 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
 	struct net_device_stats *stats = &ieee->stats;
 	struct sk_buff *skb_frag;
 	int priority = -1;
+	int fraglen = total_len;
+	int headroom = ieee->tx_headroom;
+	struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
 
 	spin_lock_irqsave(&ieee->lock, flags);
 
-	if (encrypt_mpdu && !ieee->sec.encrypt)
+	if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt))
 		encrypt_mpdu = 0;
 
 	/* If there is no driver handler to take the TXB, dont' bother
@@ -581,20 +584,24 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
 		goto success;
 	}
 
-	if (encrypt_mpdu)
+	if (encrypt_mpdu) {
 		frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+		fraglen += crypt->ops->extra_mpdu_prefix_len +
+			   crypt->ops->extra_mpdu_postfix_len;
+		headroom += crypt->ops->extra_mpdu_prefix_len;
+	}
 
 	/* When we allocate the TXB we allocate enough space for the reserve
 	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
 	 * postfix, header, FCS, etc.) */
-	txb = ieee80211_alloc_txb(1, total_len, ieee->tx_headroom, GFP_ATOMIC);
+	txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC);
 	if (unlikely(!txb)) {
 		printk(KERN_WARNING "%s: Could not allocate TXB\n",
 		       ieee->dev->name);
 		goto failed;
 	}
 	txb->encrypted = 0;
-	txb->payload_size = total_len;
+	txb->payload_size = fraglen;
 
 	skb_frag = txb->fragments[0];
 
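
For encrypted MPDUs the TXB allocation above now accounts for the cipher's per-MPDU prefix and postfix, and the headroom for the prefix; previously the buffer was sized for the plaintext only. A standalone sketch of the corrected size computation (the WEP-like lengths are illustrative, not taken from the patch):

#include <stdio.h>

struct crypt_ops_demo {
	int extra_mpdu_prefix_len;  /* e.g. an IV before the payload */
	int extra_mpdu_postfix_len; /* e.g. an ICV after the payload */
};

int main(void)
{
	struct crypt_ops_demo wep = { .extra_mpdu_prefix_len = 4,
				      .extra_mpdu_postfix_len = 4 };
	int total_len = 1500, tx_headroom = 32;
	int fraglen = total_len, headroom = tx_headroom;
	int encrypt_mpdu = 1;

	if (encrypt_mpdu) {
		fraglen += wep.extra_mpdu_prefix_len +
			   wep.extra_mpdu_postfix_len;
		headroom += wep.extra_mpdu_prefix_len;
	}
	printf("allocate %d bytes with %d bytes of headroom\n",
	       fraglen, headroom);
	return 0;
}
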
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index a78c4f845f66..5cb9cfd35397 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -369,11 +369,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
 		struct ieee80211_crypt_data *new_crypt;
 
 		/* take WEP into use */
-		new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
+		new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
 				    GFP_KERNEL);
 		if (new_crypt == NULL)
 			return -ENOMEM;
-		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
 		new_crypt->ops = ieee80211_get_crypto_ops("WEP");
 		if (!new_crypt->ops) {
 			request_module("ieee80211_crypt_wep");
@@ -616,13 +615,11 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
 
 		ieee80211_crypt_delayed_deinit(ieee, crypt);
 
-		new_crypt = (struct ieee80211_crypt_data *)
-			kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+		new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
 		if (new_crypt == NULL) {
 			ret = -ENOMEM;
 			goto done;
 		}
-		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
 		new_crypt->ops = ops;
 		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
 			new_crypt->priv = new_crypt->ops->init(idx);
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index 5e9a90651d04..44215ce64d4e 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -47,9 +47,7 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
 
 	dprintk(KERN_INFO PFX "sent association request!\n");
 
-	/* Change the state to associating */
 	spin_lock_irqsave(&mac->lock, flags);
-	mac->associnfo.associating = 1;
 	mac->associated = 0; /* just to make sure */
 
 	/* Set a timer for timeout */
@@ -63,6 +61,7 @@ void
 ieee80211softmac_assoc_timeout(void *d)
 {
 	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_network *n;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mac->lock, flags);
@@ -75,11 +74,12 @@ ieee80211softmac_assoc_timeout(void *d)
 	mac->associnfo.associating = 0;
 	mac->associnfo.bssvalid = 0;
 	mac->associated = 0;
+
+	n = ieee80211softmac_get_network_by_bssid_locked(mac, mac->associnfo.bssid);
 	spin_unlock_irqrestore(&mac->lock, flags);
 
 	dprintk(KERN_INFO PFX "assoc request timed out!\n");
-	/* FIXME: we need to know the network here. that requires a bit of restructuring */
-	ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, NULL);
+	ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, n);
 }
 
 void
@@ -203,6 +203,10 @@ ieee80211softmac_assoc_work(void *d)
 	if (mac->associated)
 		ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
 
+	spin_lock_irqsave(&mac->lock, flags);
+	mac->associnfo.associating = 1;
+	spin_unlock_irqrestore(&mac->lock, flags);
+
 	/* try to find the requested network in our list, if we found one already */
 	if (bssvalid || mac->associnfo.bssfixed)
 		found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
@@ -295,19 +299,32 @@ ieee80211softmac_assoc_work(void *d)
 	memcpy(mac->associnfo.associate_essid.data, found->essid.data, IW_ESSID_MAX_SIZE + 1);
 
 	/* we found a network! authenticate (if necessary) and associate to it. */
-	if (!found->authenticated) {
+	if (found->authenticating) {
+		dprintk(KERN_INFO PFX "Already requested authentication, waiting...\n");
+		if(!mac->associnfo.assoc_wait) {
+			mac->associnfo.assoc_wait = 1;
+			ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
+		}
+		return;
+	}
+	if (!found->authenticated && !found->authenticating) {
 		/* This relies on the fact that _auth_req only queues the work,
 		 * otherwise adding the notification would be racy. */
 		if (!ieee80211softmac_auth_req(mac, found)) {
-			dprintk(KERN_INFO PFX "cannot associate without being authenticated, requested authentication\n");
-			ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
+			if(!mac->associnfo.assoc_wait) {
+				dprintk(KERN_INFO PFX "Cannot associate without being authenticated, requested authentication\n");
+				mac->associnfo.assoc_wait = 1;
+				ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
+			}
 		} else {
 			printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n");
+			mac->associnfo.assoc_wait = 0;
 			ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found);
 		}
 		return;
 	}
 	/* finally! now we can start associating */
+	mac->associnfo.assoc_wait = 0;
 	ieee80211softmac_assoc(mac, found);
 }
 
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 90b8484e509b..4cef39e171d0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -36,8 +36,9 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 	struct ieee80211softmac_auth_queue_item *auth;
 	unsigned long flags;
 
-	if (net->authenticating)
+	if (net->authenticating || net->authenticated)
 		return 0;
+	net->authenticating = 1;
 
 	/* Add the network if it's not already added */
 	ieee80211softmac_add_network(mac, net);
@@ -92,7 +93,6 @@ ieee80211softmac_auth_queue(void *data)
 		return;
 	}
 	net->authenticated = 0;
-	net->authenticating = 1;
 	/* add a timeout call so we eventually give up waiting for an auth reply */
 	schedule_delayed_work(&auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT);
 	auth->retry--;
@@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data)
 	kfree(auth);
 }
 
+/* Sends a response to an auth challenge (for shared key auth). */
+static void
+ieee80211softmac_auth_challenge_response(void *_aq)
+{
+	struct ieee80211softmac_auth_queue_item *aq = _aq;
+
+	/* Send our response */
+	ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
+}
+
 /* Handle the auth response from the AP
  * This should be registered with ieee80211 as handle_auth
  */
@@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
 	case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE:
 		/* Check to make sure we have a challenge IE */
 		data = (u8 *)auth->info_element;
-		if(*data++ != MFIE_TYPE_CHALLENGE){
+		if (*data++ != MFIE_TYPE_CHALLENGE) {
 			printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
 			break;
 		}
 		/* Save the challenge */
 		spin_lock_irqsave(&mac->lock, flags);
 		net->challenge_len = *data++;
-		if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
+		if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
 			net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
-		if(net->challenge != NULL)
+		if (net->challenge != NULL)
 			kfree(net->challenge);
 		net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
 		memcpy(net->challenge, data, net->challenge_len);
 		aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
-		spin_unlock_irqrestore(&mac->lock, flags);
 
-		/* Send our response */
-		ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
+		/* We reuse the work struct from the auth request here.
+		 * It is safe to do so as each one is per-request, and
+		 * at this point (dealing with authentication response)
+		 * we have obviously already sent the initial auth
+		 * request. */
+		cancel_delayed_work(&aq->work);
+		INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
+		schedule_work(&aq->work);
+		spin_unlock_irqrestore(&mac->lock, flags);
 		return 0;
 	case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
 		kfree(net->challenge);
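
Rather than sending the shared-key challenge response inline, the patch cancels the pending auth-timeout work and rearms the same per-request work item with a response handler, deferring the frame to process context. A toy single-threaded model of that reuse pattern (the work-queue mechanics are simulated here, not the kernel API):

#include <stdio.h>

struct work_demo {
	void (*fn)(void *);
	void *data;
	int pending;
};

static void cancel_work(struct work_demo *w)
{
	w->pending = 0;
}

static void init_work(struct work_demo *w, void (*fn)(void *), void *data)
{
	w->fn = fn;
	w->data = data;
}

static void queue_work_demo(struct work_demo *w)
{
	w->pending = 1;
}

static void run_pending(struct work_demo *w)
{
	if (w->pending) {
		w->pending = 0;
		w->fn(w->data);
	}
}

static void auth_timeout(void *d)
{
	printf("auth timed out: %s\n", (const char *)d);
}

static void challenge_response(void *d)
{
	printf("sending challenge response: %s\n", (const char *)d);
}

int main(void)
{
	struct work_demo w;

	/* The auth request arms the timeout... */
	init_work(&w, auth_timeout, "net A");
	queue_work_demo(&w);

	/* ...the challenge arrives, so the same item is rearmed, as the
	 * patch does with cancel_delayed_work()/INIT_WORK()/schedule_work(). */
	cancel_work(&w);
	init_work(&w, challenge_response, "net A");
	queue_work_demo(&w);

	run_pending(&w); /* only the response runs; the timeout never fires */
	return 0;
}
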
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index 09541611e48c..6ae5a1dc7956 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -96,8 +96,7 @@ ieee80211softmac_alloc_mgt(u32 size)
 	if(size > IEEE80211_DATA_LEN)
 		return NULL;
 	/* Allocate the frame */
-	data = kmalloc(size, GFP_ATOMIC);
-	memset(data, 0, size);
+	data = kzalloc(size, GFP_ATOMIC);
 	return data;
 }
 
@@ -229,6 +228,9 @@ ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
 		return 0;
 	ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid);
 
+	/* Fill in the capabilities */
+	(*pkt)->capability = ieee80211softmac_capabilities(mac, net);
+
 	/* Fill in Listen Interval (?) */
 	(*pkt)->listen_interval = cpu_to_le16(10);
 
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 0e65ff4e33fc..75320b6842ab 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -70,12 +70,44 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
 			      char *extra)
 {
 	struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
+	struct ieee80211softmac_network *n;
+	struct ieee80211softmac_auth_queue_item *authptr;
 	int length = 0;
 	unsigned long flags;
 
+	/* Check if we're already associating to this or another network
+	 * If it's another network, cancel and start over with our new network
+	 * If it's our network, ignore the change, we're already doing it!
+	 */
+	if((sm->associnfo.associating || sm->associated) &&
+	   (data->essid.flags && data->essid.length && extra)) {
+		/* Get the associating network */
+		n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
+		if(n && n->essid.len == (data->essid.length - 1) &&
+		   !memcmp(n->essid.data, extra, n->essid.len)) {
+			dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
+				MAC_ARG(sm->associnfo.bssid));
+			return 0;
+		} else {
+			dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
+			spin_lock_irqsave(&sm->lock,flags);
+			/* Cancel assoc work */
+			cancel_delayed_work(&sm->associnfo.work);
+			/* We don't have to do this, but it's a little cleaner */
+			list_for_each_entry(authptr, &sm->auth_queue, list)
+				cancel_delayed_work(&authptr->work);
+			sm->associnfo.bssvalid = 0;
+			sm->associnfo.bssfixed = 0;
+			spin_unlock_irqrestore(&sm->lock,flags);
+			flush_scheduled_work();
+		}
+	}
+
+
 	spin_lock_irqsave(&sm->lock, flags);
 
 	sm->associnfo.static_essid = 0;
+	sm->associnfo.assoc_wait = 0;
 
 	if (data->essid.flags && data->essid.length && extra /*required?*/) {
 		length = min(data->essid.length - 1, IW_ESSID_MAX_SIZE);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index da33393be45f..8514106761b0 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -572,16 +572,6 @@ config TCP_CONG_VENO
 	loss packets.
 	See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
 
-config TCP_CONG_COMPOUND
-	tristate "TCP Compound"
-	depends on EXPERIMENTAL
-	default n
-	---help---
-	  TCP Compound is a sender-side only change to TCP that uses
-	  a mixed Reno/Vegas approach to calculate the cwnd.
-	  For further details look here:
-	    ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf
-
 endmenu
 
 config TCP_CONG_BIC
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 38b8039bdd55..4878fc5be85f 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o
 obj-$(CONFIG_TCP_CONG_VENO) += tcp_veno.o
 obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
-obj-$(CONFIG_TCP_CONG_COMPOUND) += tcp_compound.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
 		      xfrm4_output.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 318d4674faa1..c84a32070f8d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk)
 
 EXPORT_SYMBOL(inet_sk_rebuild_header);
 
+static int inet_gso_send_check(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct net_protocol *ops;
+	int proto;
+	int ihl;
+	int err = -EINVAL;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = skb->nh.iph;
+	ihl = iph->ihl * 4;
+	if (ihl < sizeof(*iph))
+		goto out;
+
+	if (unlikely(!pskb_may_pull(skb, ihl)))
+		goto out;
+
+	skb->h.raw = __skb_pull(skb, ihl);
+	iph = skb->nh.iph;
+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
+	err = -EPROTONOSUPPORT;
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet_protos[proto]);
+	if (likely(ops && ops->gso_send_check))
+		err = ops->gso_send_check(skb);
+	rcu_read_unlock();
+
+out:
+	return err;
+}
+
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = {
 static struct net_protocol tcp_protocol = {
 	.handler =	tcp_v4_rcv,
 	.err_handler =	tcp_v4_err,
+	.gso_send_check = tcp_v4_gso_send_check,
 	.gso_segment =	tcp_tso_segment,
 	.no_policy =	1,
 };
@@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void);
 static struct packet_type ip_packet_type = {
 	.type = __constant_htons(ETH_P_IP),
 	.func = ip_rcv,
+	.gso_send_check = inet_gso_send_check,
 	.gso_segment = inet_gso_segment,
 };
 
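
inet_gso_send_check() performs the same header validation as inet_gso_segment() and then dispatches to the per-protocol gso_send_check hook, so malformed headers are rejected before any segmentation work. A userspace sketch of the validation steps, with raw-byte parsing standing in for pskb_may_pull() and struct iphdr (illustrative only):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Returns the protocol number on success, -1 on a malformed header. */
static int gso_send_check_demo(const uint8_t *buf, size_t len)
{
	size_t ihl;

	if (len < 20)              /* pskb_may_pull(skb, sizeof(*iph)) */
		return -1;
	ihl = (buf[0] & 0x0F) * 4; /* iph->ihl * 4 */
	if (ihl < 20)              /* header claims less than the minimum */
		return -1;
	if (len < ihl)             /* pskb_may_pull(skb, ihl) for options */
		return -1;
	return buf[9];             /* iph->protocol, used for the ops lookup */
}

int main(void)
{
	uint8_t pkt[20];

	memset(pkt, 0, sizeof(pkt));
	pkt[0] = 0x45; /* version 4, ihl 5 (20 bytes) */
	pkt[9] = 6;    /* IPPROTO_TCP */
	printf("proto = %d\n", gso_send_check_demo(pkt, sizeof(pkt)));
	return 0;
}
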
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 8e748be36c5a..1366bc6ce6a5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -215,12 +215,10 @@ static int ah_init_state(struct xfrm_state *x)
 	if (x->encap)
 		goto error;
 
-	ahp = kmalloc(sizeof(*ahp), GFP_KERNEL);
+	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
 	if (ahp == NULL)
 		return -ENOMEM;
 
-	memset(ahp, 0, sizeof(*ahp));
-
 	ahp->key = x->aalg->alg_key;
 	ahp->key_len = (x->aalg->alg_key_len+7)/8;
 	ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7b51b3bdb548..c8a3723bc001 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1372,12 +1372,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
 	int rc = -ENOMEM;
-	struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
 
 	if (!s)
 		goto out;
 
-	memset(s, 0, sizeof(*s));
 	rc = seq_open(file, &arp_seq_ops);
 	if (rc)
 		goto out_kfree;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a7c65e9e5ec9..a6cc31d911eb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -93,10 +93,9 @@ static void devinet_sysctl_unregister(struct ipv4_devconf *p);
 
 static struct in_ifaddr *inet_alloc_ifa(void)
 {
-	struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);
+	struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
 
 	if (ifa) {
-		memset(ifa, 0, sizeof(*ifa));
 		INIT_RCU_HEAD(&ifa->rcu_head);
 	}
 
@@ -140,10 +139,9 @@ struct in_device *inetdev_init(struct net_device *dev)
 
 	ASSERT_RTNL();
 
-	in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL);
+	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
 	if (!in_dev)
 		goto out;
-	memset(in_dev, 0, sizeof(*in_dev));
 	INIT_RCU_HEAD(&in_dev->rcu_head);
 	memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
 	in_dev->cnf.sysctl = NULL;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4e112738b3fa..fc2f8ce441de 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -316,12 +316,10 @@ static int esp_init_state(struct xfrm_state *x)
 	if (x->ealg == NULL)
 		goto error;
 
-	esp = kmalloc(sizeof(*esp), GFP_KERNEL);
+	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
-	memset(esp, 0, sizeof(*esp));
-
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
 
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 3c1d32ad35f2..72c633b357cf 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -204,11 +204,10 @@ static struct fn_zone *
 fn_new_zone(struct fn_hash *table, int z)
 {
 	int i;
-	struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL);
+	struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
 	if (!fz)
 		return NULL;
 
-	memset(fz, 0, sizeof(struct fn_zone));
 	if (z) {
 		fz->fz_divisor = 16;
 	} else {
@@ -1046,7 +1045,7 @@ static int fib_seq_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
 	int rc = -ENOMEM;
-	struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
 
 	if (!s)
 		goto out;
@@ -1057,7 +1056,6 @@ static int fib_seq_open(struct inode *inode, struct file *file)
 
 	seq = file->private_data;
 	seq->private = s;
-	memset(s, 0, sizeof(*s));
 out:
 	return rc;
 out_kfree:
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6c642d11d4ca..79b04718bdfd 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -196,10 +196,9 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 		}
 	}
 
-	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
+	new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
 	if (!new_r)
 		return -ENOMEM;
-	memset(new_r, 0, sizeof(*new_r));
 
 	if (rta[RTA_SRC-1])
 		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
@@ -457,13 +456,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 
 	rcu_read_lock();
 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
-
 		if (idx < s_idx)
-			continue;
+			goto next;
 		if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
 				   cb->nlh->nlmsg_seq,
 				   RTM_NEWRULE, NLM_F_MULTI) < 0)
 			break;
+next:
 		idx++;
 	}
 	rcu_read_unlock();
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5f87533684d5..51738000f3dc 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi)
 
 void fib_release_info(struct fib_info *fi)
 {
-	write_lock(&fib_info_lock);
+	write_lock_bh(&fib_info_lock);
 	if (fi && --fi->fib_treeref == 0) {
 		hlist_del(&fi->fib_hash);
 		if (fi->fib_prefsrc)
@@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi)
 		fi->fib_dead = 1;
 		fib_info_put(fi);
 	}
-	write_unlock(&fib_info_lock);
+	write_unlock_bh(&fib_info_lock);
 }
 
 static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
@@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
 	unsigned int old_size = fib_hash_size;
 	unsigned int i, bytes;
 
-	write_lock(&fib_info_lock);
+	write_lock_bh(&fib_info_lock);
 	old_info_hash = fib_info_hash;
 	old_laddrhash = fib_info_laddrhash;
 	fib_hash_size = new_size;
@@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
 	}
 	fib_info_laddrhash = new_laddrhash;
 
-	write_unlock(&fib_info_lock);
+	write_unlock_bh(&fib_info_lock);
 
 	bytes = old_size * sizeof(struct hlist_head *);
 	fib_hash_free(old_info_hash, bytes);
@@ -709,11 +709,10 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
 		goto failure;
 	}
 
-	fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
 	if (fi == NULL)
 		goto failure;
 	fib_info_cnt++;
-	memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh));
 
 	fi->fib_protocol = r->rtm_protocol;
 
@@ -821,7 +820,7 @@ link_it:
 
 	fi->fib_treeref++;
 	atomic_inc(&fi->fib_clntref);
-	write_lock(&fib_info_lock);
+	write_lock_bh(&fib_info_lock);
 	hlist_add_head(&fi->fib_hash,
 		       &fib_info_hash[fib_info_hashfn(fi)]);
 	if (fi->fib_prefsrc) {
@@ -840,7 +839,7 @@ link_it:
 		head = &fib_info_devhash[hash];
 		hlist_add_head(&nh->nh_hash, head);
 	} endfor_nexthops(fi)
-	write_unlock(&fib_info_lock);
+	write_unlock_bh(&fib_info_lock);
 	return fi;
 
 err_inval:
@@ -962,10 +961,6 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	rtm->rtm_protocol = fi->fib_protocol;
 	if (fi->fib_priority)
 		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
-#ifdef CONFIG_NET_CLS_ROUTE
-	if (fi->fib_nh[0].nh_tclassid)
-		RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
-#endif
 	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
 		goto rtattr_failure;
 	if (fi->fib_prefsrc)
@@ -975,6 +970,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 			RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw);
 		if (fi->fib_nh->nh_oif)
 			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
+#ifdef CONFIG_NET_CLS_ROUTE
+		if (fi->fib_nh[0].nh_tclassid)
+			RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
+#endif
 	}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	if (fi->fib_nhs > 1) {
@@ -993,6 +992,10 @@ fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 			nhp->rtnh_ifindex = nh->nh_oif;
 			if (nh->nh_gw)
 				RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw);
+#ifdef CONFIG_NET_CLS_ROUTE
+			if (nh->nh_tclassid)
+				RTA_PUT(skb, RTA_FLOW, 4, &nh->nh_tclassid);
+#endif
 			nhp->rtnh_len = skb->tail - (unsigned char*)nhp;
 		} endfor_nexthops(fi);
 		mp_head->rta_type = RTA_MULTIPATH;
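
fib_info_lock moves to the _bh lock variants because the lock is also taken where softirqs run; if a softirq interrupts the lock holder on the same CPU and spins for the same lock, that CPU deadlocks. Disabling bottom halves across the critical section closes the window. A deliberately simplified, single-threaded model of the scenario (plain flags stand in for the real spinlock and BH state):

#include <stdio.h>

static int lock_held;
static int bh_disabled;
static int deadlocked;

/* A softirq that also wants the lock; on one CPU it would spin
 * forever if it fires while the lock is already held. */
static void softirq_wants_lock(void)
{
	if (lock_held)
		deadlocked = 1;
}

static void critical_section(int use_bh)
{
	bh_disabled = use_bh;
	lock_held = 1;
	if (!bh_disabled)
		softirq_wants_lock(); /* a softirq may fire mid-section */
	lock_held = 0;
	bh_disabled = 0;
}

int main(void)
{
	critical_section(0);
	printf("write_lock:    %s\n", deadlocked ? "deadlock" : "ok");
	deadlocked = 0;
	critical_section(1);
	printf("write_lock_bh: %s\n", deadlocked ? "deadlock" : "ok");
	return 0;
}
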
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1cb65305e102..23fb9d9768e3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1252,8 +1252,8 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
 	 */
 
 	if (!fa_head) {
-		fa_head = fib_insert_node(t, &err, key, plen);
 		err = 0;
+		fa_head = fib_insert_node(t, &err, key, plen);
 		if (err)
 			goto out_free_new_fa;
 	}
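
The fib_trie hunk reorders error handling: fib_insert_node() reports failure through its err out-parameter, so zeroing err after the call, as the old code did, silently discarded any failure it reported. Clearing it before the call preserves the report. A standalone model (with -12 standing in for -ENOMEM):

#include <stdio.h>

/* The callee reports failure through an out-parameter. */
static void *insert_node_demo(int *err, int fail)
{
	if (fail) {
		*err = -12; /* -ENOMEM */
		return NULL;
	}
	*err = 0;
	return (void *)"node";
}

int main(void)
{
	int err;
	void *head;

	/* Patched order: clear err first, then call. */
	err = 0;
	head = insert_node_demo(&err, 1);
	if (err)
		printf("failure seen: err=%d head=%p\n", err, head);

	/* The old order ran err = 0 after the call, masking err = -12. */
	return 0;
}
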
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d299c8e547d6..8e8117c19e4d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1028,10 +1028,9 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	 * for deleted items allows change reports to use common code with
 	 * non-deleted or query-response MCA's.
 	 */
-	pmc = kmalloc(sizeof(*pmc), GFP_KERNEL);
+	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
 	if (!pmc)
 		return;
-	memset(pmc, 0, sizeof(*pmc));
 	spin_lock_bh(&im->lock);
 	pmc->interface = im->interface;
 	in_dev_hold(in_dev);
@@ -1529,10 +1528,9 @@ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
 		psf_prev = psf;
 	}
 	if (!psf) {
-		psf = kmalloc(sizeof(*psf), GFP_ATOMIC);
+		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
 		if (!psf)
 			return -ENOBUFS;
-		memset(psf, 0, sizeof(*psf));
 		psf->sf_inaddr = *psfsrc;
 		if (psf_prev) {
 			psf_prev->sf_next = psf;
@@ -1795,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 	struct in_device *in_dev;
 	u32 group = imr->imr_multiaddr.s_addr;
 	u32 ifindex;
+	int ret = -EADDRNOTAVAIL;
 
 	rtnl_lock();
 	in_dev = ip_mc_find_dev(imr);
-	if (!in_dev) {
-		rtnl_unlock();
-		return -ENODEV;
-	}
 	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
-		if (iml->multi.imr_multiaddr.s_addr == group &&
-		    iml->multi.imr_ifindex == ifindex) {
-			(void) ip_mc_leave_src(sk, iml, in_dev);
+		if (iml->multi.imr_multiaddr.s_addr != group)
+			continue;
+		if (ifindex) {
+			if (iml->multi.imr_ifindex != ifindex)
+				continue;
+		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
+			   iml->multi.imr_address.s_addr)
+			continue;
+
+		(void) ip_mc_leave_src(sk, iml, in_dev);
 
 		*imlp = iml->next;
 
+		if (in_dev)
 			ip_mc_dec_group(in_dev, group);
 		rtnl_unlock();
 		sock_kfree_s(sk, iml, sizeof(*iml));
 		return 0;
-		}
 	}
+	if (!in_dev)
+		ret = -ENODEV;
 	rtnl_unlock();
-	return -EADDRNOTAVAIL;
+	return ret;
 }
 
 int ip_mc_source(int add, int omode, struct sock *sk, struct
@@ -2201,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk)
 		struct in_device *in_dev;
 		inet->mc_list = iml->next;
 
-		if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) {
-			(void) ip_mc_leave_src(sk, iml, in_dev);
+		in_dev = inetdev_by_index(iml->multi.imr_ifindex);
+		(void) ip_mc_leave_src(sk, iml, in_dev);
+		if (in_dev != NULL) {
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
 			in_dev_put(in_dev);
 		}
 		sock_kfree_s(sk, iml, sizeof(*iml));
-
 	}
 	rtnl_unlock();
 }
@@ -2380,7 +2384,7 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
 	int rc = -ENOMEM;
-	struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
 
 	if (!s)
 		goto out;
@@ -2390,7 +2394,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
 
 	seq = file->private_data;
 	seq->private = s;
-	memset(s, 0, sizeof(*s));
 out:
 	return rc;
 out_kfree:
@@ -2555,7 +2558,7 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
 	int rc = -ENOMEM;
-	struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
 
 	if (!s)
 		goto out;
@@ -2565,7 +2568,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
 
 	seq = file->private_data;
 	seq->private = s;
-	memset(s, 0, sizeof(*s));
 out:
 	return rc;
 out_kfree:
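Most of the allocation changes in this merge are the same mechanical conversion: a kmalloc() followed by a full memset() becomes a single kzalloc(). A minimal sketch of the equivalence (struct foo is a placeholder, not a type from the tree):

	struct foo *p;

	/* before: allocate, check, then zero by hand */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	memset(p, 0, sizeof(*p));

	/* after: kzalloc() returns memory that is already zeroed */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

Besides saving a line, the zeroed-on-allocation form cannot be broken later by an error path that skips the memset().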
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8e7e41b66c79..492858e6faf0 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -909,11 +909,10 @@ static int __init inet_diag_init(void)
 					sizeof(struct inet_diag_handler *));
 	int err = -ENOMEM;
 
-	inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL);
+	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
 	if (!inet_diag_table)
 		goto out;
 
-	memset(inet_diag_table, 0, inet_diag_table_size);
 	idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
 					THIS_MODULE);
 	if (idiagnl == NULL)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 2160874ce7aa..03ff62ebcfeb 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -86,7 +86,7 @@ static struct inet_peer *peer_root = peer_avl_empty;
 static DEFINE_RWLOCK(peer_pool_lock);
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static volatile int peer_total;
+static int peer_total;
 /* Exported for sysctl_net_ipv4. */
 int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
 					 * aggressively at this stage */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6ff9b10d9563..0f9b3a31997b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -617,7 +617,6 @@ static int ipgre_rcv(struct sk_buff *skb)
 	skb->mac.raw = skb->nh.raw;
 	skb->nh.raw = __pskb_pull(skb, offset);
 	skb_postpull_rcsum(skb, skb->h.raw, offset);
-	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
 	skb->pkt_type = PACKET_HOST;
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 	if (MULTICAST(iph->daddr)) {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e1a7dba2fa8a..212734ca238f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -428,6 +428,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 		goto drop;
 	}
 
+	/* Remove any debris in the socket control block */
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
 	return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
 		       ip_rcv_finish);
 
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index cbcae6544622..406056edc02b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -256,7 +256,6 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
 
 	if (!opt) {
 		opt = &(IPCB(skb)->opt);
-		memset(opt, 0, sizeof(struct ip_options));
 		iph = skb->nh.raw;
 		opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr);
 		optptr = iph + sizeof(struct iphdr);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ca0e714613fb..a2ede167e045 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
 		return dst_output(skb);
 	}
 #endif
-	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
+	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
 		return ip_fragment(skb, ip_finish_output2);
 	else
 		return ip_finish_output2(skb);
@@ -440,6 +440,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 	iph = skb->nh.iph;
 
 	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
+		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(dst_mtu(&rt->u.dst)));
 		kfree_skb(skb);
@@ -526,6 +527,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 
 		err = output(skb);
 
+		if (!err)
+			IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
 		if (err || !frag)
 			break;
 
@@ -649,9 +652,6 @@ slow_path:
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-
-		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
-
 		iph->tot_len = htons(len + hlen);
 
 		ip_send_check(iph);
@@ -659,6 +659,8 @@ slow_path:
 		err = output(skb2);
 		if (err)
 			goto fail;
+
+		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
 	}
 	kfree_skb(skb);
 	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
@@ -946,7 +948,7 @@ alloc_new_skb:
 			skb_prev->csum = csum_sub(skb_prev->csum,
 						  skb->csum);
 			data += fraggap;
-			skb_trim(skb_prev, maxfraglen);
+			pskb_trim_unique(skb_prev, maxfraglen);
 		}
 
 		copy = datalen - transhdrlen - fraggap;
@@ -1095,7 +1097,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	while (size > 0) {
 		int i;
 
-		if (skb_shinfo(skb)->gso_size)
+		if (skb_is_gso(skb))
 			len = size;
 		else {
 
@@ -1141,7 +1143,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 						   data, fraggap, 0);
 				skb_prev->csum = csum_sub(skb_prev->csum,
 							  skb->csum);
-				skb_trim(skb_prev, maxfraglen);
+				pskb_trim_unique(skb_prev, maxfraglen);
 			}
 
 			/*
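Two of the hunks above swap an open-coded gso_size test for skb_is_gso(). A rough sketch of what that helper amounts to (the real definition lives in include/linux/skbuff.h; treat this as illustrative, not a quote of it):

	static inline int skb_is_gso(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size != 0;
	}

The helper reads better at the call sites and gives GSO checks a single place to change if the condition ever grows.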
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 84f43a3c9098..2d05c4133d3e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -112,14 +112,19 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 {
 	char *secdata;
-	u32 seclen;
+	u32 seclen, secid;
 	int err;
 
-	err = security_socket_getpeersec_dgram(skb, &secdata, &seclen);
+	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
+	if (err)
+		return;
+
+	err = security_secid_to_secctx(secid, &secdata, &seclen);
 	if (err)
 		return;
 
 	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
+	security_release_secctx(secdata, seclen);
 }
 
 
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 8e0374847532..a0c28b2b756e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -70,7 +70,8 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		goto out;
 
-	skb_put(skb, dlen - plen);
+	skb->truesize += dlen - plen;
+	__skb_put(skb, dlen - plen);
 	memcpy(skb->data, scratch, dlen);
 out:
 	put_cpu();
@@ -409,11 +410,10 @@ static int ipcomp_init_state(struct xfrm_state *x)
 		goto out;
 
 	err = -ENOMEM;
-	ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL);
+	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
 	if (!ipcd)
 		goto out;
 
-	memset(ipcd, 0, sizeof(*ipcd));
 	x->props.header_len = 0;
 	if (x->props.mode)
 		x->props.header_len += sizeof(struct iphdr);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 3291d5192aad..76ab50b0d6ef 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -487,7 +487,6 @@ static int ipip_rcv(struct sk_buff *skb)
 
 	skb->mac.raw = skb->nh.raw;
 	skb->nh.raw = skb->data;
-	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
 	skb->protocol = htons(ETH_P_IP);
 	skb->pkt_type = PACKET_HOST;
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ba33f8621c67..85893eef6b16 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1461,7 +1461,6 @@ int pim_rcv_v1(struct sk_buff * skb)
 	skb_pull(skb, (u8*)encap - skb->data);
 	skb->nh.iph = (struct iphdr *)skb->data;
 	skb->dev = reg_dev;
-	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
 	skb->protocol = htons(ETH_P_IP);
 	skb->ip_summed = 0;
 	skb->pkt_type = PACKET_HOST;
@@ -1517,7 +1516,6 @@ static int pim_rcv(struct sk_buff * skb)
 	skb_pull(skb, (u8*)encap - skb->data);
 	skb->nh.iph = (struct iphdr *)skb->data;
 	skb->dev = reg_dev;
-	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
 	skb->protocol = htons(ETH_P_IP);
 	skb->ip_summed = 0;
 	skb->pkt_type = PACKET_HOST;
@@ -1580,6 +1578,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
 
 	if (cache==NULL) {
+		struct sk_buff *skb2;
 		struct net_device *dev;
 		int vif;
 
@@ -1593,12 +1592,18 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 			read_unlock(&mrt_lock);
 			return -ENODEV;
 		}
-		skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
-		skb->nh.iph->ihl = sizeof(struct iphdr)>>2;
-		skb->nh.iph->saddr = rt->rt_src;
-		skb->nh.iph->daddr = rt->rt_dst;
-		skb->nh.iph->version = 0;
-		err = ipmr_cache_unresolved(vif, skb);
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (!skb2) {
+			read_unlock(&mrt_lock);
+			return -ENOMEM;
+		}
+
+		skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr));
+		skb2->nh.iph->ihl = sizeof(struct iphdr)>>2;
+		skb2->nh.iph->saddr = rt->rt_src;
+		skb2->nh.iph->daddr = rt->rt_dst;
+		skb2->nh.iph->version = 0;
+		err = ipmr_cache_unresolved(vif, skb2);
 		read_unlock(&mrt_lock);
 		return err;
 	}
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f28ec6882162..6a28fafe910c 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -735,12 +735,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
 	if (atype != RTN_LOCAL && atype != RTN_UNICAST)
 		return -EINVAL;
 
-	dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
+	dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
 	if (dest == NULL) {
 		IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
 		return -ENOMEM;
 	}
-	memset(dest, 0, sizeof(struct ip_vs_dest));
 
 	dest->protocol = svc->protocol;
 	dest->vaddr = svc->addr;
@@ -1050,14 +1049,12 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
 		goto out_mod_dec;
 	}
 
-	svc = (struct ip_vs_service *)
-		kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
+	svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
 	if (svc == NULL) {
 		IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
 		ret = -ENOMEM;
 		goto out_err;
 	}
-	memset(svc, 0, sizeof(struct ip_vs_service));
 
 	/* I'm the first user of the service */
 	atomic_set(&svc->usecnt, 1);
@@ -1797,7 +1794,7 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
 	int rc = -ENOMEM;
-	struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL);
 
 	if (!s)
 		goto out;
@@ -1808,7 +1805,6 @@ static int ip_vs_info_open(struct inode *inode, struct file *file)
 
 	seq = file->private_data;
 	seq->private = s;
-	memset(s, 0, sizeof(*s));
 out:
 	return rc;
 out_kfree:
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 4c1940381ba0..7d68b80c4c19 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -123,11 +123,10 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
 {
 	struct ip_vs_estimator *est;
 
-	est = kmalloc(sizeof(*est), GFP_KERNEL);
+	est = kzalloc(sizeof(*est), GFP_KERNEL);
 	if (est == NULL)
 		return -ENOMEM;
 
-	memset(est, 0, sizeof(*est));
 	est->stats = stats;
 	est->last_conns = stats->conns;
 	est->cps = stats->cps<<10;
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index a19a33ceb811..37fafb1fbcff 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -46,14 +46,7 @@
  */
 static int ports[IP_VS_APP_MAX_PORTS] = {21, 0};
 module_param_array(ports, int, NULL, 0);
-
-/*
- *	Debug level
- */
-#ifdef CONFIG_IP_VS_DEBUG
-static int debug=0;
-module_param(debug, int, 0);
-#endif
+MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands");
 
 
 /*	Dummy variable */
@@ -177,7 +170,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 			  &start, &end) != 1)
 		return 1;
 
-	IP_VS_DBG(1-debug, "PASV response (%u.%u.%u.%u:%d) -> "
+	IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> "
 		  "%u.%u.%u.%u:%d detected\n",
 		  NIPQUAD(from), ntohs(port), NIPQUAD(cp->caddr), 0);
 
@@ -280,7 +273,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
 	while (data <= data_limit - 6) {
 		if (strnicmp(data, "PASV\r\n", 6) == 0) {
 			/* Passive mode on */
-			IP_VS_DBG(1-debug, "got PASV at %zd of %zd\n",
+			IP_VS_DBG(7, "got PASV at %zd of %zd\n",
 				  data - data_start,
 				  data_limit - data_start);
 			cp->app_data = &ip_vs_ftp_pasv;
@@ -302,7 +295,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
 			   &start, &end) != 1)
 		return 1;
 
-	IP_VS_DBG(1-debug, "PORT %u.%u.%u.%u:%d detected\n",
+	IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n",
 		  NIPQUAD(to), ntohs(port));
 
 	/* Passive mode off */
@@ -311,7 +304,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
 	/*
 	 * Now update or create a connection entry for it
 	 */
-	IP_VS_DBG(1-debug, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n",
+	IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n",
 		  ip_vs_proto_name(iph->protocol),
 		  NIPQUAD(to), ntohs(port), NIPQUAD(cp->vaddr), 0);
 
@@ -372,11 +365,17 @@ static int __init ip_vs_ftp_init(void)
 	for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
 		if (!ports[i])
 			continue;
+		if (ports[i] < 0 || ports[i] > 0xffff) {
+			IP_VS_WARNING("ip_vs_ftp: Ignoring invalid "
+				      "configuration port[%d] = %d\n",
+				      i, ports[i]);
+			continue;
+		}
 		ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
 		if (ret)
 			break;
-		IP_VS_DBG(1-debug, "%s: loaded support on port[%d] = %d\n",
-			  app->name, i, ports[i]);
+		IP_VS_INFO("%s: loaded support on port[%d] = %d\n",
+			   app->name, i, ports[i]);
 	}
 
 	if (ret)
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 80c73ca90116..8d1d7a6e72a5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -236,7 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
 	struct arpt_entry *e, *back;
 	const char *indev, *outdev;
 	void *table_base;
-	struct xt_table_info *private = table->private;
+	struct xt_table_info *private;
 
 	/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
 	if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) +
@@ -248,6 +248,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
 	outdev = out ? out->name : nulldevname;
 
 	read_lock_bh(&table->lock);
+	private = table->private;
 	table_base = (void *)private->entries[smp_processor_id()];
 	e = get_entry(table_base, private->hook_entry[hook]);
 	back = get_entry(table_base, private->underflow[hook]);
@@ -1170,21 +1171,34 @@ static int __init arp_tables_init(void)
 {
 	int ret;
 
-	xt_proto_init(NF_ARP);
+	ret = xt_proto_init(NF_ARP);
+	if (ret < 0)
+		goto err1;
 
 	/* Noone else will be downing sem now, so we won't sleep */
-	xt_register_target(&arpt_standard_target);
-	xt_register_target(&arpt_error_target);
+	ret = xt_register_target(&arpt_standard_target);
+	if (ret < 0)
+		goto err2;
+	ret = xt_register_target(&arpt_error_target);
+	if (ret < 0)
+		goto err3;
 
 	/* Register setsockopt */
 	ret = nf_register_sockopt(&arpt_sockopts);
-	if (ret < 0) {
-		duprintf("Unable to register sockopts.\n");
-		return ret;
-	}
+	if (ret < 0)
+		goto err4;
 
 	printk("arp_tables: (C) 2002 David S. Miller\n");
 	return 0;
+
+err4:
+	xt_unregister_target(&arpt_error_target);
+err3:
+	xt_unregister_target(&arpt_standard_target);
+err2:
+	xt_proto_fini(NF_ARP);
+err1:
+	return ret;
 }
 
 static void __exit arp_tables_fini(void)
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index af35235672d5..9a39e2969712 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
 	tuple.dst.protonum = IPPROTO_TCP;
 
 	exp = __ip_conntrack_expect_find(&tuple);
-	if (exp->master == ct)
+	if (exp && exp->master == ct)
 		return exp;
 	return NULL;
 }
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 33891bb1fde4..0d4cc92391fa 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 				cb->args[0], *id);
 
 	read_lock_bh(&ip_conntrack_lock);
+	last = (struct ip_conntrack *)cb->args[1];
 	for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
 restart:
-		last = (struct ip_conntrack *)cb->args[1];
 		list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
 			h = (struct ip_conntrack_tuple_hash *) i;
 			if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = tuplehash_to_ctrack(h);
-			if (last != NULL) {
-				if (ct == last) {
-					ip_conntrack_put(last);
-					cb->args[1] = 0;
-					last = NULL;
-				} else
+			if (cb->args[1]) {
+				if (ct != last)
 					continue;
+				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
 						cb->nlh->nlmsg_seq,
@@ -440,17 +437,17 @@ restart:
 				goto out;
 			}
 		}
-		if (last != NULL) {
-			ip_conntrack_put(last);
+		if (cb->args[1]) {
 			cb->args[1] = 0;
 			goto restart;
 		}
 	}
 out:
 	read_unlock_bh(&ip_conntrack_lock);
+	if (last)
+		ip_conntrack_put(last);
 
 	DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
-
 	return skb->len;
 }
 
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c
index fc87ce0da40d..4f222d6be009 100644
--- a/net/ipv4/netfilter/ip_conntrack_sip.c
+++ b/net/ipv4/netfilter/ip_conntrack_sip.c
@@ -442,7 +442,7 @@ static int __init init(void)
 		sip[i].tuple.src.u.udp.port = htons(ports[i]);
 		sip[i].mask.src.u.udp.port = 0xFFFF;
 		sip[i].mask.dst.protonum = 0xFF;
-		sip[i].max_expected = 1;
+		sip[i].max_expected = 2;
 		sip[i].timeout = 3 * 60; /* 3 minutes */
 		sip[i].me = THIS_MODULE;
 		sip[i].help = sip_help;
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 7bd3c22003a2..7a9fa04a467a 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -534,6 +534,8 @@ static struct nf_hook_ops ip_conntrack_ops[] = {
 
 /* Sysctl support */
 
+int ip_conntrack_checksum = 1;
+
 #ifdef CONFIG_SYSCTL
 
 /* From ip_conntrack_core.c */
@@ -568,8 +570,6 @@ extern unsigned int ip_ct_generic_timeout;
 static int log_invalid_proto_min = 0;
 static int log_invalid_proto_max = 255;
 
-int ip_conntrack_checksum = 1;
-
 static struct ctl_table_header *ip_ct_sysctl_header;
 
 static ctl_table ip_ct_sysctl_table[] = {
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 0b1b416759cc..18b7fbdccb61 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -1255,9 +1255,9 @@ static int help(struct sk_buff **pskb,
 	struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
 
 	/* SNMP replies and originating SNMP traps get mangled */
-	if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
+	if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
 		return NF_ACCEPT;
-	if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
+	if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
 		return NF_ACCEPT;
 
 	/* No NAT? */
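The fix above is a pure byte-order correction: udph->source and udph->dest hold network byte order, so the host constant being compared must be converted with htons(), not the field "converted back" with ntohs(). A hedged sketch of the rule, with an illustrative port value:

	__u16 wire_port = udph->source;	/* network order, straight off the packet */

	if (wire_port == htons(161))	/* right: convert the host-order constant */
		return NF_ACCEPT;
	if (wire_port == ntohs(161))	/* wrong in general: ntohs() is applied to
					 * a host-order value here */
		return NF_ACCEPT;

On big-endian machines htons() and ntohs() are both identity functions, which is why the original comparison appeared to work there and only misbehaved on little-endian hosts.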
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fc5bdd5eb7d3..048514f15f2f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb,
 	const char *indev, *outdev;
 	void *table_base;
 	struct ipt_entry *e, *back;
-	struct xt_table_info *private = table->private;
+	struct xt_table_info *private;
 
 	/* Initialization */
 	ip = (*pskb)->nh.iph;
@@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb,
 
 	read_lock_bh(&table->lock);
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+	private = table->private;
 	table_base = (void *)private->entries[smp_processor_id()];
 	e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -2239,22 +2240,39 @@ static int __init ip_tables_init(void)
 {
 	int ret;
 
-	xt_proto_init(AF_INET);
+	ret = xt_proto_init(AF_INET);
+	if (ret < 0)
+		goto err1;
 
 	/* Noone else will be downing sem now, so we won't sleep */
-	xt_register_target(&ipt_standard_target);
-	xt_register_target(&ipt_error_target);
-	xt_register_match(&icmp_matchstruct);
+	ret = xt_register_target(&ipt_standard_target);
+	if (ret < 0)
+		goto err2;
+	ret = xt_register_target(&ipt_error_target);
+	if (ret < 0)
+		goto err3;
+	ret = xt_register_match(&icmp_matchstruct);
+	if (ret < 0)
+		goto err4;
 
 	/* Register setsockopt */
 	ret = nf_register_sockopt(&ipt_sockopts);
-	if (ret < 0) {
-		duprintf("Unable to register sockopts.\n");
-		return ret;
-	}
+	if (ret < 0)
+		goto err5;
 
 	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
 	return 0;
+
+err5:
+	xt_unregister_match(&icmp_matchstruct);
+err4:
+	xt_unregister_target(&ipt_error_target);
+err3:
+	xt_unregister_target(&ipt_standard_target);
+err2:
+	xt_proto_fini(AF_INET);
+err1:
+	return ret;
 }
 
 static void __exit ip_tables_fini(void)
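Both table-init conversions above (arp_tables and ip_tables) follow the same ladder-of-gotos idiom: every registration is checked, and a failure unwinds exactly the steps that already succeeded, in reverse order. A compact sketch with made-up names (register_a/register_b are placeholders, not kernel symbols):

	static int __init example_init(void)
	{
		int ret;

		ret = register_a();
		if (ret < 0)
			goto err1;

		ret = register_b();
		if (ret < 0)
			goto err2;

		return 0;

	err2:
		unregister_a();		/* undo step 1, the only completed step */
	err1:
		return ret;
	}

The labels read bottom-up: jumping to errN skips the cleanup for step N itself and runs the cleanups for steps N-1 and below.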
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index cbffeae3f565..d994c5f5744c 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -172,11 +172,10 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
 	struct clusterip_config *c;
 	char buffer[16];
 
-	c = kmalloc(sizeof(*c), GFP_ATOMIC);
+	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
 		return NULL;
 
-	memset(c, 0, sizeof(*c));
 	c->dev = dev;
 	c->clusterip = ip;
 	memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index d7dd7fe7051c..d46fd677fa11 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -115,6 +115,11 @@ static void ulog_send(unsigned int nlgroupnum)
 		del_timer(&ub->timer);
 	}
 
+	if (!ub->skb) {
+		DEBUGP("ipt_ULOG: ulog_send: nothing to send\n");
+		return;
+	}
+
 	/* last nlmsg needs NLMSG_DONE */
 	if (ub->qlen > 1)
 		ub->lastnlh->nlmsg_type = NLMSG_DONE;
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index 92980ab8ce48..3bd2368e1fc9 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -454,15 +454,12 @@ hashlimit_match(const struct sk_buff *skb,
 		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
 						       hinfo->cfg.burst);
 		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
-
-		spin_unlock_bh(&hinfo->lock);
-		return 1;
+	} else {
+		/* update expiration timeout */
+		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
+		rateinfo_recalc(dh, now);
 	}
 
-	/* update expiration timeout */
-	dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
-
-	rateinfo_recalc(dh, now);
 	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
 		/* We're underlimit. */
 		dh->rateinfo.credit -= dh->rateinfo.cost;
@@ -508,6 +505,9 @@ hashlimit_checkentry(const char *tablename,
 	if (!r->cfg.expire)
 		return 0;
 
+	if (r->name[sizeof(r->name) - 1] != '\0')
+		return 0;
+
 	/* This is the best we've got: We cannot release and re-grab lock,
 	 * since checkentry() is called before ip_tables.c grabs ipt_mutex.
 	 * We also cannot grab the hashtable spinlock, since htable_create will
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bd221ec3f81e..62b2762a2420 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -609,6 +609,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if (sin) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = skb->nh.iph->saddr;
+		sin->sin_port = 0;
 		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
 	}
 	if (inet->cmsg_flags)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2dc6dbb28467..b873cbcdd0b8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -104,6 +104,7 @@
 #include <net/icmp.h>
 #include <net/xfrm.h>
 #include <net/ip_mp_alg.h>
+#include <net/netevent.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
@@ -1125,6 +1126,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
 	struct rtable *rth, **rthp;
 	u32  skeys[2] = { saddr, 0 };
 	int  ikeys[2] = { dev->ifindex, 0 };
+	struct netevent_redirect netevent;
 
 	if (!in_dev)
 		return;
@@ -1216,6 +1218,11 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
 					rt_drop(rt);
 					goto do_next;
 				}
+
+				netevent.old = &rth->u.dst;
+				netevent.new = &rt->u.dst;
+				call_netevent_notifiers(NETEVENT_REDIRECT,
+							&netevent);
 
 				rt_del(hash, rth);
 				if (!rt_intern_hash(hash, rt, &rt))
@@ -1452,6 +1459,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 		}
 		dst->metrics[RTAX_MTU-1] = mtu;
 		dst_set_expires(dst, ip_rt_mtu_expires);
+		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
 	}
 }
 
@@ -3149,7 +3157,7 @@ int __init ip_rt_init(void)
 					rhash_entries,
 					(num_physpages >= 128 * 1024) ?
 					15 : 17,
-					HASH_HIGHMEM,
+					0,
 					&rt_hash_log,
 					&rt_hash_mask,
 					0);
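With these hunks, route.c becomes a producer for the netevent notifier chain introduced elsewhere in this merge (net/core/netevent.c in the diffstat): redirects and PMTU updates are published to interested subsystems instead of staying private to the routing cache. A hedged sketch of the consumer side, assuming the register_netevent_notifier() API the new file is expected to export:

	static int my_netevent_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		if (event == NETEVENT_PMTU_UPDATE) {
			struct dst_entry *dst = data;	/* entry whose PMTU changed */
			/* ... refresh any MTU value cached from this dst ... */
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_netevent_nb = {
		.notifier_call = my_netevent_cb,
	};

	/* at init time: register_netevent_notifier(&my_netevent_nb); */

As I understand it, the motivating consumers were RDMA drivers that cache routing state and previously had no way to learn about redirects or PMTU changes without polling.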
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f6a2d9223d07..934396bb1376 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1132,7 +1132,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	tp->ucopy.dma_chan = NULL;
 	preempt_disable();
 	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) {
+	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
 	} else
@@ -1659,7 +1659,8 @@ adjudge_to_death:
 		const int tmo = tcp_fin_time(sk);
 
 		if (tmo > TCP_TIMEWAIT_LEN) {
-			inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
+			inet_csk_reset_keepalive_timer(sk,
+					tmo - TCP_TIMEWAIT_LEN);
 		} else {
 			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
 			goto out;
diff --git a/net/ipv4/tcp_compound.c b/net/ipv4/tcp_compound.c
deleted file mode 100644
index bc54f7e9aea9..000000000000
--- a/net/ipv4/tcp_compound.c
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * TCP Vegas congestion control
- *
- * This is based on the congestion detection/avoidance scheme described in
- *    Lawrence S. Brakmo and Larry L. Peterson.
- *    "TCP Vegas: End to end congestion avoidance on a global internet."
- *    IEEE Journal on Selected Areas in Communication, 13(8):1465--1480,
- *    October 1995. Available from:
- *	ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps
- *
- * See http://www.cs.arizona.edu/xkernel/ for their implementation.
- * The main aspects that distinguish this implementation from the
- * Arizona Vegas implementation are:
- *   o We do not change the loss detection or recovery mechanisms of
- *     Linux in any way. Linux already recovers from losses quite well,
- *     using fine-grained timers, NewReno, and FACK.
- *   o To avoid the performance penalty imposed by increasing cwnd
- *     only every-other RTT during slow start, we increase during
- *     every RTT during slow start, just like Reno.
- *   o Largely to allow continuous cwnd growth during slow start,
- *     we use the rate at which ACKs come back as the "actual"
- *     rate, rather than the rate at which data is sent.
- *   o To speed convergence to the right rate, we set the cwnd
- *     to achieve the right ("actual") rate when we exit slow start.
- *   o To filter out the noise caused by delayed ACKs, we use the
- *     minimum RTT sample observed during the last RTT to calculate
- *     the actual rate.
- *   o When the sender re-starts from idle, it waits until it has
- *     received ACKs for an entire flight of new data before making
- *     a cwnd adjustment decision. The original Vegas implementation
- *     assumed senders never went idle.
- *
- *
- *   TCP Compound based on TCP Vegas
- *
- *   further details can be found here:
- *      ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/inet_diag.h>
-
-#include <net/tcp.h>
-
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-
-#define TCP_COMPOUND_ALPHA	3U
-#define TCP_COMPOUND_BETA	1U
-#define TCP_COMPOUND_GAMMA	30
-#define TCP_COMPOUND_ZETA	1
-
-/* TCP compound variables */
-struct compound {
-	u32 beg_snd_nxt;	/* right edge during last RTT */
-	u32 beg_snd_una;	/* left edge during last RTT */
-	u32 beg_snd_cwnd;	/* saves the size of the cwnd */
-	u8 doing_vegas_now;	/* if true, do vegas for this RTT */
-	u16 cntRTT;		/* # of RTTs measured within last RTT */
-	u32 minRTT;		/* min of RTTs measured within last RTT (in usec) */
-	u32 baseRTT;		/* the min of all Vegas RTT measurements seen (in usec) */
-
-	u32 cwnd;
-	u32 dwnd;
-};
-
-/* There are several situations when we must "re-start" Vegas:
- *
- *  o when a connection is established
- *  o after an RTO
- *  o after fast recovery
- *  o when we send a packet and there is no outstanding
- *    unacknowledged data (restarting an idle connection)
- *
- * In these circumstances we cannot do a Vegas calculation at the
- * end of the first RTT, because any calculation we do is using
- * stale info -- both the saved cwnd and congestion feedback are
- * stale.
- *
- * Instead we must wait until the completion of an RTT during
- * which we actually receive ACKs.
- */
-static inline void vegas_enable(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct compound *vegas = inet_csk_ca(sk);
-
-	/* Begin taking Vegas samples next time we send something. */
-	vegas->doing_vegas_now = 1;
-
-	/* Set the beginning of the next send window. */
-	vegas->beg_snd_nxt = tp->snd_nxt;
-
-	vegas->cntRTT = 0;
-	vegas->minRTT = 0x7fffffff;
-}
-
-/* Stop taking Vegas samples for now. */
-static inline void vegas_disable(struct sock *sk)
-{
-	struct compound *vegas = inet_csk_ca(sk);
-
-	vegas->doing_vegas_now = 0;
-}
-
-static void tcp_compound_init(struct sock *sk)
-{
-	struct compound *vegas = inet_csk_ca(sk);
-	const struct tcp_sock *tp = tcp_sk(sk);
-
-	vegas->baseRTT = 0x7fffffff;
-	vegas_enable(sk);
-
-	vegas->dwnd = 0;
-	vegas->cwnd = tp->snd_cwnd;
-}
-
-/* Do RTT sampling needed for Vegas.
- * Basically we:
- *   o min-filter RTT samples from within an RTT to get the current
- *     propagation delay + queuing delay (we are min-filtering to try to
- *     avoid the effects of delayed ACKs)
- *   o min-filter RTT samples from a much longer window (forever for now)
- *     to find the propagation delay (baseRTT)
- */
-static void tcp_compound_rtt_calc(struct sock *sk, u32 usrtt)
-{
-	struct compound *vegas = inet_csk_ca(sk);
-	u32 vrtt = usrtt + 1;	/* Never allow zero rtt or baseRTT */
-
-	/* Filter to find propagation delay: */
-	if (vrtt < vegas->baseRTT)
-		vegas->baseRTT = vrtt;
-
-	/* Find the min RTT during the last RTT to find
-	 * the current prop. delay + queuing delay:
-	 */
-
-	vegas->minRTT = min(vegas->minRTT, vrtt);
-	vegas->cntRTT++;
-}
-
-static void tcp_compound_state(struct sock *sk, u8 ca_state)
-{
-
-	if (ca_state == TCP_CA_Open)
-		vegas_enable(sk);
-	else
-		vegas_disable(sk);
-}
-
-
-/* 64bit divisor, dividend and result. dynamic precision */
-static inline u64 div64_64(u64 dividend, u64 divisor)
-{
-	u32 d = divisor;
-
-	if (divisor > 0xffffffffULL) {
-		unsigned int shift = fls(divisor >> 32);
-
-		d = divisor >> shift;
-		dividend >>= shift;
-	}
-
-	/* avoid 64 bit division if possible */
-	if (dividend >> 32)
-		do_div(dividend, d);
-	else
-		dividend = (u32) dividend / d;
-
-	return dividend;
-}
-
-/* calculate the quartic root of "a" using Newton-Raphson */
-static u32 qroot(u64 a)
-{
-	u32 x, x1;
-
-	/* Initial estimate is based on:
-	 * qrt(x) = exp(log(x) / 4)
-	 */
-	x = 1u << (fls64(a) >> 2);
-
-	/*
-	 * Iteration based on:
-	 *                         3
-	 * x    = ( 3 * x  +  a / x  ) / 4
-	 *  k+1          k         k
-	 */
-	do {
-		u64 x3 = x;
-
-		x1 = x;
-		x3 *= x;
-		x3 *= x;
-
-		x = (3 * x + (u32) div64_64(a, x3)) / 4;
-	} while (abs(x1 - x) > 1);
-
-	return x;
-}
-
-
-/*
- * If the connection is idle and we are restarting,
- * then we don't want to do any Vegas calculations
- * until we get fresh RTT samples. So when we
- * restart, we reset our Vegas state to a clean
- * slate. After we get acks for this flight of
- * packets, _then_ we can make Vegas calculations
- * again.
- */
-static void tcp_compound_cwnd_event(struct sock *sk, enum tcp_ca_event event)
-{
-	if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
-		tcp_compound_init(sk);
-}
-
-static void tcp_compound_cong_avoid(struct sock *sk, u32 ack,
-				    u32 seq_rtt, u32 in_flight, int flag)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct compound *vegas = inet_csk_ca(sk);
-	u8 inc = 0;
-
-	if (vegas->cwnd + vegas->dwnd > tp->snd_cwnd) {
-		if (vegas->cwnd > tp->snd_cwnd || vegas->dwnd > tp->snd_cwnd) {
-			vegas->cwnd = tp->snd_cwnd;
-			vegas->dwnd = 0;
-		} else
-			vegas->cwnd = tp->snd_cwnd - vegas->dwnd;
-
-	}
-
-	if (!tcp_is_cwnd_limited(sk, in_flight))
-		return;
-
-	if (vegas->cwnd <= tp->snd_ssthresh)
-		inc = 1;
-	else if (tp->snd_cwnd_cnt < tp->snd_cwnd)
-		tp->snd_cwnd_cnt++;
-
-	if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-		inc = 1;
-		tp->snd_cwnd_cnt = 0;
-	}
-
-	if (inc && tp->snd_cwnd < tp->snd_cwnd_clamp)
-		vegas->cwnd++;
-
-	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
-	 *
-	 * These are so named because they represent the approximate values
-	 * of snd_una and snd_nxt at the beginning of the current RTT. More
-	 * precisely, they represent the amount of data sent during the RTT.
-	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
-	 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
-	 * bytes of data have been ACKed during the course of the RTT, giving
-	 * an "actual" rate of:
-	 *
-	 *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
-	 *
-	 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
-	 * because delayed ACKs can cover more than one segment, so they
-	 * don't line up nicely with the boundaries of RTTs.
-	 *
-	 * Another unfortunate fact of life is that delayed ACKs delay the
-	 * advance of the left edge of our send window, so that the number
-	 * of bytes we send in an RTT is often less than our cwnd will allow.
-	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
-	 */
-
-	if (after(ack, vegas->beg_snd_nxt)) {
-		/* Do the Vegas once-per-RTT cwnd adjustment. */
-		u32 old_wnd, old_snd_cwnd;
-
-		/* Here old_wnd is essentially the window of data that was
-		 * sent during the previous RTT, and has all
-		 * been acknowledged in the course of the RTT that ended
-		 * with the ACK we just received. Likewise, old_snd_cwnd
-		 * is the cwnd during the previous RTT.
-		 */
-		if (!tp->mss_cache)
-			return;
-
-		old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
-		    tp->mss_cache;
-		old_snd_cwnd = vegas->beg_snd_cwnd;
-
-		/* Save the extent of the current window so we can use this
-		 * at the end of the next RTT.
-		 */
-		vegas->beg_snd_una = vegas->beg_snd_nxt;
-		vegas->beg_snd_nxt = tp->snd_nxt;
-		vegas->beg_snd_cwnd = tp->snd_cwnd;
-
-		/* We do the Vegas calculations only if we got enough RTT
-		 * samples that we can be reasonably sure that we got
-		 * at least one RTT sample that wasn't from a delayed ACK.
-		 * If we only had 2 samples total,
-		 * then that means we're getting only 1 ACK per RTT, which
-		 * means they're almost certainly delayed ACKs.
-		 * If we have 3 samples, we should be OK.
-		 */
-
-		if (vegas->cntRTT > 2) {
-			u32 rtt, target_cwnd, diff;
-			u32 brtt, dwnd;
-
-			/* We have enough RTT samples, so, using the Vegas
-			 * algorithm, we determine if we should increase or
-			 * decrease cwnd, and by how much.
-			 */
-
-			/* Pluck out the RTT we are using for the Vegas
-			 * calculations. This is the min RTT seen during the
-			 * last RTT. Taking the min filters out the effects
-			 * of delayed ACKs, at the cost of noticing congestion
-			 * a bit later.
-			 */
-			rtt = vegas->minRTT;
-
-			/* Calculate the cwnd we should have, if we weren't
-			 * going too fast.
-			 *
-			 * This is:
-			 *     (actual rate in segments) * baseRTT
-			 * We keep it as a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary point.
-			 */
-			if (!rtt)
-				return;
-
-			brtt = vegas->baseRTT;
-			target_cwnd = ((old_wnd * brtt)
-				       << V_PARAM_SHIFT) / rtt;
-
-			/* Calculate the difference between the window we had,
-			 * and the window we would like to have. This quantity
-			 * is the "Diff" from the Arizona Vegas papers.
-			 *
-			 * Again, this is a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary
-			 * point.
-			 */
-
-			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
-
-			dwnd = vegas->dwnd;
-
-			if (diff < (TCP_COMPOUND_GAMMA << V_PARAM_SHIFT)) {
-				u64 v;
-				u32 x;
-
-				/*
-				 * The TCP Compound paper describes the choice
-				 * of "k" determines the agressiveness,
-				 * ie. slope of the response function.
-				 *
-				 * For same value as HSTCP would be 0.8
-				 * but for computaional reasons, both the
-				 * original authors and this implementation
-				 * use 0.75.
-				 */
-				v = old_wnd;
-				x = qroot(v * v * v) >> TCP_COMPOUND_ALPHA;
-				if (x > 1)
-					dwnd = x - 1;
-				else
-					dwnd = 0;
-
-				dwnd += vegas->dwnd;
-
-			} else if ((dwnd << V_PARAM_SHIFT) <
-				   (diff * TCP_COMPOUND_BETA))
-				dwnd = 0;
-			else
-				dwnd =
-				    ((dwnd << V_PARAM_SHIFT) -
-				     (diff *
-				      TCP_COMPOUND_BETA)) >> V_PARAM_SHIFT;
-
-			vegas->dwnd = dwnd;
-
-		}
-
-		/* Wipe the slate clean for the next RTT. */
-		vegas->cntRTT = 0;
-		vegas->minRTT = 0x7fffffff;
-	}
-
-	tp->snd_cwnd = vegas->cwnd + vegas->dwnd;
-}
-
-/* Extract info for Tcp socket info provided via netlink. */
-static void tcp_compound_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
-{
-	const struct compound *ca = inet_csk_ca(sk);
-	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-		struct tcpvegas_info *info;
-
-		info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO,
-					  sizeof(*info)));
-
-		info->tcpv_enabled = ca->doing_vegas_now;
-		info->tcpv_rttcnt = ca->cntRTT;
-		info->tcpv_rtt = ca->baseRTT;
-		info->tcpv_minrtt = ca->minRTT;
-	      rtattr_failure:;
-	}
-}
-
-static struct tcp_congestion_ops tcp_compound = {
-	.init		= tcp_compound_init,
-	.ssthresh	= tcp_reno_ssthresh,
-	.cong_avoid	= tcp_compound_cong_avoid,
-	.rtt_sample	= tcp_compound_rtt_calc,
-	.set_state	= tcp_compound_state,
-	.cwnd_event	= tcp_compound_cwnd_event,
-	.get_info	= tcp_compound_get_info,
-
-	.owner		= THIS_MODULE,
-	.name		= "compound",
-};
-
-static int __init tcp_compound_register(void)
-{
-	BUG_ON(sizeof(struct compound) > ICSK_CA_PRIV_SIZE);
-	tcp_register_congestion_control(&tcp_compound);
-	return 0;
-}
-
-static void __exit tcp_compound_unregister(void)
-{
-	tcp_unregister_congestion_control(&tcp_compound);
-}
-
-module_init(tcp_compound_register);
-module_exit(tcp_compound_unregister);
-
-MODULE_AUTHOR("Angelo P. Castellani, Stephen Hemminger");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("TCP Compound");
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 5765f9d03174..7ff2e4273a7c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -189,7 +189,7 @@ void tcp_slow_start(struct tcp_sock *tp)
 		return;
 
 	/* We MAY increase by 2 if discovered delayed ack */
-	if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
+	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) {
 		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
 			tp->snd_cwnd++;
 	}
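The off-by-one matters exactly at the two-segment boundary. For example, with mss_cache = 1448 a delayed ACK covering two full segments leaves bytes_acked = 2896: the old test 2896 > 2896 fails, while the corrected 2896 >= 2896 passes, so the sender gets the intended two-segment increase in that common case.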
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index aaa1538c0692..fa3e1aad660c 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -139,14 +139,19 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 			tp->snd_cwnd++;
 		}
 	} else {
-		/* Update AIMD parameters */
+		/* Update AIMD parameters.
+		 *
+		 * We want to guarantee that:
+		 *     hstcp_aimd_vals[ca->ai-1].cwnd <
+		 *     snd_cwnd <=
+		 *       hstcp_aimd_vals[ca->ai].cwnd
+		 */
 		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
 			       ca->ai < HSTCP_AIMD_MAX - 1)
 				ca->ai++;
-		} else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) {
-			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
-			       ca->ai > 0)
+		} else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
+			while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
 				ca->ai--;
 		}
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 738dad9f7d49..159fa3f1ba67 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,7 +89,7 @@ int sysctl_tcp_frto;
 int sysctl_tcp_nometrics_save;
 
 int sysctl_tcp_moderate_rcvbuf = 1;
-int sysctl_tcp_abc = 1;
+int sysctl_tcp_abc;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data. */
 #define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update. */
@@ -2505,8 +2505,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	if (before(ack, prior_snd_una))
 		goto old_ack;
 
-	if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR)
-		tp->bytes_acked += ack - prior_snd_una;
+	if (sysctl_tcp_abc) {
+		if (icsk->icsk_ca_state < TCP_CA_CWR)
+			tp->bytes_acked += ack - prior_snd_una;
+		else if (icsk->icsk_ca_state == TCP_CA_Loss)
+			/* we assume just one segment left network */
+			tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
+	}
 
 	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
 		/* Window is constant, pure forward advance.
@@ -3541,7 +3546,8 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 		/* Limited by application or receiver window. */
-		u32 win_used = max(tp->snd_cwnd_used, 2U);
+		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
+		u32 win_used = max(tp->snd_cwnd_used, init_win);
 		if (win_used < tp->snd_cwnd) {
 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a886e6efbbe..4b04c3edd4a9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -438,7 +438,6 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		   It can f.e. if SYNs crossed.
 		 */
 		if (!sock_owned_by_user(sk)) {
-			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
 			sk->sk_err = err;
 
 			sk->sk_error_report(sk);
@@ -496,6 +495,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
496 } 495 }
497} 496}
498 497
498int tcp_v4_gso_send_check(struct sk_buff *skb)
499{
500 struct iphdr *iph;
501 struct tcphdr *th;
502
503 if (!pskb_may_pull(skb, sizeof(*th)))
504 return -EINVAL;
505
506 iph = skb->nh.iph;
507 th = skb->h.th;
508
509 th->check = 0;
510 th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
511 skb->csum = offsetof(struct tcphdr, check);
512 skb->ip_summed = CHECKSUM_HW;
513 return 0;
514}
515
499/* 516/*
500 * This routine will send an RST to the other tcp. 517 * This routine will send an RST to the other tcp.
501 * 518 *
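
tcp_v4_gso_send_check() prepares a GSO packet for deferred checksumming: it seeds th->check with the folded, non-inverted pseudo-header sum, records the checksum field's offset in skb->csum, and marks the skb CHECKSUM_HW so the device (or the software fallback) finishes the sum per segment; the tcp_v6_gso_send_check() added later in this merge does the same with csum_ipv6_magic(). A userspace model of the value ~tcp_v4_check(th, len, saddr, daddr, 0) leaves behind (all field values below are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Ones-complement sum of the IPv4 pseudo-header. */
    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                                   uint16_t len, uint8_t proto)
    {
        uint32_t sum = 0;

        sum += saddr >> 16;
        sum += saddr & 0xffff;
        sum += daddr >> 16;
        sum += daddr & 0xffff;
        sum += proto;               /* 16-bit word: zero byte, protocol */
        sum += len;

        while (sum >> 16)           /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);

        /* Folded but deliberately NOT inverted: the device adds the
         * per-segment payload sum on top and inverts at the end. */
        return (uint16_t)sum;
    }

    int main(void)
    {
        printf("0x%04x\n",
               (unsigned)pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 1500, 6));
        return 0;
    }
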
@@ -856,7 +873,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
856drop_and_free: 873drop_and_free:
857 reqsk_free(req); 874 reqsk_free(req);
858drop: 875drop:
859 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
860 return 0; 876 return 0;
861} 877}
862 878
@@ -1622,10 +1638,9 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
1622 if (unlikely(afinfo == NULL)) 1638 if (unlikely(afinfo == NULL))
1623 return -EINVAL; 1639 return -EINVAL;
1624 1640
1625 s = kmalloc(sizeof(*s), GFP_KERNEL); 1641 s = kzalloc(sizeof(*s), GFP_KERNEL);
1626 if (!s) 1642 if (!s)
1627 return -ENOMEM; 1643 return -ENOMEM;
1628 memset(s, 0, sizeof(*s));
1629 s->family = afinfo->family; 1644 s->family = afinfo->family;
1630 s->seq_ops.start = tcp_seq_start; 1645 s->seq_ops.start = tcp_seq_start;
1631 s->seq_ops.next = tcp_seq_next; 1646 s->seq_ops.next = tcp_seq_next;
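
This hunk, like the udp_seq_open() one further down, is the mechanical kmalloc()-plus-memset() to kzalloc() conversion: one call that allocates and zeroes. A minimal sketch of the equivalence, using calloc() as the userspace analogue and a made-up struct:

    #include <stdlib.h>
    #include <string.h>

    struct state { int family; void *ops; };

    int main(void)
    {
        /* Before: two calls. */
        struct state *a = malloc(sizeof(*a));
        if (a)
            memset(a, 0, sizeof(*a));

        /* After: one call, same zeroed result (calloc is the userspace
         * analogue of kzalloc). */
        struct state *b = calloc(1, sizeof(*b));

        free(a);
        free(b);
        return 0;
    }
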
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 1f977b6ee9a1..48f28d617ce6 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -3,13 +3,8 @@
3 * 3 *
4 * TCP Low Priority is a distributed algorithm whose goal is to utilize only 4 * TCP Low Priority is a distributed algorithm whose goal is to utilize only
5 * the excess network bandwidth as compared to the ``fair share`` of 5 * the excess network bandwidth as compared to the ``fair share`` of
6 * bandwidth as targeted by TCP. Available from: 6 * bandwidth as targeted by TCP.
7 * http://www.ece.rice.edu/~akuzma/Doc/akuzma/TCP-LP.pdf
8 * 7 *
9 * Original Author:
10 * Aleksandar Kuzmanovic <akuzma@northwestern.edu>
11 *
12 * See http://www-ece.rice.edu/networks/TCP-LP/ for their implementation.
13 * As of 2.6.13, Linux supports pluggable congestion control algorithms. 8 * As of 2.6.13, Linux supports pluggable congestion control algorithms.
14 * Due to the limitation of the API, we take the following changes from 9 * Due to the limitation of the API, we take the following changes from
15 * the original TCP-LP implementation: 10 * the original TCP-LP implementation:
@@ -24,11 +19,20 @@
24 * o OWD is handled in relative format, where local time stamp will in 19 * o OWD is handled in relative format, where local time stamp will in
25 * tcp_time_stamp format. 20 * tcp_time_stamp format.
26 * 21 *
27 * Port from 2.4.19 to 2.6.16 as module by: 22 * Original Author:
28 * Wong Hoi Sing Edison <hswong3i@gmail.com> 23 * Aleksandar Kuzmanovic <akuzma@northwestern.edu>
29 * Hung Hing Lun <hlhung3i@gmail.com> 24 * Available from:
25 * http://www.ece.rice.edu/~akuzma/Doc/akuzma/TCP-LP.pdf
26 * Original implementation for 2.4.19:
27 * http://www-ece.rice.edu/networks/TCP-LP/
30 * 28 *
31 * Version: $Id: tcp_lp.c,v 1.22 2006-05-02 18:18:19 hswong3i Exp $ 29 * 2.6.x module Authors:
30 * Wong Hoi Sing, Edison <hswong3i@gmail.com>
31 * Hung Hing Lun, Mike <hlhung3i@gmail.com>
32 * SourceForge project page:
33 * http://tcp-lp-mod.sourceforge.net/
34 *
35 * Version: $Id: tcp_lp.c,v 1.24 2006/09/05 20:22:53 hswong3i Exp $
32 */ 36 */
33 37
34#include <linux/config.h> 38#include <linux/config.h>
@@ -153,16 +157,19 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
153 if (m < 0) 157 if (m < 0)
154 m = -m; 158 m = -m;
155 159
156 if (rhz != 0) { 160 if (rhz > 0) {
157 m -= rhz >> 6; /* m is now error in remote HZ est */ 161 m -= rhz >> 6; /* m is now error in remote HZ est */
158 rhz += m; /* 63/64 old + 1/64 new */ 162 rhz += m; /* 63/64 old + 1/64 new */
159 } else 163 } else
160 rhz = m << 6; 164 rhz = m << 6;
161 165
166 out:
162 /* record time for successful remote HZ calc */ 167 /* record time for successful remote HZ calc */
163 lp->flag |= LP_VALID_RHZ; 168 if (rhz > 0)
169 lp->flag |= LP_VALID_RHZ;
170 else
171 lp->flag &= ~LP_VALID_RHZ;
164 172
165 out:
166 /* record reference time stamp */ 173 /* record reference time stamp */
167 lp->remote_ref_time = tp->rx_opt.rcv_tsval; 174 lp->remote_ref_time = tp->rx_opt.rcv_tsval;
168 lp->local_ref_time = tp->rx_opt.rcv_tsecr; 175 lp->local_ref_time = tp->rx_opt.rcv_tsecr;
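
The estimator above keeps the remote-HZ estimate rhz in fixed point, scaled by 64, and blends each new inter-timestamp sample in at 1/64 weight; with the out: label moved up, the LP_VALID_RHZ flag is now set only when the estimate is actually positive and cleared otherwise. A userspace sketch of the fixed-point EWMA with an invented sample stream:

    #include <stdio.h>

    /* rhz is kept scaled by 64 (<< 6); each sample m nudges it by 1/64
     * of the error, mirroring the kernel's 63/64 old + 1/64 new blend. */
    static long ewma_update(long rhz, long m)
    {
        if (m < 0)
            m = -m;

        if (rhz > 0) {
            m -= rhz >> 6;  /* m is now the error in the remote HZ estimate */
            rhz += m;       /* 63/64 old + 1/64 new */
        } else {
            rhz = m << 6;   /* first sample seeds the estimate */
        }
        return rhz;
    }

    int main(void)
    {
        long rhz = 0;
        long samples[] = { 100, 102, 98, 101 };  /* invented HZ samples */
        int i;

        for (i = 0; i < 4; i++)
            rhz = ewma_update(rhz, samples[i]);

        printf("estimate: %ld\n", rhz >> 6);     /* descaled: 100 */
        return 0;
    }
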
@@ -333,6 +340,6 @@ static void __exit tcp_lp_unregister(void)
333module_init(tcp_lp_register); 340module_init(tcp_lp_register);
334module_exit(tcp_lp_unregister); 341module_exit(tcp_lp_unregister);
335 342
336MODULE_AUTHOR("Wong Hoi Sing Edison, Hung Hing Lun"); 343MODULE_AUTHOR("Wong Hoi Sing Edison, Hung Hing Lun Mike");
337MODULE_LICENSE("GPL"); 344MODULE_LICENSE("GPL");
338MODULE_DESCRIPTION("TCP Low Priority"); 345MODULE_DESCRIPTION("TCP Low Priority");
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0ccb7cb22b15..624e2b2c7f53 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -589,8 +589,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
589 /* RFC793: "second check the RST bit" and 589 /* RFC793: "second check the RST bit" and
590 * "fourth, check the SYN bit" 590 * "fourth, check the SYN bit"
591 */ 591 */
592 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) 592 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
593 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
593 goto embryonic_reset; 594 goto embryonic_reset;
595 }
594 596
595 /* ACK sequence verified above, just make sure ACK is 597 /* ACK sequence verified above, just make sure ACK is
596 * set. If ACK not set, just silently drop the packet. 598 * set. If ACK not set, just silently drop the packet.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5c08ea20a18d..b4f3ffe1b3b4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -201,6 +201,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
201 * See RFC1323 for an explanation of the limit to 14 201 * See RFC1323 for an explanation of the limit to 14
202 */ 202 */
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp);
204 while (space > 65535 && (*rcv_wscale) < 14) { 205 while (space > 65535 && (*rcv_wscale) < 14) {
205 space >>= 1; 206 space >>= 1;
206 (*rcv_wscale)++; 207 (*rcv_wscale)++;
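
The added min_t() clamps the scaling search space to the connection's window_clamp, so the chosen rcv_wscale no longer assumes receive memory the window will never be allowed to reach. A sketch of the scale selection under a hypothetical clamp:

    #include <stdio.h>

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* Pick the smallest window scale that lets the 16-bit window field
     * cover `space`, after clamping to the per-connection window_clamp
     * (RFC 1323 limits the shift to 14). */
    static int select_wscale(unsigned int space, unsigned int window_clamp)
    {
        int rcv_wscale = 0;

        space = min_u32(space, window_clamp);
        while (space > 65535 && rcv_wscale < 14) {
            space >>= 1;
            rcv_wscale++;
        }
        return rcv_wscale;
    }

    int main(void)
    {
        /* Without the clamp a 4 MB buffer would pick scale 7; a 128 KB
         * clamp now yields scale 2. */
        printf("%d\n", select_wscale(4 * 1024 * 1024, 128 * 1024));
        return 0;
    }
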
@@ -466,7 +467,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
466 if (skb->len != tcp_header_size) 467 if (skb->len != tcp_header_size)
467 tcp_event_data_sent(tp, skb, sk); 468 tcp_event_data_sent(tp, skb, sk);
468 469
469 TCP_INC_STATS(TCP_MIB_OUTSEGS); 470 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
471 TCP_INC_STATS(TCP_MIB_OUTSEGS);
470 472
471 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 473 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
472 if (likely(err <= 0)) 474 if (likely(err <= 0))
@@ -2157,10 +2159,9 @@ int tcp_connect(struct sock *sk)
2157 skb_shinfo(buff)->gso_size = 0; 2159 skb_shinfo(buff)->gso_size = 0;
2158 skb_shinfo(buff)->gso_type = 0; 2160 skb_shinfo(buff)->gso_type = 0;
2159 buff->csum = 0; 2161 buff->csum = 0;
2162 tp->snd_nxt = tp->write_seq;
2160 TCP_SKB_CB(buff)->seq = tp->write_seq++; 2163 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2161 TCP_SKB_CB(buff)->end_seq = tp->write_seq; 2164 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2162 tp->snd_nxt = tp->write_seq;
2163 tp->pushed_seq = tp->write_seq;
2164 2165
2165 /* Send it off. */ 2166 /* Send it off. */
2166 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2167 TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2171,12 @@ int tcp_connect(struct sock *sk)
2170 sk_charge_skb(sk, buff); 2171 sk_charge_skb(sk, buff);
2171 tp->packets_out += tcp_skb_pcount(buff); 2172 tp->packets_out += tcp_skb_pcount(buff);
2172 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); 2173 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2174
2175 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2176 * in order to make this packet get counted in tcpOutSegs.
2177 */
2178 tp->snd_nxt = tp->write_seq;
2179 tp->pushed_seq = tp->write_seq;
2173 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 2180 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2174 2181
2175 /* Timer for repeating the SYN until an answer. */ 2182 /* Timer for repeating the SYN until an answer. */
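
The two tcp_output.c hunks cooperate: tcp_transmit_skb() now bumps tcpOutSegs only for segments carrying data beyond snd_nxt (or pure ACKs, where seq == end_seq), so retransmissions stop inflating the counter, and tcp_connect() must hold snd_nxt at the SYN's starting sequence until after the transmit call or the very first SYN would be skipped. A small model of the counting predicate, using the kernel-style wraparound comparison:

    #include <stdio.h>

    /* after(a, b): sequence-number comparison modulo 2^32, as in the
     * kernel's before()/after() helpers. */
    static int after(unsigned int seq1, unsigned int seq2)
    {
        return (int)(seq2 - seq1) < 0;
    }

    /* Count a segment toward tcpOutSegs only if it sends fresh data
     * past snd_nxt, or is a pure ACK (seq == end_seq); retransmits
     * fall through uncounted. */
    static int counts_as_outseg(unsigned int seq, unsigned int end_seq,
                                unsigned int snd_nxt)
    {
        return after(end_seq, snd_nxt) || seq == end_seq;
    }

    int main(void)
    {
        printf("%d\n", counts_as_outseg(1000, 2000, 1500)); /* new data: 1 */
        printf("%d\n", counts_as_outseg(1000, 1500, 2000)); /* rexmit: 0 */
        printf("%d\n", counts_as_outseg(2000, 2000, 2000)); /* pure ACK: 1 */
        return 0;
    }
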
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index d7d517a3a238..dab37d2f65fc 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -114,7 +114,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file)
114static ssize_t tcpprobe_read(struct file *file, char __user *buf, 114static ssize_t tcpprobe_read(struct file *file, char __user *buf,
115 size_t len, loff_t *ppos) 115 size_t len, loff_t *ppos)
116{ 116{
117 int error = 0, cnt; 117 int error = 0, cnt = 0;
118 unsigned char *tbuf; 118 unsigned char *tbuf;
119 119
120 if (!buf || len < 0) 120 if (!buf || len < 0)
@@ -130,11 +130,12 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
130 error = wait_event_interruptible(tcpw.wait, 130 error = wait_event_interruptible(tcpw.wait,
131 __kfifo_len(tcpw.fifo) != 0); 131 __kfifo_len(tcpw.fifo) != 0);
132 if (error) 132 if (error)
133 return error; 133 goto out_free;
134 134
135 cnt = kfifo_get(tcpw.fifo, tbuf, len); 135 cnt = kfifo_get(tcpw.fifo, tbuf, len);
136 error = copy_to_user(buf, tbuf, cnt); 136 error = copy_to_user(buf, tbuf, cnt);
137 137
138out_free:
138 vfree(tbuf); 139 vfree(tbuf);
139 140
140 return error ? error : cnt; 141 return error ? error : cnt;
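
The tcp_probe fix plugs a buffer leak: an interrupted wait used to return straight out of tcpprobe_read(), abandoning the vmalloc'd tbuf, and cnt could reach the final return uninitialized. Routing every exit through one cleanup label is the standard idiom; a compact userspace model with invented stand-ins for the wait/fifo plumbing:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the interruptible wait in tcpprobe_read(). */
    static int wait_for_data(void) { return -4; /* pretend -EINTR */ }

    static int read_probe(int len)
    {
        int error = 0, cnt = 0;   /* cnt initialized: not yet assigned on
                                   * the early-exit path */
        char *tbuf = malloc(len);

        if (!tbuf)
            return -12;           /* -ENOMEM */

        error = wait_for_data();
        if (error)
            goto out_free;        /* was: return error -- leaked tbuf */

        cnt = len;                /* pretend we drained the fifo */

    out_free:
        free(tbuf);
        return error ? error : cnt;
    }

    int main(void)
    {
        printf("%d\n", read_probe(64)); /* -4, with the buffer freed */
        return 0;
    }
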
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9bfcddad695b..f136cec96d95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1468,11 +1468,10 @@ static int udp_seq_open(struct inode *inode, struct file *file)
1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data; 1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data;
1469 struct seq_file *seq; 1469 struct seq_file *seq;
1470 int rc = -ENOMEM; 1470 int rc = -ENOMEM;
1471 struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1471 struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1472 1472
1473 if (!s) 1473 if (!s)
1474 goto out; 1474 goto out;
1475 memset(s, 0, sizeof(*s));
1476 s->family = afinfo->family; 1475 s->family = afinfo->family;
1477 s->seq_ops.start = udp_seq_start; 1476 s->seq_ops.start = udp_seq_start;
1478 s->seq_ops.next = udp_seq_next; 1477 s->seq_ops.next = udp_seq_next;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index f8d880beb12f..13cafbe56ce3 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,6 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 skb->mac.raw = memmove(skb->data - skb->mac_len, 92 skb->mac.raw = memmove(skb->data - skb->mac_len,
93 skb->mac.raw, skb->mac_len); 93 skb->mac.raw, skb->mac_len);
94 skb->nh.raw = skb->data; 94 skb->nh.raw = skb->data;
95 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
96 err = 0; 95 err = 0;
97 96
98out: 97out:
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 193363e22932..d16f863cf687 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
134 } 134 }
135#endif 135#endif
136 136
137 if (!skb_shinfo(skb)->gso_size) 137 if (!skb_is_gso(skb))
138 return xfrm4_output_finish2(skb); 138 return xfrm4_output_finish2(skb);
139 139
140 skb->protocol = htons(ETH_P_IP); 140 skb->protocol = htons(ETH_P_IP);
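
This hunk, together with the ip6_output/ip6_xmit and xfrm6_output changes later in the merge, replaces open-coded skb_shinfo(skb)->gso_size tests with the skb_is_gso() helper. A minimal model of such a predicate wrapper over a hypothetical cut-down struct layout:

    #include <stdio.h>

    /* Hypothetical miniature of skb_shared_info: gso_size is nonzero
     * only for packets that still need segmentation. */
    struct shared_info { unsigned short gso_size; };
    struct sk_buff     { struct shared_info shinfo; };

    /* Predicate wrapper: one obvious name instead of a repeated field
     * test, matching the intent of the kernel's skb_is_gso(). */
    static int skb_is_gso(const struct sk_buff *skb)
    {
        return skb->shinfo.gso_size != 0;
    }

    int main(void)
    {
        struct sk_buff plain = { { 0 } }, gso = { { 1448 } };
        printf("%d %d\n", skb_is_gso(&plain), skb_is_gso(&gso)); /* 0 1 */
        return 0;
    }
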
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c250d0af10d7..c7852b38e03e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -508,6 +508,26 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
508 kfree(ifp); 508 kfree(ifp);
509} 509}
510 510
511static void
512ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
513{
514 struct inet6_ifaddr *ifa, **ifap;
515 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
516
517 /*
518 * Each device address list is sorted in order of scope -
519 * global before linklocal.
520 */
521 for (ifap = &idev->addr_list; (ifa = *ifap) != NULL;
522 ifap = &ifa->if_next) {
523 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
524 break;
525 }
526
527 ifp->if_next = *ifap;
528 *ifap = ifp;
529}
530
511/* On success it returns ifp with increased reference count */ 531/* On success it returns ifp with increased reference count */
512 532
513static struct inet6_ifaddr * 533static struct inet6_ifaddr *
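
The new ipv6_link_dev_addr() keeps each inet6_dev address list ordered by source-address scope, global ahead of link-local, which is what later lets ipv6_dev_get_saddr() turn a continue into a break once candidate scopes drop below the destination's. A userspace sketch of the same pointer-to-pointer sorted insertion, with an invented node type where a larger scope value sorts earlier:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical miniature of inet6_ifaddr: larger scope value sorts
     * earlier (e.g. global before link-local). */
    struct addr {
        int scope;
        struct addr *next;
    };

    static void link_sorted(struct addr **head, struct addr *ifp)
    {
        struct addr **ifap, *ifa;

        for (ifap = head; (ifa = *ifap) != NULL; ifap = &ifa->next) {
            if (ifp->scope >= ifa->scope)
                break;  /* insert before the first equal-or-smaller scope */
        }
        ifp->next = *ifap;
        *ifap = ifp;
    }

    int main(void)
    {
        struct addr *head = NULL, *a;
        int scopes[] = { 1, 3, 2 };
        int i;

        for (i = 0; i < 3; i++) {
            a = calloc(1, sizeof(*a));
            a->scope = scopes[i];
            link_sorted(&head, a);
        }
        for (a = head; a; a = a->next)
            printf("%d ", a->scope);  /* prints: 3 2 1 */
        printf("\n");
        return 0;
    }
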
@@ -558,6 +578,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
558 ifa->flags = flags | IFA_F_TENTATIVE; 578 ifa->flags = flags | IFA_F_TENTATIVE;
559 ifa->cstamp = ifa->tstamp = jiffies; 579 ifa->cstamp = ifa->tstamp = jiffies;
560 580
581 ifa->rt = rt;
582
561 ifa->idev = idev; 583 ifa->idev = idev;
562 in6_dev_hold(idev); 584 in6_dev_hold(idev);
563 /* For caller */ 585 /* For caller */
@@ -573,8 +595,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
573 595
574 write_lock(&idev->lock); 596 write_lock(&idev->lock);
575 /* Add to inet6_dev unicast addr list. */ 597 /* Add to inet6_dev unicast addr list. */
576 ifa->if_next = idev->addr_list; 598 ipv6_link_dev_addr(idev, ifa);
577 idev->addr_list = ifa;
578 599
579#ifdef CONFIG_IPV6_PRIVACY 600#ifdef CONFIG_IPV6_PRIVACY
580 if (ifa->flags&IFA_F_TEMPORARY) { 601 if (ifa->flags&IFA_F_TEMPORARY) {
@@ -584,8 +605,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
584 } 605 }
585#endif 606#endif
586 607
587 ifa->rt = rt;
588
589 in6_ifa_hold(ifa); 608 in6_ifa_hold(ifa);
590 write_unlock(&idev->lock); 609 write_unlock(&idev->lock);
591out2: 610out2:
@@ -987,7 +1006,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
987 continue; 1006 continue;
988 } else if (score.scope < hiscore.scope) { 1007 } else if (score.scope < hiscore.scope) {
989 if (score.scope < daddr_scope) 1008 if (score.scope < daddr_scope)
990 continue; 1009 break; /* addresses sorted by scope */
991 else { 1010 else {
992 score.rule = 2; 1011 score.rule = 2;
993 goto record_it; 1012 goto record_it;
@@ -1850,15 +1869,21 @@ err_exit:
1850/* 1869/*
1851 * Manual configuration of address on an interface 1870 * Manual configuration of address on an interface
1852 */ 1871 */
1853static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen) 1872static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
1873 __u32 prefered_lft, __u32 valid_lft)
1854{ 1874{
1855 struct inet6_ifaddr *ifp; 1875 struct inet6_ifaddr *ifp;
1856 struct inet6_dev *idev; 1876 struct inet6_dev *idev;
1857 struct net_device *dev; 1877 struct net_device *dev;
1878 __u8 ifa_flags = 0;
1858 int scope; 1879 int scope;
1859 1880
1860 ASSERT_RTNL(); 1881 ASSERT_RTNL();
1861 1882
1883 /* check the lifetime */
1884 if (!valid_lft || prefered_lft > valid_lft)
1885 return -EINVAL;
1886
1862 if ((dev = __dev_get_by_index(ifindex)) == NULL) 1887 if ((dev = __dev_get_by_index(ifindex)) == NULL)
1863 return -ENODEV; 1888 return -ENODEV;
1864 1889
@@ -1870,10 +1895,29 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen)
1870 1895
1871 scope = ipv6_addr_scope(pfx); 1896 scope = ipv6_addr_scope(pfx);
1872 1897
1873 ifp = ipv6_add_addr(idev, pfx, plen, scope, IFA_F_PERMANENT); 1898 if (valid_lft == INFINITY_LIFE_TIME)
1899 ifa_flags |= IFA_F_PERMANENT;
1900 else if (valid_lft >= 0x7FFFFFFF/HZ)
1901 valid_lft = 0x7FFFFFFF/HZ;
1902
1903 if (prefered_lft == 0)
1904 ifa_flags |= IFA_F_DEPRECATED;
1905 else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
1906 (prefered_lft != INFINITY_LIFE_TIME))
1907 prefered_lft = 0x7FFFFFFF/HZ;
1908
1909 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
1910
1874 if (!IS_ERR(ifp)) { 1911 if (!IS_ERR(ifp)) {
1912 spin_lock_bh(&ifp->lock);
1913 ifp->valid_lft = valid_lft;
1914 ifp->prefered_lft = prefered_lft;
1915 ifp->tstamp = jiffies;
1916 spin_unlock_bh(&ifp->lock);
1917
1875 addrconf_dad_start(ifp, 0); 1918 addrconf_dad_start(ifp, 0);
1876 in6_ifa_put(ifp); 1919 in6_ifa_put(ifp);
1920 addrconf_verify(0);
1877 return 0; 1921 return 0;
1878 } 1922 }
1879 1923
@@ -1926,7 +1970,8 @@ int addrconf_add_ifaddr(void __user *arg)
1926 return -EFAULT; 1970 return -EFAULT;
1927 1971
1928 rtnl_lock(); 1972 rtnl_lock();
1929 err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); 1973 err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen,
1974 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1930 rtnl_unlock(); 1975 rtnl_unlock();
1931 return err; 1976 return err;
1932} 1977}
@@ -2752,12 +2797,16 @@ restart:
2752 ifp->idev->nd_parms->retrans_time / HZ; 2797 ifp->idev->nd_parms->retrans_time / HZ;
2753#endif 2798#endif
2754 2799
2755 if (age >= ifp->valid_lft) { 2800 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
2801 age >= ifp->valid_lft) {
2756 spin_unlock(&ifp->lock); 2802 spin_unlock(&ifp->lock);
2757 in6_ifa_hold(ifp); 2803 in6_ifa_hold(ifp);
2758 read_unlock(&addrconf_hash_lock); 2804 read_unlock(&addrconf_hash_lock);
2759 ipv6_del_addr(ifp); 2805 ipv6_del_addr(ifp);
2760 goto restart; 2806 goto restart;
2807 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
2808 spin_unlock(&ifp->lock);
2809 continue;
2761 } else if (age >= ifp->prefered_lft) { 2810 } else if (age >= ifp->prefered_lft) {
2762 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */ 2811 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
2763 int deprecate = 0; 2812 int deprecate = 0;
@@ -2834,7 +2883,8 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2834 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2883 pfx = RTA_DATA(rta[IFA_ADDRESS-1]);
2835 } 2884 }
2836 if (rta[IFA_LOCAL-1]) { 2885 if (rta[IFA_LOCAL-1]) {
2837 if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2886 if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) ||
2887 (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))))
2838 return -EINVAL; 2888 return -EINVAL;
2839 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2889 pfx = RTA_DATA(rta[IFA_LOCAL-1]);
2840 } 2890 }
@@ -2845,11 +2895,61 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2845} 2895}
2846 2896
2847static int 2897static int
2898inet6_addr_modify(int ifindex, struct in6_addr *pfx,
2899 __u32 prefered_lft, __u32 valid_lft)
2900{
2901 struct inet6_ifaddr *ifp = NULL;
2902 struct net_device *dev;
2903 int ifa_flags = 0;
2904
2905 if ((dev = __dev_get_by_index(ifindex)) == NULL)
2906 return -ENODEV;
2907
2908 if (!(dev->flags&IFF_UP))
2909 return -ENETDOWN;
2910
2911 if (!valid_lft || (prefered_lft > valid_lft))
2912 return -EINVAL;
2913
2914 ifp = ipv6_get_ifaddr(pfx, dev, 1);
2915 if (ifp == NULL)
2916 return -ENOENT;
2917
2918 if (valid_lft == INFINITY_LIFE_TIME)
2919 ifa_flags = IFA_F_PERMANENT;
2920 else if (valid_lft >= 0x7FFFFFFF/HZ)
2921 valid_lft = 0x7FFFFFFF/HZ;
2922
2923 if (prefered_lft == 0)
2924 ifa_flags = IFA_F_DEPRECATED;
2925 else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
2926 (prefered_lft != INFINITY_LIFE_TIME))
2927 prefered_lft = 0x7FFFFFFF/HZ;
2928
2929 spin_lock_bh(&ifp->lock);
2930 ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED|IFA_F_PERMANENT)) | ifa_flags;
2931
2932 ifp->tstamp = jiffies;
2933 ifp->valid_lft = valid_lft;
2934 ifp->prefered_lft = prefered_lft;
2935
2936 spin_unlock_bh(&ifp->lock);
2937 if (!(ifp->flags&IFA_F_TENTATIVE))
2938 ipv6_ifa_notify(0, ifp);
2939 in6_ifa_put(ifp);
2940
2941 addrconf_verify(0);
2942
2943 return 0;
2944}
2945
2946static int
2848inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2947inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2849{ 2948{
2850 struct rtattr **rta = arg; 2949 struct rtattr **rta = arg;
2851 struct ifaddrmsg *ifm = NLMSG_DATA(nlh); 2950 struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
2852 struct in6_addr *pfx; 2951 struct in6_addr *pfx;
2952 __u32 valid_lft = INFINITY_LIFE_TIME, prefered_lft = INFINITY_LIFE_TIME;
2853 2953
2854 pfx = NULL; 2954 pfx = NULL;
2855 if (rta[IFA_ADDRESS-1]) { 2955 if (rta[IFA_ADDRESS-1]) {
@@ -2858,14 +2958,34 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2858 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2958 pfx = RTA_DATA(rta[IFA_ADDRESS-1]);
2859 } 2959 }
2860 if (rta[IFA_LOCAL-1]) { 2960 if (rta[IFA_LOCAL-1]) {
2861 if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2961 if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) ||
2962 (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))))
2862 return -EINVAL; 2963 return -EINVAL;
2863 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2964 pfx = RTA_DATA(rta[IFA_LOCAL-1]);
2864 } 2965 }
2865 if (pfx == NULL) 2966 if (pfx == NULL)
2866 return -EINVAL; 2967 return -EINVAL;
2867 2968
2868 return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen); 2969 if (rta[IFA_CACHEINFO-1]) {
2970 struct ifa_cacheinfo *ci;
2971 if (RTA_PAYLOAD(rta[IFA_CACHEINFO-1]) < sizeof(*ci))
2972 return -EINVAL;
2973 ci = RTA_DATA(rta[IFA_CACHEINFO-1]);
2974 valid_lft = ci->ifa_valid;
2975 prefered_lft = ci->ifa_prefered;
2976 }
2977
2978 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
2979 int ret;
2980 ret = inet6_addr_modify(ifm->ifa_index, pfx,
2981 prefered_lft, valid_lft);
2982 if (ret == 0 || !(nlh->nlmsg_flags & NLM_F_CREATE))
2983 return ret;
2984 }
2985
2986 return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen,
2987 prefered_lft, valid_lft);
2988
2869} 2989}
2870 2990
2871/* Maximum length of ifa_cacheinfo attributes */ 2991/* Maximum length of ifa_cacheinfo attributes */
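
With this change RTM_NEWADDR honors the standard netlink create/replace semantics: NLM_F_REPLACE first attempts inet6_addr_modify() on an existing address, and only a miss combined with NLM_F_CREATE falls through to inet6_addr_add(). A small dispatch sketch with stand-in table operations:

    #include <stdio.h>

    #define NLM_F_REPLACE 0x100
    #define NLM_F_CREATE  0x400
    #define ENOENT 2

    /* Stand-ins for the address table operations. */
    static int addr_modify(int exists) { return exists ? 0 : -ENOENT; }
    static int addr_add(void)          { return 0; }

    /* Flag dispatch mirroring the inet6_rtm_newaddr() hunk. */
    static int newaddr(int flags, int exists)
    {
        if (flags & NLM_F_REPLACE) {
            int ret = addr_modify(exists);
            if (ret == 0 || !(flags & NLM_F_CREATE))
                return ret;      /* replaced, or no permission to create */
        }
        return addr_add();
    }

    int main(void)
    {
        printf("%d\n", newaddr(NLM_F_REPLACE, 0));                /* -ENOENT */
        printf("%d\n", newaddr(NLM_F_REPLACE | NLM_F_CREATE, 0)); /* 0: created */
        return 0;
    }
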
@@ -3102,6 +3222,62 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
3102 return inet6_dump_addr(skb, cb, type); 3222 return inet6_dump_addr(skb, cb, type);
3103} 3223}
3104 3224
3225static int inet6_rtm_getaddr(struct sk_buff *in_skb,
3226 struct nlmsghdr* nlh, void *arg)
3227{
3228 struct rtattr **rta = arg;
3229 struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
3230 struct in6_addr *addr = NULL;
3231 struct net_device *dev = NULL;
3232 struct inet6_ifaddr *ifa;
3233 struct sk_buff *skb;
3234 int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + INET6_IFADDR_RTA_SPACE);
3235 int err;
3236
3237 if (rta[IFA_ADDRESS-1]) {
3238 if (RTA_PAYLOAD(rta[IFA_ADDRESS-1]) < sizeof(*addr))
3239 return -EINVAL;
3240 addr = RTA_DATA(rta[IFA_ADDRESS-1]);
3241 }
3242 if (rta[IFA_LOCAL-1]) {
3243 if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*addr) ||
3244 (addr && memcmp(addr, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*addr))))
3245 return -EINVAL;
3246 addr = RTA_DATA(rta[IFA_LOCAL-1]);
3247 }
3248 if (addr == NULL)
3249 return -EINVAL;
3250
3251 if (ifm->ifa_index)
3252 dev = __dev_get_by_index(ifm->ifa_index);
3253
3254 if ((ifa = ipv6_get_ifaddr(addr, dev, 1)) == NULL)
3255 return -EADDRNOTAVAIL;
3256
3257 if ((skb = alloc_skb(size, GFP_KERNEL)) == NULL) {
3258 err = -ENOBUFS;
3259 goto out;
3260 }
3261
3262 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
3263 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
3264 nlh->nlmsg_seq, RTM_NEWADDR, 0);
3265 if (err < 0) {
3266 err = -EMSGSIZE;
3267 goto out_free;
3268 }
3269
3270 err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
3271 if (err > 0)
3272 err = 0;
3273out:
3274 in6_ifa_put(ifa);
3275 return err;
3276out_free:
3277 kfree_skb(skb);
3278 goto out;
3279}
3280
3105static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) 3281static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3106{ 3282{
3107 struct sk_buff *skb; 3283 struct sk_buff *skb;
@@ -3344,7 +3520,8 @@ static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = {
3344 [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, 3520 [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, },
3345 [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, }, 3521 [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, },
3346 [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, 3522 [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, },
3347 [RTM_GETADDR - RTM_BASE] = { .dumpit = inet6_dump_ifaddr, }, 3523 [RTM_GETADDR - RTM_BASE] = { .doit = inet6_rtm_getaddr,
3524 .dumpit = inet6_dump_ifaddr, },
3348 [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, }, 3525 [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, },
3349 [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, }, 3526 [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, },
3350 [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, }, 3527 [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, },
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 5a0ba58b86cc..ac85e9c532c2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -658,7 +658,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
658 return err; 658 return err;
659 } 659 }
660 660
661 ip6_dst_store(sk, dst, NULL); 661 __ip6_dst_store(sk, dst, NULL);
662 } 662 }
663 663
664 return 0; 664 return 0;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 99a6eb23378b..3b55b4c8e2d1 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -696,7 +696,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
696 } 696 }
697 697
698 tc = *(int *)CMSG_DATA(cmsg); 698 tc = *(int *)CMSG_DATA(cmsg);
699 if (tc < 0 || tc > 0xff) 699 if (tc < -1 || tc > 0xff)
700 goto exit_f; 700 goto exit_f;
701 701
702 err = 0; 702 err = 0;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 9d0ee7f0eeb5..86dac106873b 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -635,14 +635,17 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
635 struct ipv6_txoptions *opt2; 635 struct ipv6_txoptions *opt2;
636 int err; 636 int err;
637 637
638 if (newtype != IPV6_HOPOPTS && opt->hopopt) 638 if (opt) {
639 tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt)); 639 if (newtype != IPV6_HOPOPTS && opt->hopopt)
640 if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt) 640 tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
641 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt)); 641 if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
642 if (newtype != IPV6_RTHDR && opt->srcrt) 642 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
643 tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt)); 643 if (newtype != IPV6_RTHDR && opt->srcrt)
644 if (newtype != IPV6_DSTOPTS && opt->dst1opt) 644 tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
645 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); 645 if (newtype != IPV6_DSTOPTS && opt->dst1opt)
646 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
647 }
648
646 if (newopt && newoptlen) 649 if (newopt && newoptlen)
647 tot_len += CMSG_ALIGN(newoptlen); 650 tot_len += CMSG_ALIGN(newoptlen);
648 651
@@ -659,25 +662,25 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
659 opt2->tot_len = tot_len; 662 opt2->tot_len = tot_len;
660 p = (char *)(opt2 + 1); 663 p = (char *)(opt2 + 1);
661 664
662 err = ipv6_renew_option(opt->hopopt, newopt, newoptlen, 665 err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
663 newtype != IPV6_HOPOPTS, 666 newtype != IPV6_HOPOPTS,
664 &opt2->hopopt, &p); 667 &opt2->hopopt, &p);
665 if (err) 668 if (err)
666 goto out; 669 goto out;
667 670
668 err = ipv6_renew_option(opt->dst0opt, newopt, newoptlen, 671 err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
669 newtype != IPV6_RTHDRDSTOPTS, 672 newtype != IPV6_RTHDRDSTOPTS,
670 &opt2->dst0opt, &p); 673 &opt2->dst0opt, &p);
671 if (err) 674 if (err)
672 goto out; 675 goto out;
673 676
674 err = ipv6_renew_option(opt->srcrt, newopt, newoptlen, 677 err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
675 newtype != IPV6_RTHDR, 678 newtype != IPV6_RTHDR,
676 (struct ipv6_opt_hdr **)opt2->srcrt, &p); 679 (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
677 if (err) 680 if (err)
678 goto out; 681 goto out;
679 682
680 err = ipv6_renew_option(opt->dst1opt, newopt, newoptlen, 683 err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
681 newtype != IPV6_DSTOPTS, 684 newtype != IPV6_DSTOPTS,
682 &opt2->dst1opt, &p); 685 &opt2->dst1opt, &p);
683 if (err) 686 if (err)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1044b6fce0d5..356a8a7ef22a 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -401,7 +401,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
401 if (hlimit < 0) 401 if (hlimit < 0)
402 hlimit = ipv6_get_hoplimit(dst->dev); 402 hlimit = ipv6_get_hoplimit(dst->dev);
403 403
404 tclass = np->cork.tclass; 404 tclass = np->tclass;
405 if (tclass < 0) 405 if (tclass < 0)
406 tclass = 0; 406 tclass = 0;
407 407
@@ -497,7 +497,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
497 if (hlimit < 0) 497 if (hlimit < 0)
498 hlimit = ipv6_get_hoplimit(dst->dev); 498 hlimit = ipv6_get_hoplimit(dst->dev);
499 499
500 tclass = np->cork.tclass; 500 tclass = np->tclass;
501 if (tclass < 0) 501 if (tclass < 0)
502 tclass = 0; 502 tclass = 0;
503 503
@@ -712,6 +712,11 @@ discard_it:
712 return 0; 712 return 0;
713} 713}
714 714
715/*
716 * Special lock-class for __icmpv6_socket:
717 */
718static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
719
715int __init icmpv6_init(struct net_proto_family *ops) 720int __init icmpv6_init(struct net_proto_family *ops)
716{ 721{
717 struct sock *sk; 722 struct sock *sk;
@@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops)
730 735
731 sk = per_cpu(__icmpv6_socket, i)->sk; 736 sk = per_cpu(__icmpv6_socket, i)->sk;
732 sk->sk_allocation = GFP_ATOMIC; 737 sk->sk_allocation = GFP_ATOMIC;
738 /*
739 * Split off their lock-class, because sk->sk_dst_lock
740 * gets used from softirqs, which is safe for
741 * __icmpv6_socket (because those never get directly used
742 * via userspace syscalls), but unsafe for normal sockets.
743 */
744 lockdep_set_class(&sk->sk_dst_lock,
745 &icmpv6_socket_sk_dst_lock_key);
733 746
734 /* Enough space for 2 64K ICMP packets, including 747 /* Enough space for 2 64K ICMP packets, including
735 * sk_buff struct overhead. 748 * sk_buff struct overhead.
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 5c950cc79d80..bf491077b822 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
185 return err; 185 return err;
186 } 186 }
187 187
188 ip6_dst_store(sk, dst, NULL); 188 __ip6_dst_store(sk, dst, NULL);
189 } 189 }
190 190
191 skb->dst = dst_clone(dst); 191 skb->dst = dst_clone(dst);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index df8f051c0fce..25c2a9e03895 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -71,6 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
71 goto out; 71 goto out;
72 } 72 }
73 73
74 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
75
74 /* 76 /*
75 * Store incoming device index. When the packet will 77 * Store incoming device index. When the packet will
76 * be queued, we cannot refer to skb->dev anymore. 78 * be queued, we cannot refer to skb->dev anymore.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2c5b44575af0..4fb47a252913 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb)
147 147
148int ip6_output(struct sk_buff *skb) 148int ip6_output(struct sk_buff *skb)
149{ 149{
150 if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || 150 if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
151 dst_allfrag(skb->dst)) 151 dst_allfrag(skb->dst))
152 return ip6_fragment(skb, ip6_output2); 152 return ip6_fragment(skb, ip6_output2);
153 else 153 else
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
229 skb->priority = sk->sk_priority; 229 skb->priority = sk->sk_priority;
230 230
231 mtu = dst_mtu(dst); 231 mtu = dst_mtu(dst);
232 if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) { 232 if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
233 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); 233 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
234 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, 234 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
235 dst_output); 235 dst_output);
@@ -356,6 +356,7 @@ int ip6_forward(struct sk_buff *skb)
356 skb->dev = dst->dev; 356 skb->dev = dst->dev;
357 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 357 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
358 0, skb->dev); 358 0, skb->dev);
359 IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
359 360
360 kfree_skb(skb); 361 kfree_skb(skb);
361 return -ETIMEDOUT; 362 return -ETIMEDOUT;
@@ -595,6 +596,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
595 } 596 }
596 597
597 err = output(skb); 598 err = output(skb);
599 if(!err)
600 IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
601
598 if (err || !frag) 602 if (err || !frag)
599 break; 603 break;
600 604
@@ -706,12 +710,11 @@ slow_path:
706 /* 710 /*
707 * Put this fragment into the sending queue. 711 * Put this fragment into the sending queue.
708 */ 712 */
709
710 IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
711
712 err = output(frag); 713 err = output(frag);
713 if (err) 714 if (err)
714 goto fail; 715 goto fail;
716
717 IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
715 } 718 }
716 kfree_skb(skb); 719 kfree_skb(skb);
717 IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); 720 IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
@@ -723,48 +726,51 @@ fail:
723 return err; 726 return err;
724} 727}
725 728
726int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 729static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
730 struct dst_entry *dst,
731 struct flowi *fl)
727{ 732{
728 int err = 0; 733 struct ipv6_pinfo *np = inet6_sk(sk);
734 struct rt6_info *rt = (struct rt6_info *)dst;
729 735
730 *dst = NULL; 736 if (!dst)
731 if (sk) { 737 goto out;
732 struct ipv6_pinfo *np = inet6_sk(sk); 738
733 739 /* Yes, checking route validity in not connected
734 *dst = sk_dst_check(sk, np->dst_cookie); 740 * case is not very simple. Take into account,
735 if (*dst) { 741 * that we do not support routing by source, TOS,
736 struct rt6_info *rt = (struct rt6_info*)*dst; 742 * and MSG_DONTROUTE --ANK (980726)
737 743 *
738 /* Yes, checking route validity in not connected 744 * 1. If route was host route, check that
739 * case is not very simple. Take into account, 745 * cached destination is current.
740 * that we do not support routing by source, TOS, 746 * If it is network route, we still may
741 * and MSG_DONTROUTE --ANK (980726) 747 * check its validity using saved pointer
742 * 748 * to the last used address: daddr_cache.
743 * 1. If route was host route, check that 749 * We do not want to save whole address now,
744 * cached destination is current. 750 * (because main consumer of this service
745 * If it is network route, we still may 751 * is tcp, which has not this problem),
746 * check its validity using saved pointer 752 * so that the last trick works only on connected
747 * to the last used address: daddr_cache. 753 * sockets.
748 * We do not want to save whole address now, 754 * 2. oif also should be the same.
749 * (because main consumer of this service 755 */
750 * is tcp, which has not this problem), 756 if (((rt->rt6i_dst.plen != 128 ||
751 * so that the last trick works only on connected 757 !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr))
752 * sockets. 758 && (np->daddr_cache == NULL ||
753 * 2. oif also should be the same. 759 !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache)))
754 */ 760 || (fl->oif && fl->oif != dst->dev->ifindex)) {
755 if (((rt->rt6i_dst.plen != 128 || 761 dst_release(dst);
756 !ipv6_addr_equal(&fl->fl6_dst, 762 dst = NULL;
757 &rt->rt6i_dst.addr))
758 && (np->daddr_cache == NULL ||
759 !ipv6_addr_equal(&fl->fl6_dst,
760 np->daddr_cache)))
761 || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
762 dst_release(*dst);
763 *dst = NULL;
764 }
765 }
766 } 763 }
767 764
765out:
766 return dst;
767}
768
769static int ip6_dst_lookup_tail(struct sock *sk,
770 struct dst_entry **dst, struct flowi *fl)
771{
772 int err;
773
768 if (*dst == NULL) 774 if (*dst == NULL)
769 *dst = ip6_route_output(sk, fl); 775 *dst = ip6_route_output(sk, fl);
770 776
@@ -773,7 +779,6 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
773 779
774 if (ipv6_addr_any(&fl->fl6_src)) { 780 if (ipv6_addr_any(&fl->fl6_src)) {
775 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); 781 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
776
777 if (err) 782 if (err)
778 goto out_err_release; 783 goto out_err_release;
779 } 784 }
@@ -786,8 +791,48 @@ out_err_release:
786 return err; 791 return err;
787} 792}
788 793
794/**
795 * ip6_dst_lookup - perform route lookup on flow
796 * @sk: socket which provides route info
797 * @dst: pointer to dst_entry * for result
798 * @fl: flow to lookup
799 *
800 * This function performs a route lookup on the given flow.
801 *
802 * It returns zero on success, or a standard errno code on error.
803 */
804int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
805{
806 *dst = NULL;
807 return ip6_dst_lookup_tail(sk, dst, fl);
808}
789EXPORT_SYMBOL_GPL(ip6_dst_lookup); 809EXPORT_SYMBOL_GPL(ip6_dst_lookup);
790 810
811/**
812 * ip6_sk_dst_lookup - perform socket cached route lookup on flow
813 * @sk: socket which provides the dst cache and route info
814 * @dst: pointer to dst_entry * for result
815 * @fl: flow to lookup
816 *
817 * This function performs a route lookup on the given flow with the
818 * possibility of using the cached route in the socket if it is valid.
819 * It will take the socket dst lock when operating on the dst cache.
820 * As a result, this function can only be used in process context.
821 *
822 * It returns zero on success, or a standard errno code on error.
823 */
824int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
825{
826 *dst = NULL;
827 if (sk) {
828 *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
829 *dst = ip6_sk_dst_check(sk, *dst, fl);
830 }
831
832 return ip6_dst_lookup_tail(sk, dst, fl);
833}
834EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
835
791static inline int ip6_ufo_append_data(struct sock *sk, 836static inline int ip6_ufo_append_data(struct sock *sk,
792 int getfrag(void *from, char *to, int offset, int len, 837 int getfrag(void *from, char *to, int offset, int len,
793 int odd, struct sk_buff *skb), 838 int odd, struct sk_buff *skb),
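
The refactor splits the old ip6_dst_lookup() in two: ip6_dst_lookup() always performs a fresh lookup via the shared ip6_dst_lookup_tail(), while the new ip6_sk_dst_lookup() first revalidates the socket's cached route in ip6_sk_dst_check() — the destination must match a cached host route or daddr_cache, and the flow's oif must agree — before falling back. The UDPv6 sendmsg hunk below switches to the cached variant. A toy model of the validity predicate (32-bit values stand in for in6_addr):

    #include <stdio.h>

    /* Toy route entry: plen == 128 marks a host route, as in the
     * rt6i_dst.plen test. */
    struct toy_rt { unsigned int dst; int plen; int ifindex; };

    /* Mirror of the ip6_sk_dst_check() predicate: keep the cached dst
     * only if the destination still matches and the device agrees. */
    static int dst_still_valid(const struct toy_rt *rt, unsigned int fl_dst,
                               const unsigned int *daddr_cache, int fl_oif)
    {
        int host_match = rt->plen == 128 && rt->dst == fl_dst;
        int cache_match = daddr_cache && *daddr_cache == fl_dst;

        if (!host_match && !cache_match)
            return 0;            /* destination changed: drop and relookup */
        if (fl_oif && fl_oif != rt->ifindex)
            return 0;            /* output interface changed */
        return 1;
    }

    int main(void)
    {
        struct toy_rt rt = { 0xdead, 128, 2 };

        printf("%d\n", dst_still_valid(&rt, 0xdead, NULL, 2)); /* 1: reuse */
        printf("%d\n", dst_still_valid(&rt, 0xbeef, NULL, 2)); /* 0: relookup */
        return 0;
    }
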
@@ -1050,7 +1095,7 @@ alloc_new_skb:
1050 skb_prev->csum = csum_sub(skb_prev->csum, 1095 skb_prev->csum = csum_sub(skb_prev->csum,
1051 skb->csum); 1096 skb->csum);
1052 data += fraggap; 1097 data += fraggap;
1053 skb_trim(skb_prev, maxfraglen); 1098 pskb_trim_unique(skb_prev, maxfraglen);
1054 } 1099 }
1055 copy = datalen - transhdrlen - fraggap; 1100 copy = datalen - transhdrlen - fraggap;
1056 if (copy < 0) { 1101 if (copy < 0) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index bc77c0e1a943..84d7ebdb9d21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -567,10 +567,9 @@ static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
567 567
568 int opt_len = sizeof(*opt) + 8; 568 int opt_len = sizeof(*opt) + 8;
569 569
570 if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) { 570 if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) {
571 return NULL; 571 return NULL;
572 } 572 }
573 memset(opt, 0, opt_len);
574 opt->tot_len = opt_len; 573 opt->tot_len = opt_len;
575 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1); 574 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
576 opt->opt_nflen = 8; 575 opt->opt_nflen = 8;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index b285b0357084..7e4d1c17bfbc 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -109,7 +109,8 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
109 goto out_put_cpu; 109 goto out_put_cpu;
110 } 110 }
111 111
112 skb_put(skb, dlen - plen); 112 skb->truesize += dlen - plen;
113 __skb_put(skb, dlen - plen);
113 memcpy(skb->data, scratch, dlen); 114 memcpy(skb->data, scratch, dlen);
114 err = ipch->nexthdr; 115 err = ipch->nexthdr;
115 116
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0c17dec11c8d..a5eaaf693abf 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -57,29 +57,11 @@
57 57
58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; 58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
59 59
60static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) 60static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
61 int proto)
61{ 62{
62 struct sk_buff *segs = ERR_PTR(-EINVAL); 63 struct inet6_protocol *ops = NULL;
63 struct ipv6hdr *ipv6h;
64 struct inet6_protocol *ops;
65 int proto;
66 64
67 if (unlikely(skb_shinfo(skb)->gso_type &
68 ~(SKB_GSO_UDP |
69 SKB_GSO_DODGY |
70 SKB_GSO_TCP_ECN |
71 SKB_GSO_TCPV6 |
72 0)))
73 goto out;
74
75 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
76 goto out;
77
78 ipv6h = skb->nh.ipv6h;
79 proto = ipv6h->nexthdr;
80 __skb_pull(skb, sizeof(*ipv6h));
81
82 rcu_read_lock();
83 for (;;) { 65 for (;;) {
84 struct ipv6_opt_hdr *opth; 66 struct ipv6_opt_hdr *opth;
85 int len; 67 int len;
@@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
88 ops = rcu_dereference(inet6_protos[proto]); 70 ops = rcu_dereference(inet6_protos[proto]);
89 71
90 if (unlikely(!ops)) 72 if (unlikely(!ops))
91 goto unlock; 73 break;
92 74
93 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) 75 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
94 break; 76 break;
95 } 77 }
96 78
97 if (unlikely(!pskb_may_pull(skb, 8))) 79 if (unlikely(!pskb_may_pull(skb, 8)))
98 goto unlock; 80 break;
99 81
100 opth = (void *)skb->data; 82 opth = (void *)skb->data;
101 len = opth->hdrlen * 8 + 8; 83 len = opth->hdrlen * 8 + 8;
102 84
103 if (unlikely(!pskb_may_pull(skb, len))) 85 if (unlikely(!pskb_may_pull(skb, len)))
104 goto unlock; 86 break;
105 87
106 proto = opth->nexthdr; 88 proto = opth->nexthdr;
107 __skb_pull(skb, len); 89 __skb_pull(skb, len);
108 } 90 }
109 91
110 skb->h.raw = skb->data; 92 return ops;
111 if (likely(ops->gso_segment)) 93}
112 segs = ops->gso_segment(skb, features); 94
95static int ipv6_gso_send_check(struct sk_buff *skb)
96{
97 struct ipv6hdr *ipv6h;
98 struct inet6_protocol *ops;
99 int err = -EINVAL;
100
101 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
102 goto out;
113 103
114unlock: 104 ipv6h = skb->nh.ipv6h;
105 __skb_pull(skb, sizeof(*ipv6h));
106 err = -EPROTONOSUPPORT;
107
108 rcu_read_lock();
109 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
110 if (likely(ops && ops->gso_send_check)) {
111 skb->h.raw = skb->data;
112 err = ops->gso_send_check(skb);
113 }
114 rcu_read_unlock();
115
116out:
117 return err;
118}
119
120static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
121{
122 struct sk_buff *segs = ERR_PTR(-EINVAL);
123 struct ipv6hdr *ipv6h;
124 struct inet6_protocol *ops;
125
126 if (unlikely(skb_shinfo(skb)->gso_type &
127 ~(SKB_GSO_UDP |
128 SKB_GSO_DODGY |
129 SKB_GSO_TCP_ECN |
130 SKB_GSO_TCPV6 |
131 0)))
132 goto out;
133
134 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
135 goto out;
136
137 ipv6h = skb->nh.ipv6h;
138 __skb_pull(skb, sizeof(*ipv6h));
139 segs = ERR_PTR(-EPROTONOSUPPORT);
140
141 rcu_read_lock();
142 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
143 if (likely(ops && ops->gso_segment)) {
144 skb->h.raw = skb->data;
145 segs = ops->gso_segment(skb, features);
146 }
115 rcu_read_unlock(); 147 rcu_read_unlock();
116 148
117 if (unlikely(IS_ERR(segs))) 149 if (unlikely(IS_ERR(segs)))
@@ -130,6 +162,7 @@ out:
130static struct packet_type ipv6_packet_type = { 162static struct packet_type ipv6_packet_type = {
131 .type = __constant_htons(ETH_P_IPV6), 163 .type = __constant_htons(ETH_P_IPV6),
132 .func = ipv6_rcv, 164 .func = ipv6_rcv,
165 .gso_send_check = ipv6_gso_send_check,
133 .gso_segment = ipv6_gso_segment, 166 .gso_segment = ipv6_gso_segment,
134}; 167};
135 168
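
The ipv6_sockglue.c rework factors the extension-header walk into ipv6_gso_pull_exthdrs(), shared by segmentation and the new ipv6_gso_send_check(): each INET6_PROTO_GSO_EXTHDR header is 8 + hdrlen * 8 bytes long, and the walk stops at the first protocol without that flag. A userspace sketch of the walk over an invented buffer layout:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical option-header layout: a nexthdr byte then a length
     * byte counting additional 8-byte units (struct ipv6_opt_hdr shape). */
    struct opt_hdr {
        unsigned char nexthdr;
        unsigned char hdrlen;
    };

    #define NEXTHDR_HOP 0   /* treated here as "the" extension header */
    #define NEXTHDR_TCP 6

    /* Walk chained extension headers, advancing *off by 8 + hdrlen * 8
     * each step, until a non-extension protocol is reached. */
    static int pull_exthdrs(const unsigned char *buf, size_t len,
                            int proto, size_t *off)
    {
        while (proto == NEXTHDR_HOP) {
            const struct opt_hdr *opth;
            size_t hlen;

            if (*off + sizeof(*opth) > len)
                return -1;      /* pskb_may_pull() failure analogue */
            opth = (const struct opt_hdr *)(buf + *off);
            hlen = opth->hdrlen * 8 + 8;
            if (*off + hlen > len)
                return -1;

            proto = opth->nexthdr;
            *off += hlen;
        }
        return proto;
    }

    int main(void)
    {
        /* One hop-by-hop header (8 bytes) in front of TCP. */
        unsigned char pkt[16] = { NEXTHDR_TCP, 0 };
        size_t off = 0;
        int proto = pull_exthdrs(pkt, sizeof(pkt), NEXTHDR_HOP, &off);

        printf("proto=%d off=%zu\n", proto, off); /* proto=6 off=8 */
        return 0;
    }
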
@@ -329,7 +362,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
329 break; 362 break;
330 363
331 case IPV6_TCLASS: 364 case IPV6_TCLASS:
332 if (val < 0 || val > 0xff) 365 if (val < -1 || val > 0xff)
333 goto e_inval; 366 goto e_inval;
334 np->tclass = val; 367 np->tclass = val;
335 retv = 0; 368 retv = 0;
@@ -914,6 +947,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
914 947
915 case IPV6_TCLASS: 948 case IPV6_TCLASS:
916 val = np->tclass; 949 val = np->tclass;
950 if (val < 0)
951 val = 0;
917 break; 952 break;
918 953
919 case IPV6_RECVTCLASS: 954 case IPV6_RECVTCLASS:
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9d697d4dcffc..639eb20c9f1f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
268 if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) { 268 if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) {
269 struct inet6_dev *idev = in6_dev_get(dev); 269 struct inet6_dev *idev = in6_dev_get(dev);
270 270
271 (void) ip6_mc_leave_src(sk, mc_lst, idev);
271 if (idev) { 272 if (idev) {
272 (void) ip6_mc_leave_src(sk,mc_lst,idev);
273 __ipv6_dev_mc_dec(idev, &mc_lst->addr); 273 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
274 in6_dev_put(idev); 274 in6_dev_put(idev);
275 } 275 }
276 dev_put(dev); 276 dev_put(dev);
277 } 277 } else
278 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
278 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 279 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
279 return 0; 280 return 0;
280 } 281 }
@@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk)
334 if (dev) { 335 if (dev) {
335 struct inet6_dev *idev = in6_dev_get(dev); 336 struct inet6_dev *idev = in6_dev_get(dev);
336 337
338 (void) ip6_mc_leave_src(sk, mc_lst, idev);
337 if (idev) { 339 if (idev) {
338 (void) ip6_mc_leave_src(sk, mc_lst, idev);
339 __ipv6_dev_mc_dec(idev, &mc_lst->addr); 340 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
340 in6_dev_put(idev); 341 in6_dev_put(idev);
341 } 342 }
342 dev_put(dev); 343 dev_put(dev);
343 } 344 } else
345 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
344 346
345 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 347 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
346 348
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index f26898b00347..c9d6b23cd3f7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1398,23 +1398,39 @@ static int __init ip6_tables_init(void)
1398{ 1398{
1399 int ret; 1399 int ret;
1400 1400
1401 xt_proto_init(AF_INET6); 1401 ret = xt_proto_init(AF_INET6);
1402 if (ret < 0)
1403 goto err1;
1402 1404
1403 /* No one else will be downing sem now, so we won't sleep */ 1405 /* No one else will be downing sem now, so we won't sleep */
1404 xt_register_target(&ip6t_standard_target); 1406 ret = xt_register_target(&ip6t_standard_target);
1405 xt_register_target(&ip6t_error_target); 1407 if (ret < 0)
1406 xt_register_match(&icmp6_matchstruct); 1408 goto err2;
1409 ret = xt_register_target(&ip6t_error_target);
1410 if (ret < 0)
1411 goto err3;
1412 ret = xt_register_match(&icmp6_matchstruct);
1413 if (ret < 0)
1414 goto err4;
1407 1415
1408 /* Register setsockopt */ 1416 /* Register setsockopt */
1409 ret = nf_register_sockopt(&ip6t_sockopts); 1417 ret = nf_register_sockopt(&ip6t_sockopts);
1410 if (ret < 0) { 1418 if (ret < 0)
1411 duprintf("Unable to register sockopts.\n"); 1419 goto err5;
1412 xt_proto_fini(AF_INET6);
1413 return ret;
1414 }
1415 1420
1416 printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n"); 1421 printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
1417 return 0; 1422 return 0;
1423
1424err5:
1425 xt_unregister_match(&icmp6_matchstruct);
1426err4:
1427 xt_unregister_target(&ip6t_error_target);
1428err3:
1429 xt_unregister_target(&ip6t_standard_target);
1430err2:
1431 xt_proto_fini(AF_INET6);
1432err1:
1433 return ret;
1418} 1434}
1419 1435
1420static void __exit ip6_tables_fini(void) 1436static void __exit ip6_tables_fini(void)
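
The ip6_tables_init() rework is the canonical goto ladder: each registration that can fail jumps to a label that unwinds, in reverse order, exactly the registrations that already succeeded, where the old code checked only the sockopt step and leaked the targets and match on failure. A compact model of the pattern with hypothetical subsystems:

    #include <stdio.h>

    static int reg_a(void) { return 0; }
    static int reg_b(void) { return 0; }
    static int reg_c(void) { return -1; }   /* pretend this one fails */
    static void unreg_a(void) { puts("unreg a"); }
    static void unreg_b(void) { puts("unreg b"); }

    /* Error-unwinding ladder: each failure releases, in reverse order,
     * only what was already set up -- same shape as ip6_tables_init(). */
    static int init(void)
    {
        int ret;

        ret = reg_a();
        if (ret < 0)
            goto err1;
        ret = reg_b();
        if (ret < 0)
            goto err2;
        ret = reg_c();
        if (ret < 0)
            goto err3;
        return 0;

    err3:
        unreg_b();
    err2:
        unreg_a();
    err1:
        return ret;
    }

    int main(void)
    {
        printf("init: %d\n", init()); /* prints unreg b, unreg a, init: -1 */
        return 0;
    }
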
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fa1ce0ae123e..15b862d8acab 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -411,6 +411,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
411 /* Copy the address. */ 411 /* Copy the address. */
412 if (sin6) { 412 if (sin6) {
413 sin6->sin6_family = AF_INET6; 413 sin6->sin6_family = AF_INET6;
414 sin6->sin6_port = 0;
414 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); 415 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
415 sin6->sin6_flowinfo = 0; 416 sin6->sin6_flowinfo = 0;
416 sin6->sin6_scope_id = 0; 417 sin6->sin6_scope_id = 0;
@@ -780,7 +781,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
780 } 781 }
781 782
782 if (tclass < 0) { 783 if (tclass < 0) {
783 tclass = np->cork.tclass; 784 tclass = np->tclass;
784 if (tclass < 0) 785 if (tclass < 0)
785 tclass = 0; 786 tclass = 0;
786 } 787 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 87c39c978cd0..d9baca062d24 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -53,6 +53,7 @@
53#include <linux/rtnetlink.h> 53#include <linux/rtnetlink.h>
54#include <net/dst.h> 54#include <net/dst.h>
55#include <net/xfrm.h> 55#include <net/xfrm.h>
56#include <net/netevent.h>
56 57
57#include <asm/uaccess.h> 58#include <asm/uaccess.h>
58 59
@@ -742,6 +743,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
742 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 743 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
743 } 744 }
744 dst->metrics[RTAX_MTU-1] = mtu; 745 dst->metrics[RTAX_MTU-1] = mtu;
746 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
745 } 747 }
746} 748}
747 749
@@ -1155,6 +1157,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
1155 struct rt6_info *rt, *nrt = NULL; 1157 struct rt6_info *rt, *nrt = NULL;
1156 int strict; 1158 int strict;
1157 struct fib6_node *fn; 1159 struct fib6_node *fn;
1160 struct netevent_redirect netevent;
1158 1161
1159 /* 1162 /*
1160 * Get the "current" route for this destination and 1163 * Get the "current" route for this destination and
@@ -1252,6 +1255,10 @@ restart:
1252 if (ip6_ins_rt(nrt, NULL, NULL, NULL)) 1255 if (ip6_ins_rt(nrt, NULL, NULL, NULL))
1253 goto out; 1256 goto out;
1254 1257
1258 netevent.old = &rt->u.dst;
1259 netevent.new = &nrt->u.dst;
1260 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1261
1255 if (rt->rt6i_flags&RTF_CACHE) { 1262 if (rt->rt6i_flags&RTF_CACHE) {
1256 ip6_del_rt(rt, NULL, NULL, NULL); 1263 ip6_del_rt(rt, NULL, NULL, NULL);
1257 return; 1264 return;
@@ -1525,6 +1532,10 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1525 1532
1526static int ip6_pkt_discard(struct sk_buff *skb) 1533static int ip6_pkt_discard(struct sk_buff *skb)
1527{ 1534{
1535 int type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
1536 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED)
1537 IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS);
1538
1528 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); 1539 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
1529 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev); 1540 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
1530 kfree_skb(skb); 1541 kfree_skb(skb);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c56aeece2bf5..836eecd7e62b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -380,7 +380,6 @@ static int ipip6_rcv(struct sk_buff *skb)
380 secpath_reset(skb); 380 secpath_reset(skb);
381 skb->mac.raw = skb->nh.raw; 381 skb->mac.raw = skb->nh.raw;
382 skb->nh.raw = skb->data; 382 skb->nh.raw = skb->data;
383 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
384 IPCB(skb)->flags = 0; 383 IPCB(skb)->flags = 0;
385 skb->protocol = htons(ETH_P_IPV6); 384 skb->protocol = htons(ETH_P_IPV6);
386 skb->pkt_type = PACKET_HOST; 385 skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5bdcb9002cf7..802a1a6b1037 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -270,7 +270,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
270 inet->rcv_saddr = LOOPBACK4_IPV6; 270 inet->rcv_saddr = LOOPBACK4_IPV6;
271 271
272 sk->sk_gso_type = SKB_GSO_TCPV6; 272 sk->sk_gso_type = SKB_GSO_TCPV6;
273 ip6_dst_store(sk, dst, NULL); 273 __ip6_dst_store(sk, dst, NULL);
274 274
275 icsk->icsk_ext_hdr_len = 0; 275 icsk->icsk_ext_hdr_len = 0;
276 if (np->opt) 276 if (np->opt)
@@ -427,7 +427,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
427 case TCP_SYN_RECV: /* Cannot happen. 427 case TCP_SYN_RECV: /* Cannot happen.
428 It can, if SYNs are crossed. --ANK */ 428 It can, if SYNs are crossed. --ANK */
429 if (!sock_owned_by_user(sk)) { 429 if (!sock_owned_by_user(sk)) {
430 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
431 sk->sk_err = err; 430 sk->sk_err = err;
432 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 431 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
433 432
@@ -552,6 +551,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
552 } 551 }
553} 552}
554 553
554static int tcp_v6_gso_send_check(struct sk_buff *skb)
555{
556 struct ipv6hdr *ipv6h;
557 struct tcphdr *th;
558
559 if (!pskb_may_pull(skb, sizeof(*th)))
560 return -EINVAL;
561
562 ipv6h = skb->nh.ipv6h;
563 th = skb->h.th;
564
565 th->check = 0;
566 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
567 IPPROTO_TCP, 0);
568 skb->csum = offsetof(struct tcphdr, check);
569 skb->ip_summed = CHECKSUM_HW;
570 return 0;
571}
555 572
556static void tcp_v6_send_reset(struct sk_buff *skb) 573static void tcp_v6_send_reset(struct sk_buff *skb)
557{ 574{
@@ -813,7 +830,6 @@ drop:
813 if (req) 830 if (req)
814 reqsk_free(req); 831 reqsk_free(req);
815 832
816 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
817 return 0; /* don't send reset */ 833 return 0; /* don't send reset */
818} 834}
819 835
@@ -928,8 +944,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
928 * comment in that function for the gory details. -acme 944 * comment in that function for the gory details. -acme
929 */ 945 */
930 946
931 sk->sk_gso_type = SKB_GSO_TCPV6; 947 newsk->sk_gso_type = SKB_GSO_TCPV6;
932 ip6_dst_store(newsk, dst, NULL); 948 __ip6_dst_store(newsk, dst, NULL);
933 949
934 newtcp6sk = (struct tcp6_sock *)newsk; 950 newtcp6sk = (struct tcp6_sock *)newsk;
935 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; 951 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
@@ -1603,6 +1619,7 @@ struct proto tcpv6_prot = {
1603static struct inet6_protocol tcpv6_protocol = { 1619static struct inet6_protocol tcpv6_protocol = {
1604 .handler = tcp_v6_rcv, 1620 .handler = tcp_v6_rcv,
1605 .err_handler = tcp_v6_err, 1621 .err_handler = tcp_v6_err,
1622 .gso_send_check = tcp_v6_gso_send_check,
1606 .gso_segment = tcp_tso_segment, 1623 .gso_segment = tcp_tso_segment,
1607 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1624 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1608}; 1625};
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ccc57f434cd3..3d54f246411e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -782,7 +782,7 @@ do_udp_sendmsg:
782 connected = 0; 782 connected = 0;
783 } 783 }
784 784
785 err = ip6_dst_lookup(sk, &dst, fl); 785 err = ip6_sk_dst_lookup(sk, &dst, fl);
786 if (err) 786 if (err)
787 goto out; 787 goto out;
788 if (final_p) 788 if (final_p)
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 48fccb1eca08..c8c8b44a0f58 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -122,10 +122,10 @@ static int xfrm6_output_finish(struct sk_buff *skb)
122{ 122{
123 struct sk_buff *segs; 123 struct sk_buff *segs;
124 124
125 if (!skb_shinfo(skb)->gso_size) 125 if (!skb_is_gso(skb))
126 return xfrm6_output_finish2(skb); 126 return xfrm6_output_finish2(skb);
127 127
128 skb->protocol = htons(ETH_P_IP); 128 skb->protocol = htons(ETH_P_IPV6);
129 segs = skb_gso_segment(skb, 0); 129 segs = skb_gso_segment(skb, 0);
130 kfree_skb(skb); 130 kfree_skb(skb);
131 if (unlikely(IS_ERR(segs))) 131 if (unlikely(IS_ERR(segs)))
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 6b44fe8516c3..c8f9369c2a87 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -31,27 +31,6 @@
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33 33
34#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
35# define X6TDEBUG 3
36#else
37# define X6TDEBUG 1
38#endif
39
40#define X6TPRINTK(fmt, args...) printk(fmt, ## args)
41#define X6TNOPRINTK(fmt, args...) do { ; } while(0)
42
43#if X6TDEBUG >= 1
44# define X6TPRINTK1 X6TPRINTK
45#else
46# define X6TPRINTK1 X6TNOPRINTK
47#endif
48
49#if X6TDEBUG >= 3
50# define X6TPRINTK3 X6TPRINTK
51#else
52# define X6TPRINTK3 X6TNOPRINTK
53#endif
54
55/* 34/*
56 * xfrm_tunnel_spi things are for allocating unique id ("spi") 35 * xfrm_tunnel_spi things are for allocating unique id ("spi")
57 * per xfrm_address_t. 36 * per xfrm_address_t.
@@ -62,15 +41,8 @@ struct xfrm6_tunnel_spi {
62 xfrm_address_t addr; 41 xfrm_address_t addr;
63 u32 spi; 42 u32 spi;
64 atomic_t refcnt; 43 atomic_t refcnt;
65#ifdef XFRM6_TUNNEL_SPI_MAGIC
66 u32 magic;
67#endif
68}; 44};
69 45
70#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
71# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
72#endif
73
74static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); 46static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);
75 47
76static u32 xfrm6_tunnel_spi; 48static u32 xfrm6_tunnel_spi;
@@ -86,43 +58,15 @@ static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
86static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; 58static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
87static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; 59static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
88 60
89#ifdef XFRM6_TUNNEL_SPI_MAGIC
90static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
91 const char *name)
92{
93 if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
94 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
95 "at %p has corrupted magic %08x "
96 "(should be %08x)\n",
97 name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
98 return -1;
99 }
100 return 0;
101}
102#else
103static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
104 const char *name)
105{
106 return 0;
107}
108#endif
109
110#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)
111
112
113static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 61static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
114{ 62{
115 unsigned h; 63 unsigned h;
116 64
117 X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);
118
119 h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; 65 h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
120 h ^= h >> 16; 66 h ^= h >> 16;
121 h ^= h >> 8; 67 h ^= h >> 8;
122 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; 68 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
123 69
124 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);
125
126 return h; 70 return h;
127} 71}
128 72
@@ -136,19 +80,13 @@ static int xfrm6_tunnel_spi_init(void)
136{ 80{
137 int i; 81 int i;
138 82
139 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
140
141 xfrm6_tunnel_spi = 0; 83 xfrm6_tunnel_spi = 0;
142 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", 84 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
143 sizeof(struct xfrm6_tunnel_spi), 85 sizeof(struct xfrm6_tunnel_spi),
144 0, SLAB_HWCACHE_ALIGN, 86 0, SLAB_HWCACHE_ALIGN,
145 NULL, NULL); 87 NULL, NULL);
146 if (!xfrm6_tunnel_spi_kmem) { 88 if (!xfrm6_tunnel_spi_kmem)
147 X6TPRINTK1(KERN_ERR
148 "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
149 __FUNCTION__);
150 return -ENOMEM; 89 return -ENOMEM;
151 }
152 90
153 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) 91 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
154 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]); 92 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
@@ -161,22 +99,16 @@ static void xfrm6_tunnel_spi_fini(void)
161{ 99{
162 int i; 100 int i;
163 101
164 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
165
166 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) { 102 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
167 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i])) 103 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
168 goto err; 104 return;
169 } 105 }
170 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) { 106 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
171 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) 107 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
172 goto err; 108 return;
173 } 109 }
174 kmem_cache_destroy(xfrm6_tunnel_spi_kmem); 110 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
175 xfrm6_tunnel_spi_kmem = NULL; 111 xfrm6_tunnel_spi_kmem = NULL;
176 return;
177err:
178 X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
179 return;
180} 112}
181 113
182static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 114static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
@@ -184,19 +116,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
184 struct xfrm6_tunnel_spi *x6spi; 116 struct xfrm6_tunnel_spi *x6spi;
185 struct hlist_node *pos; 117 struct hlist_node *pos;
186 118
187 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
188
189 hlist_for_each_entry(x6spi, pos, 119 hlist_for_each_entry(x6spi, pos,
190 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 120 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
191 list_byaddr) { 121 list_byaddr) {
192 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 122 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
193 X6SPI_CHECK_MAGIC(x6spi);
194 X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi);
195 return x6spi; 123 return x6spi;
196 }
197 } 124 }
198 125
199 X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
200 return NULL; 126 return NULL;
201} 127}
202 128
@@ -205,8 +131,6 @@ u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
205 struct xfrm6_tunnel_spi *x6spi; 131 struct xfrm6_tunnel_spi *x6spi;
206 u32 spi; 132 u32 spi;
207 133
208 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
209
210 read_lock_bh(&xfrm6_tunnel_spi_lock); 134 read_lock_bh(&xfrm6_tunnel_spi_lock);
211 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 135 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
212 spi = x6spi ? x6spi->spi : 0; 136 spi = x6spi ? x6spi->spi : 0;
@@ -223,8 +147,6 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
223 struct hlist_node *pos; 147 struct hlist_node *pos;
224 unsigned index; 148 unsigned index;
225 149
226 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
227
228 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 150 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
229 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 151 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
230 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; 152 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
@@ -258,18 +180,10 @@ try_next_2:;
258 spi = 0; 180 spi = 0;
259 goto out; 181 goto out;
260alloc_spi: 182alloc_spi:
261 X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n",
262 __FUNCTION__,
263 NIP6(*(struct in6_addr *)saddr));
264 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); 183 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
265 if (!x6spi) { 184 if (!x6spi)
266 X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
267 __FUNCTION__);
268 goto out; 185 goto out;
269 } 186
270#ifdef XFRM6_TUNNEL_SPI_MAGIC
271 x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
272#endif
273 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); 187 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
274 x6spi->spi = spi; 188 x6spi->spi = spi;
275 atomic_set(&x6spi->refcnt, 1); 189 atomic_set(&x6spi->refcnt, 1);
@@ -278,9 +192,7 @@ alloc_spi:
278 192
279 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 193 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
280 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 194 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
281 X6SPI_CHECK_MAGIC(x6spi);
282out: 195out:
283 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
284 return spi; 196 return spi;
285} 197}
286 198
@@ -289,8 +201,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
289 struct xfrm6_tunnel_spi *x6spi; 201 struct xfrm6_tunnel_spi *x6spi;
290 u32 spi; 202 u32 spi;
291 203
292 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
293
294 write_lock_bh(&xfrm6_tunnel_spi_lock); 204 write_lock_bh(&xfrm6_tunnel_spi_lock);
295 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 205 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
296 if (x6spi) { 206 if (x6spi) {
@@ -300,8 +210,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
300 spi = __xfrm6_tunnel_alloc_spi(saddr); 210 spi = __xfrm6_tunnel_alloc_spi(saddr);
301 write_unlock_bh(&xfrm6_tunnel_spi_lock); 211 write_unlock_bh(&xfrm6_tunnel_spi_lock);
302 212
303 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
304
305 return spi; 213 return spi;
306} 214}
307 215
@@ -312,8 +220,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
312 struct xfrm6_tunnel_spi *x6spi; 220 struct xfrm6_tunnel_spi *x6spi;
313 struct hlist_node *pos, *n; 221 struct hlist_node *pos, *n;
314 222
315 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
316
317 write_lock_bh(&xfrm6_tunnel_spi_lock); 223 write_lock_bh(&xfrm6_tunnel_spi_lock);
318 224
319 hlist_for_each_entry_safe(x6spi, pos, n, 225 hlist_for_each_entry_safe(x6spi, pos, n,
@@ -321,12 +227,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
321 list_byaddr) 227 list_byaddr)
322 { 228 {
323 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 229 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
324 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT
325 " found at %p\n",
326 __FUNCTION__,
327 NIP6(*(struct in6_addr *)saddr),
328 x6spi);
329 X6SPI_CHECK_MAGIC(x6spi);
330 if (atomic_dec_and_test(&x6spi->refcnt)) { 230 if (atomic_dec_and_test(&x6spi->refcnt)) {
331 hlist_del(&x6spi->list_byaddr); 231 hlist_del(&x6spi->list_byaddr);
332 hlist_del(&x6spi->list_byspi); 232 hlist_del(&x6spi->list_byspi);
@@ -377,20 +277,14 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
377 case ICMPV6_ADDR_UNREACH: 277 case ICMPV6_ADDR_UNREACH:
378 case ICMPV6_PORT_UNREACH: 278 case ICMPV6_PORT_UNREACH:
379 default: 279 default:
380 X6TPRINTK3(KERN_DEBUG
381 "xfrm6_tunnel: Destination Unreach.\n");
382 break; 280 break;
383 } 281 }
384 break; 282 break;
385 case ICMPV6_PKT_TOOBIG: 283 case ICMPV6_PKT_TOOBIG:
386 X6TPRINTK3(KERN_DEBUG
387 "xfrm6_tunnel: Packet Too Big.\n");
388 break; 284 break;
389 case ICMPV6_TIME_EXCEED: 285 case ICMPV6_TIME_EXCEED:
390 switch (code) { 286 switch (code) {
391 case ICMPV6_EXC_HOPLIMIT: 287 case ICMPV6_EXC_HOPLIMIT:
392 X6TPRINTK3(KERN_DEBUG
393 "xfrm6_tunnel: Too small Hoplimit.\n");
394 break; 288 break;
395 case ICMPV6_EXC_FRAGTIME: 289 case ICMPV6_EXC_FRAGTIME:
396 default: 290 default:
@@ -447,22 +341,14 @@ static struct xfrm6_tunnel xfrm6_tunnel_handler = {
447 341
448static int __init xfrm6_tunnel_init(void) 342static int __init xfrm6_tunnel_init(void)
449{ 343{
450 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); 344 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
451
452 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
453 X6TPRINTK1(KERN_ERR
454 "xfrm6_tunnel init: can't add xfrm type\n");
455 return -EAGAIN; 345 return -EAGAIN;
456 } 346
457 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) { 347 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) {
458 X6TPRINTK1(KERN_ERR
459 "xfrm6_tunnel init(): can't add handler\n");
460 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 348 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
461 return -EAGAIN; 349 return -EAGAIN;
462 } 350 }
463 if (xfrm6_tunnel_spi_init() < 0) { 351 if (xfrm6_tunnel_spi_init() < 0) {
464 X6TPRINTK1(KERN_ERR
465 "xfrm6_tunnel init: failed to initialize spi\n");
466 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); 352 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
467 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 353 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
468 return -EAGAIN; 354 return -EAGAIN;
@@ -472,15 +358,9 @@ static int __init xfrm6_tunnel_init(void)
472 358
473static void __exit xfrm6_tunnel_fini(void) 359static void __exit xfrm6_tunnel_fini(void)
474{ 360{
475 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
476
477 xfrm6_tunnel_spi_fini(); 361 xfrm6_tunnel_spi_fini();
478 if (xfrm6_tunnel_deregister(&xfrm6_tunnel_handler)) 362 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler);
479 X6TPRINTK1(KERN_ERR 363 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
480 "xfrm6_tunnel close: can't remove handler\n");
481 if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
482 X6TPRINTK1(KERN_ERR
483 "xfrm6_tunnel close: can't remove xfrm type\n");
484} 364}
485 365
486module_init(xfrm6_tunnel_init); 366module_init(xfrm6_tunnel_init);
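
The xfrm6_tunnel.c changes are pure cleanup: the file-private X6TPRINTK debug macros, the magic-number self-checks, and the chatty error paths all go away, leaving plain error returns. What survives is a refcounted per-address SPI cache; a simplified sketch of its lookup-or-allocate pattern, with illustrative names and a toy 16-bucket hash:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct spi_entry {
	struct hlist_node node;
	u32 key;
	u32 spi;
	atomic_t refcnt;
};

static DEFINE_RWLOCK(spi_lock);
static struct hlist_head spi_hash[16];

static struct spi_entry *spi_find(u32 key)
{
	struct spi_entry *e;
	struct hlist_node *pos;

	hlist_for_each_entry(e, pos, &spi_hash[key & 15], node)
		if (e->key == key)
			return e;
	return NULL;
}

u32 spi_get(u32 key, u32 fresh_spi)
{
	struct spi_entry *e;
	u32 spi = 0;

	write_lock_bh(&spi_lock);
	e = spi_find(key);
	if (e) {
		atomic_inc(&e->refcnt);	/* existing mapping: share it */
		spi = e->spi;
	} else {
		e = kmalloc(sizeof(*e), GFP_ATOMIC); /* atomic: lock held */
		if (e) {
			e->key = key;
			e->spi = spi = fresh_spi;
			atomic_set(&e->refcnt, 1);
			hlist_add_head(&e->node, &spi_hash[key & 15]);
		}
	}
	write_unlock_bh(&spi_lock);
	return spi;
}
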
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index aa34ff4b707c..bef3f61569f7 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
1642 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 1642 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1643 goto out; 1643 goto out;
1644 1644
1645 ipx = ipx_hdr(skb); 1645 if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
1646 ipx_pktsize = ntohs(ipx->ipx_pktsize); 1646 goto drop;
1647
1648 ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
1647 1649
1648 /* Too small or invalid header? */ 1650 /* Too small or invalid header? */
1649 if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len) 1651 if (ipx_pktsize < sizeof(struct ipxhdr) ||
1652 !pskb_may_pull(skb, ipx_pktsize))
1650 goto drop; 1653 goto drop;
1651 1654
1655 ipx = ipx_hdr(skb);
1652 if (ipx->ipx_checksum != IPX_NO_CHECKSUM && 1656 if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
1653 ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize)) 1657 ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
1654 goto drop; 1658 goto drop;
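
The ipx_rcv() fix is the canonical way to parse a header out of an skb: call pskb_may_pull() before reading any field, validate the claimed packet length, pull that much, and only then take the header pointer, since pulling can reallocate the linear data. A distilled sketch with an illustrative header type:

#include <linux/types.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>

struct my_hdr {
	__be16 pktsize;
	/* ... */
};

static int my_rcv(struct sk_buff *skb)
{
	struct my_hdr *h;
	u16 pktsize;

	/* 1. Make sure the fixed header is present and linear. */
	if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
		goto drop;

	pktsize = ntohs(((struct my_hdr *)skb->data)->pktsize);

	/* 2. Validate the claimed length, then pull the whole packet. */
	if (pktsize < sizeof(struct my_hdr) || !pskb_may_pull(skb, pktsize))
		goto drop;

	/* 3. Re-read the pointer: pskb_may_pull() may have reallocated. */
	h = (struct my_hdr *)skb->data;
	/* ... process h ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;	/* drop silently, as rcv handlers usually do */
}
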
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 7fae48a53bff..17699eeb64d7 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -308,7 +308,7 @@ static void irda_connect_response(struct irda_sock *self)
308 308
309 IRDA_ASSERT(self != NULL, return;); 309 IRDA_ASSERT(self != NULL, return;);
310 310
311 skb = dev_alloc_skb(64); 311 skb = alloc_skb(64, GFP_ATOMIC);
312 if (skb == NULL) { 312 if (skb == NULL) {
313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", 313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
314 __FUNCTION__); 314 __FUNCTION__);
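
This and the following IrDA hunks convert dev_alloc_skb() calls to alloc_skb(..., GFP_ATOMIC). dev_alloc_skb(len) is roughly alloc_skb(len + NET_SKB_PAD, GFP_ATOMIC) followed by skb_reserve(NET_SKB_PAD), where NET_SKB_PAD is 16 bytes of headroom intended for receive buffers that will grow a link-level header; for locally built control frames that headroom buys nothing, so the direct call states the intent. Roughly, the two behave like this:

#include <linux/skbuff.h>

/* What dev_alloc_skb() does, approximately (receive-side allocation). */
struct sk_buff *rx_style_alloc(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + 16, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, 16);	/* headroom for a link-level header */
	return skb;
}

/* Locally generated frame: no implicit headroom needed. */
struct sk_buff *ctrl_frame_alloc(unsigned int len)
{
	return alloc_skb(len, GFP_ATOMIC);
}
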
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 9c4a902a9dba..ad6b6af3dd97 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -115,12 +115,10 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
115 115
116 IRDA_ASSERT(ircomm != NULL, return NULL;); 116 IRDA_ASSERT(ircomm != NULL, return NULL;);
117 117
118 self = kmalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); 118 self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC);
119 if (self == NULL) 119 if (self == NULL)
120 return NULL; 120 return NULL;
121 121
122 memset(self, 0, sizeof(struct ircomm_cb));
123
124 self->notify = *notify; 122 self->notify = *notify;
125 self->magic = IRCOMM_MAGIC; 123 self->magic = IRCOMM_MAGIC;
126 124
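
Here begins a long run of kmalloc()-plus-memset(0) pairs collapsed into kzalloc(), available since 2.6.14. The shape of the conversion, with an illustrative struct:

#include <linux/slab.h>
#include <linux/string.h>

struct foo { int a; void *b; };

/* Before: allocate, then zero by hand. */
struct foo *foo_alloc_old(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		memset(f, 0, sizeof(*f));
	return f;
}

/* After: one call allocates and zeroes. */
struct foo *foo_alloc_new(void)
{
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}
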
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index d9097207aed3..959874b6451f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -81,7 +81,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
81 81
82 /* Any userdata supplied? */ 82 /* Any userdata supplied? */
83 if (userdata == NULL) { 83 if (userdata == NULL) {
84 tx_skb = dev_alloc_skb(64); 84 tx_skb = alloc_skb(64, GFP_ATOMIC);
85 if (!tx_skb) 85 if (!tx_skb)
86 return -ENOMEM; 86 return -ENOMEM;
87 87
@@ -115,7 +115,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
116 116
117 if (!userdata) { 117 if (!userdata) {
118 tx_skb = dev_alloc_skb(64); 118 tx_skb = alloc_skb(64, GFP_ATOMIC);
119 if (!tx_skb) 119 if (!tx_skb)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 6009bab05091..a39f5735a90b 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -121,7 +121,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
121 121
122 skb = self->ctrl_skb; 122 skb = self->ctrl_skb;
123 if (!skb) { 123 if (!skb) {
124 skb = dev_alloc_skb(256); 124 skb = alloc_skb(256, GFP_ATOMIC);
125 if (!skb) { 125 if (!skb) {
126 spin_unlock_irqrestore(&self->spinlock, flags); 126 spin_unlock_irqrestore(&self->spinlock, flags);
127 return -ENOMEM; 127 return -ENOMEM;
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b400f27851fc..3bcdb467efc5 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -379,12 +379,11 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
379 self = hashbin_lock_find(ircomm_tty, line, NULL); 379 self = hashbin_lock_find(ircomm_tty, line, NULL);
380 if (!self) { 380 if (!self) {
381 /* No, so make new instance */ 381 /* No, so make new instance */
382 self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 382 self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
383 if (self == NULL) { 383 if (self == NULL) {
384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); 384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__);
385 return -ENOMEM; 385 return -ENOMEM;
386 } 386 }
387 memset(self, 0, sizeof(struct ircomm_tty_cb));
388 387
389 self->magic = IRCOMM_TTY_MAGIC; 388 self->magic = IRCOMM_TTY_MAGIC;
390 self->flow = FLOW_STOP; 389 self->flow = FLOW_STOP;
@@ -759,8 +758,9 @@ static int ircomm_tty_write(struct tty_struct *tty,
759 } 758 }
760 } else { 759 } else {
761 /* Prepare a full sized frame */ 760 /* Prepare a full sized frame */
762 skb = dev_alloc_skb(self->max_data_size+ 761 skb = alloc_skb(self->max_data_size+
763 self->max_header_size); 762 self->max_header_size,
763 GFP_ATOMIC);
764 if (!skb) { 764 if (!skb) {
765 spin_unlock_irqrestore(&self->spinlock, flags); 765 spin_unlock_irqrestore(&self->spinlock, flags);
766 return -ENOBUFS; 766 return -ENOBUFS;
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index ba40e5495f58..7e7a31798d8d 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -401,12 +401,10 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
401 } 401 }
402 402
403 /* Allocate dongle info for this instance */ 403 /* Allocate dongle info for this instance */
404 dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL); 404 dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL);
405 if (!dongle) 405 if (!dongle)
406 goto out; 406 goto out;
407 407
408 memset(dongle, 0, sizeof(dongle_t));
409
410 /* Bind the registration info to this particular instance */ 408 /* Bind the registration info to this particular instance */
411 dongle->issue = reg; 409 dongle->issue = reg;
412 dongle->dev = dev; 410 dongle->dev = dev;
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index a0472652a44e..61128aa05b40 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -345,7 +345,7 @@ static void iriap_disconnect_request(struct iriap_cb *self)
345 IRDA_ASSERT(self != NULL, return;); 345 IRDA_ASSERT(self != NULL, return;);
346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
347 347
348 tx_skb = dev_alloc_skb(64); 348 tx_skb = alloc_skb(64, GFP_ATOMIC);
349 if (tx_skb == NULL) { 349 if (tx_skb == NULL) {
350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n",
351 __FUNCTION__, 64); 351 __FUNCTION__, 64);
@@ -396,7 +396,7 @@ int iriap_getvaluebyclass_request(struct iriap_cb *self,
396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */ 396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */
397 397
398 skb_len = self->max_header_size+2+name_len+1+attr_len+4; 398 skb_len = self->max_header_size+2+name_len+1+attr_len+4;
399 tx_skb = dev_alloc_skb(skb_len); 399 tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
400 if (!tx_skb) 400 if (!tx_skb)
401 return -ENOMEM; 401 return -ENOMEM;
402 402
@@ -562,7 +562,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
562 * value. We add 32 bytes because of the 6 bytes for the frame and 562 * value. We add 32 bytes because of the 6 bytes for the frame and
563 * max 5 bytes for the value coding. 563 * max 5 bytes for the value coding.
564 */ 564 */
565 tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32); 565 tx_skb = alloc_skb(value->len + self->max_header_size + 32,
566 GFP_ATOMIC);
566 if (!tx_skb) 567 if (!tx_skb)
567 return; 568 return;
568 569
@@ -700,7 +701,7 @@ void iriap_send_ack(struct iriap_cb *self)
700 IRDA_ASSERT(self != NULL, return;); 701 IRDA_ASSERT(self != NULL, return;);
701 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 702 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
702 703
703 tx_skb = dev_alloc_skb(64); 704 tx_skb = alloc_skb(64, GFP_ATOMIC);
704 if (!tx_skb) 705 if (!tx_skb)
705 return; 706 return;
706 707
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index a73607450de1..da17395df05a 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -365,7 +365,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
365 365
366 switch (event) { 366 switch (event) {
367 case IAP_LM_CONNECT_INDICATION: 367 case IAP_LM_CONNECT_INDICATION:
368 tx_skb = dev_alloc_skb(64); 368 tx_skb = alloc_skb(64, GFP_ATOMIC);
369 if (tx_skb == NULL) { 369 if (tx_skb == NULL) {
370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__);
371 return; 371 return;
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 82e665c79991..a154b1d71c0f 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -82,13 +82,12 @@ struct ias_object *irias_new_object( char *name, int id)
82 82
83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
84 84
85 obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC); 85 obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
86 if (obj == NULL) { 86 if (obj == NULL) {
87 IRDA_WARNING("%s(), Unable to allocate object!\n", 87 IRDA_WARNING("%s(), Unable to allocate object!\n",
88 __FUNCTION__); 88 __FUNCTION__);
89 return NULL; 89 return NULL;
90 } 90 }
91 memset(obj, 0, sizeof( struct ias_object));
92 91
93 obj->magic = IAS_OBJECT_MAGIC; 92 obj->magic = IAS_OBJECT_MAGIC;
94 obj->name = strndup(name, IAS_MAX_CLASSNAME); 93 obj->name = strndup(name, IAS_MAX_CLASSNAME);
@@ -346,13 +345,12 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
346 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); 345 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
347 IRDA_ASSERT(name != NULL, return;); 346 IRDA_ASSERT(name != NULL, return;);
348 347
349 attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 348 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
350 if (attrib == NULL) { 349 if (attrib == NULL) {
351 IRDA_WARNING("%s: Unable to allocate attribute!\n", 350 IRDA_WARNING("%s: Unable to allocate attribute!\n",
352 __FUNCTION__); 351 __FUNCTION__);
353 return; 352 return;
354 } 353 }
355 memset(attrib, 0, sizeof( struct ias_attrib));
356 354
357 attrib->magic = IAS_ATTRIB_MAGIC; 355 attrib->magic = IAS_ATTRIB_MAGIC;
358 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 356 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -382,13 +380,12 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
382 IRDA_ASSERT(name != NULL, return;); 380 IRDA_ASSERT(name != NULL, return;);
383 IRDA_ASSERT(octets != NULL, return;); 381 IRDA_ASSERT(octets != NULL, return;);
384 382
385 attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 383 attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
386 if (attrib == NULL) { 384 if (attrib == NULL) {
387 IRDA_WARNING("%s: Unable to allocate attribute!\n", 385 IRDA_WARNING("%s: Unable to allocate attribute!\n",
388 __FUNCTION__); 386 __FUNCTION__);
389 return; 387 return;
390 } 388 }
391 memset(attrib, 0, sizeof( struct ias_attrib));
392 389
393 attrib->magic = IAS_ATTRIB_MAGIC; 390 attrib->magic = IAS_ATTRIB_MAGIC;
394 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 391 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -416,13 +413,12 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
416 IRDA_ASSERT(name != NULL, return;); 413 IRDA_ASSERT(name != NULL, return;);
417 IRDA_ASSERT(value != NULL, return;); 414 IRDA_ASSERT(value != NULL, return;);
418 415
419 attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 416 attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
420 if (attrib == NULL) { 417 if (attrib == NULL) {
421 IRDA_WARNING("%s: Unable to allocate attribute!\n", 418 IRDA_WARNING("%s: Unable to allocate attribute!\n",
422 __FUNCTION__); 419 __FUNCTION__);
423 return; 420 return;
424 } 421 }
425 memset(attrib, 0, sizeof( struct ias_attrib));
426 422
427 attrib->magic = IAS_ATTRIB_MAGIC; 423 attrib->magic = IAS_ATTRIB_MAGIC;
428 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 424 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
@@ -443,12 +439,11 @@ struct ias_value *irias_new_integer_value(int integer)
443{ 439{
444 struct ias_value *value; 440 struct ias_value *value;
445 441
446 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 442 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
447 if (value == NULL) { 443 if (value == NULL) {
448 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 444 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
449 return NULL; 445 return NULL;
450 } 446 }
451 memset(value, 0, sizeof(struct ias_value));
452 447
453 value->type = IAS_INTEGER; 448 value->type = IAS_INTEGER;
454 value->len = 4; 449 value->len = 4;
@@ -469,12 +464,11 @@ struct ias_value *irias_new_string_value(char *string)
469{ 464{
470 struct ias_value *value; 465 struct ias_value *value;
471 466
472 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 467 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
473 if (value == NULL) { 468 if (value == NULL) {
474 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 469 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
475 return NULL; 470 return NULL;
476 } 471 }
477 memset( value, 0, sizeof( struct ias_value));
478 472
479 value->type = IAS_STRING; 473 value->type = IAS_STRING;
480 value->charset = CS_ASCII; 474 value->charset = CS_ASCII;
@@ -495,12 +489,11 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
495{ 489{
496 struct ias_value *value; 490 struct ias_value *value;
497 491
498 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 492 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
499 if (value == NULL) { 493 if (value == NULL) {
500 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 494 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
501 return NULL; 495 return NULL;
502 } 496 }
503 memset(value, 0, sizeof(struct ias_value));
504 497
505 value->type = IAS_OCT_SEQ; 498 value->type = IAS_OCT_SEQ;
506 /* Check length */ 499 /* Check length */
@@ -522,12 +515,11 @@ struct ias_value *irias_new_missing_value(void)
522{ 515{
523 struct ias_value *value; 516 struct ias_value *value;
524 517
525 value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 518 value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
526 if (value == NULL) { 519 if (value == NULL) {
527 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 520 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
528 return NULL; 521 return NULL;
529 } 522 }
530 memset(value, 0, sizeof(struct ias_value));
531 523
532 value->type = IAS_MISSING; 524 value->type = IAS_MISSING;
533 value->len = 0; 525 value->len = 0;
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index bd659dd545ac..7dd0a2fe1d20 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -636,7 +636,7 @@ void irlan_get_provider_info(struct irlan_cb *self)
636 IRDA_ASSERT(self != NULL, return;); 636 IRDA_ASSERT(self != NULL, return;);
637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
638 638
639 skb = dev_alloc_skb(64); 639 skb = alloc_skb(64, GFP_ATOMIC);
640 if (!skb) 640 if (!skb)
641 return; 641 return;
642 642
@@ -668,7 +668,7 @@ void irlan_open_data_channel(struct irlan_cb *self)
668 IRDA_ASSERT(self != NULL, return;); 668 IRDA_ASSERT(self != NULL, return;);
669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
670 670
671 skb = dev_alloc_skb(64); 671 skb = alloc_skb(64, GFP_ATOMIC);
672 if (!skb) 672 if (!skb)
673 return; 673 return;
674 674
@@ -704,7 +704,7 @@ void irlan_close_data_channel(struct irlan_cb *self)
704 if (self->client.tsap_ctrl == NULL) 704 if (self->client.tsap_ctrl == NULL)
705 return; 705 return;
706 706
707 skb = dev_alloc_skb(64); 707 skb = alloc_skb(64, GFP_ATOMIC);
708 if (!skb) 708 if (!skb)
709 return; 709 return;
710 710
@@ -739,7 +739,7 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
739 IRDA_ASSERT(self != NULL, return;); 739 IRDA_ASSERT(self != NULL, return;);
740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
741 741
742 skb = dev_alloc_skb(128); 742 skb = alloc_skb(128, GFP_ATOMIC);
743 if (!skb) 743 if (!skb)
744 return; 744 return;
745 745
@@ -777,7 +777,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
777 IRDA_ASSERT(self != NULL, return;); 777 IRDA_ASSERT(self != NULL, return;);
778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
779 779
780 skb = dev_alloc_skb(128); 780 skb = alloc_skb(128, GFP_ATOMIC);
781 if (!skb) 781 if (!skb)
782 return; 782 return;
783 783
@@ -816,7 +816,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
816 IRDA_ASSERT(self != NULL, return;); 816 IRDA_ASSERT(self != NULL, return;);
817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
818 818
819 skb = dev_alloc_skb(128); 819 skb = alloc_skb(128, GFP_ATOMIC);
820 if (!skb) 820 if (!skb)
821 return; 821 return;
822 822
@@ -856,7 +856,7 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
856 IRDA_ASSERT(self != NULL, return;); 856 IRDA_ASSERT(self != NULL, return;);
857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
858 858
859 skb = dev_alloc_skb(128); 859 skb = alloc_skb(128, GFP_ATOMIC);
860 if (!skb) 860 if (!skb)
861 return; 861 return;
862 862
@@ -891,7 +891,7 @@ void irlan_get_media_char(struct irlan_cb *self)
891 IRDA_ASSERT(self != NULL, return;); 891 IRDA_ASSERT(self != NULL, return;);
892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
893 893
894 skb = dev_alloc_skb(64); 894 skb = alloc_skb(64, GFP_ATOMIC);
895 if (!skb) 895 if (!skb)
896 return; 896 return;
897 897
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 39c202d1c374..9c0df86044d7 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
296 IRDA_ASSERT(self != NULL, return;); 296 IRDA_ASSERT(self != NULL, return;);
297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
298 298
299 skb = dev_alloc_skb(128); 299 skb = alloc_skb(128, GFP_ATOMIC);
300 if (!skb) 300 if (!skb)
301 return; 301 return;
302 302
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index cade355ac8af..e7852a07495e 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -116,11 +116,10 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
117 117
118 /* Initialize the irlap structure. */ 118 /* Initialize the irlap structure. */
119 self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL); 119 self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
120 if (self == NULL) 120 if (self == NULL)
121 return NULL; 121 return NULL;
122 122
123 memset(self, 0, sizeof(struct irlap_cb));
124 self->magic = LAP_MAGIC; 123 self->magic = LAP_MAGIC;
125 124
126 /* Make a binding between the layers */ 125 /* Make a binding between the layers */
@@ -882,7 +881,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
882 /* Change speed now, or just piggyback speed on frames */ 881 /* Change speed now, or just piggyback speed on frames */
883 if (now) { 882 if (now) {
884 /* Send down empty frame to trigger speed change */ 883 /* Send down empty frame to trigger speed change */
885 skb = dev_alloc_skb(0); 884 skb = alloc_skb(0, GFP_ATOMIC);
886 if (skb) 885 if (skb)
887 irlap_queue_xmit(self, skb); 886 irlap_queue_xmit(self, skb);
888 } 887 }
@@ -1222,7 +1221,7 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
1222{ 1221{
1223 struct seq_file *seq; 1222 struct seq_file *seq;
1224 int rc = -ENOMEM; 1223 int rc = -ENOMEM;
1225 struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1224 struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1226 1225
1227 if (!s) 1226 if (!s)
1228 goto out; 1227 goto out;
@@ -1238,7 +1237,6 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
1238 1237
1239 seq = file->private_data; 1238 seq = file->private_data;
1240 seq->private = s; 1239 seq->private = s;
1241 memset(s, 0, sizeof(*s));
1242out: 1240out:
1243 return rc; 1241 return rc;
1244out_kfree: 1242out_kfree:
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 3e9a06abbdd0..ccb983bf0f4a 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -117,7 +117,7 @@ void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
118 118
119 /* Allocate frame */ 119 /* Allocate frame */
120 tx_skb = dev_alloc_skb(64); 120 tx_skb = alloc_skb(64, GFP_ATOMIC);
121 if (!tx_skb) 121 if (!tx_skb)
122 return; 122 return;
123 123
@@ -210,7 +210,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
211 211
212 /* Allocate frame */ 212 /* Allocate frame */
213 tx_skb = dev_alloc_skb(64); 213 tx_skb = alloc_skb(64, GFP_ATOMIC);
214 if (!tx_skb) 214 if (!tx_skb)
215 return; 215 return;
216 216
@@ -250,7 +250,7 @@ void irlap_send_dm_frame( struct irlap_cb *self)
250 IRDA_ASSERT(self != NULL, return;); 250 IRDA_ASSERT(self != NULL, return;);
251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
252 252
253 tx_skb = dev_alloc_skb(32); 253 tx_skb = alloc_skb(32, GFP_ATOMIC);
254 if (!tx_skb) 254 if (!tx_skb)
255 return; 255 return;
256 256
@@ -282,7 +282,7 @@ void irlap_send_disc_frame(struct irlap_cb *self)
282 IRDA_ASSERT(self != NULL, return;); 282 IRDA_ASSERT(self != NULL, return;);
283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
284 284
285 tx_skb = dev_alloc_skb(16); 285 tx_skb = alloc_skb(16, GFP_ATOMIC);
286 if (!tx_skb) 286 if (!tx_skb)
287 return; 287 return;
288 288
@@ -315,7 +315,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
316 IRDA_ASSERT(discovery != NULL, return;); 316 IRDA_ASSERT(discovery != NULL, return;);
317 317
318 tx_skb = dev_alloc_skb(64); 318 tx_skb = alloc_skb(64, GFP_ATOMIC);
319 if (!tx_skb) 319 if (!tx_skb)
320 return; 320 return;
321 321
@@ -422,11 +422,10 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
422 return; 422 return;
423 } 423 }
424 424
425 if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 425 if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); 426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__);
427 return; 427 return;
428 } 428 }
429 memset(discovery, 0, sizeof(discovery_t));
430 429
431 discovery->data.daddr = info->daddr; 430 discovery->data.daddr = info->daddr;
432 discovery->data.saddr = self->saddr; 431 discovery->data.saddr = self->saddr;
@@ -576,7 +575,7 @@ void irlap_send_rr_frame(struct irlap_cb *self, int command)
576 struct sk_buff *tx_skb; 575 struct sk_buff *tx_skb;
577 __u8 *frame; 576 __u8 *frame;
578 577
579 tx_skb = dev_alloc_skb(16); 578 tx_skb = alloc_skb(16, GFP_ATOMIC);
580 if (!tx_skb) 579 if (!tx_skb)
581 return; 580 return;
582 581
@@ -601,7 +600,7 @@ void irlap_send_rd_frame(struct irlap_cb *self)
601 struct sk_buff *tx_skb; 600 struct sk_buff *tx_skb;
602 __u8 *frame; 601 __u8 *frame;
603 602
604 tx_skb = dev_alloc_skb(16); 603 tx_skb = alloc_skb(16, GFP_ATOMIC);
605 if (!tx_skb) 604 if (!tx_skb)
606 return; 605 return;
607 606
@@ -1215,7 +1214,7 @@ void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
1215 struct test_frame *frame; 1214 struct test_frame *frame;
1216 __u8 *info; 1215 __u8 *info;
1217 1216
1218 tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame)); 1217 tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC);
1219 if (!tx_skb) 1218 if (!tx_skb)
1220 return; 1219 return;
1221 1220
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 129ad64c15bb..c440913dee14 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -78,10 +78,9 @@ int __init irlmp_init(void)
78{ 78{
79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
80 /* Initialize the irlmp structure. */ 80 /* Initialize the irlmp structure. */
81 irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 81 irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
82 if (irlmp == NULL) 82 if (irlmp == NULL)
83 return -ENOMEM; 83 return -ENOMEM;
84 memset(irlmp, 0, sizeof(struct irlmp_cb));
85 84
86 irlmp->magic = LMP_MAGIC; 85 irlmp->magic = LMP_MAGIC;
87 86
@@ -160,12 +159,11 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
160 return NULL; 159 return NULL;
161 160
162 /* Allocate new instance of a LSAP connection */ 161 /* Allocate new instance of a LSAP connection */
163 self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 162 self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
164 if (self == NULL) { 163 if (self == NULL) {
165 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); 164 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__);
166 return NULL; 165 return NULL;
167 } 166 }
168 memset(self, 0, sizeof(struct lsap_cb));
169 167
170 self->magic = LMP_LSAP_MAGIC; 168 self->magic = LMP_LSAP_MAGIC;
171 self->slsap_sel = slsap_sel; 169 self->slsap_sel = slsap_sel;
@@ -288,12 +286,11 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
288 /* 286 /*
289 * Allocate new instance of a LSAP connection 287 * Allocate new instance of a LSAP connection
290 */ 288 */
291 lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL); 289 lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
292 if (lap == NULL) { 290 if (lap == NULL) {
293 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); 291 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__);
294 return; 292 return;
295 } 293 }
296 memset(lap, 0, sizeof(struct lap_cb));
297 294
298 lap->irlap = irlap; 295 lap->irlap = irlap;
299 lap->magic = LMP_LAP_MAGIC; 296 lap->magic = LMP_LAP_MAGIC;
@@ -395,7 +392,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
395 392
396 /* Any userdata? */ 393 /* Any userdata? */
397 if (tx_skb == NULL) { 394 if (tx_skb == NULL) {
398 tx_skb = dev_alloc_skb(64); 395 tx_skb = alloc_skb(64, GFP_ATOMIC);
399 if (!tx_skb) 396 if (!tx_skb)
400 return -ENOMEM; 397 return -ENOMEM;
401 398
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e53bf9e0053e..a1e502ff9070 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -476,11 +476,10 @@ dev_irnet_open(struct inode * inode,
476#endif /* SECURE_DEVIRNET */ 476#endif /* SECURE_DEVIRNET */
477 477
478 /* Allocate a private structure for this IrNET instance */ 478 /* Allocate a private structure for this IrNET instance */
479 ap = kmalloc(sizeof(*ap), GFP_KERNEL); 479 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); 480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n");
481 481
482 /* initialize the irnet structure */ 482 /* initialize the irnet structure */
483 memset(ap, 0, sizeof(*ap));
484 ap->file = file; 483 ap->file = file;
485 484
486 /* PPP channel setup */ 485 /* PPP channel setup */
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 49c51c5f1a86..42acf1cde737 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -85,10 +85,9 @@ static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
85 */ 85 */
86int __init irttp_init(void) 86int __init irttp_init(void)
87{ 87{
88 irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); 88 irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL);
89 if (irttp == NULL) 89 if (irttp == NULL)
90 return -ENOMEM; 90 return -ENOMEM;
91 memset(irttp, 0, sizeof(struct irttp_cb));
92 91
93 irttp->magic = TTP_MAGIC; 92 irttp->magic = TTP_MAGIC;
94 93
@@ -306,7 +305,8 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
306 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); 305 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__);
307 306
308 /* Make new segment */ 307 /* Make new segment */
309 frag = dev_alloc_skb(self->max_seg_size+self->max_header_size); 308 frag = alloc_skb(self->max_seg_size+self->max_header_size,
309 GFP_ATOMIC);
310 if (!frag) 310 if (!frag)
311 return; 311 return;
312 312
@@ -389,12 +389,11 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
389 return NULL; 389 return NULL;
390 } 390 }
391 391
392 self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 392 self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
393 if (self == NULL) { 393 if (self == NULL) {
394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); 394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__);
395 return NULL; 395 return NULL;
396 } 396 }
397 memset(self, 0, sizeof(struct tsap_cb));
398 spin_lock_init(&self->lock); 397 spin_lock_init(&self->lock);
399 398
400 /* Initialise todo timer */ 399 /* Initialise todo timer */
@@ -805,7 +804,7 @@ static inline void irttp_give_credit(struct tsap_cb *self)
805 self->send_credit, self->avail_credit, self->remote_credit); 804 self->send_credit, self->avail_credit, self->remote_credit);
806 805
807 /* Give credit to peer */ 806 /* Give credit to peer */
808 tx_skb = dev_alloc_skb(64); 807 tx_skb = alloc_skb(64, GFP_ATOMIC);
809 if (!tx_skb) 808 if (!tx_skb)
810 return; 809 return;
811 810
@@ -1094,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
1094 1093
1095 /* Any userdata supplied? */ 1094 /* Any userdata supplied? */
1096 if (userdata == NULL) { 1095 if (userdata == NULL) {
1097 tx_skb = dev_alloc_skb(64); 1096 tx_skb = alloc_skb(64, GFP_ATOMIC);
1098 if (!tx_skb) 1097 if (!tx_skb)
1099 return -ENOMEM; 1098 return -ENOMEM;
1100 1099
@@ -1342,7 +1341,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
1342 1341
1343 /* Any userdata supplied? */ 1342 /* Any userdata supplied? */
1344 if (userdata == NULL) { 1343 if (userdata == NULL) {
1345 tx_skb = dev_alloc_skb(64); 1344 tx_skb = alloc_skb(64, GFP_ATOMIC);
1346 if (!tx_skb) 1345 if (!tx_skb)
1347 return -ENOMEM; 1346 return -ENOMEM;
1348 1347
@@ -1541,7 +1540,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
1541 1540
1542 if (!userdata) { 1541 if (!userdata) {
1543 struct sk_buff *tx_skb; 1542 struct sk_buff *tx_skb;
1544 tx_skb = dev_alloc_skb(64); 1543 tx_skb = alloc_skb(64, GFP_ATOMIC);
1545 if (!tx_skb) 1544 if (!tx_skb)
1546 return -ENOMEM; 1545 return -ENOMEM;
1547 1546
@@ -1876,7 +1875,7 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
1876 int rc = -ENOMEM; 1875 int rc = -ENOMEM;
1877 struct irttp_iter_state *s; 1876 struct irttp_iter_state *s;
1878 1877
1879 s = kmalloc(sizeof(*s), GFP_KERNEL); 1878 s = kzalloc(sizeof(*s), GFP_KERNEL);
1880 if (!s) 1879 if (!s)
1881 goto out; 1880 goto out;
1882 1881
@@ -1886,7 +1885,6 @@ static int irttp_seq_open(struct inode *inode, struct file *file)
1886 1885
1887 seq = file->private_data; 1886 seq = file->private_data;
1888 seq->private = s; 1887 seq->private = s;
1889 memset(s, 0, sizeof(*s));
1890out: 1888out:
1891 return rc; 1889 return rc;
1892out_kfree: 1890out_kfree:
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index aea6616cea3d..7e6bc41eeb21 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -115,14 +115,12 @@ static struct lapb_cb *lapb_devtostruct(struct net_device *dev)
115 */ 115 */
116static struct lapb_cb *lapb_create_cb(void) 116static struct lapb_cb *lapb_create_cb(void)
117{ 117{
118 struct lapb_cb *lapb = kmalloc(sizeof(*lapb), GFP_ATOMIC); 118 struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC);
119 119
120 120
121 if (!lapb) 121 if (!lapb)
122 goto out; 122 goto out;
123 123
124 memset(lapb, 0x00, sizeof(*lapb));
125
126 skb_queue_head_init(&lapb->write_queue); 124 skb_queue_head_init(&lapb->write_queue);
127 skb_queue_head_init(&lapb->ack_queue); 125 skb_queue_head_init(&lapb->ack_queue);
128 126
@@ -240,11 +238,13 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
240 goto out_put; 238 goto out_put;
241 239
242 if (lapb->state == LAPB_STATE_0) { 240 if (lapb->state == LAPB_STATE_0) {
243 if (((parms->mode & LAPB_EXTENDED) && 241 if (parms->mode & LAPB_EXTENDED) {
244 (parms->window < 1 || parms->window > 127)) || 242 if (parms->window < 1 || parms->window > 127)
245 (parms->window < 1 || parms->window > 7)) 243 goto out_put;
246 goto out_put; 244 } else {
247 245 if (parms->window < 1 || parms->window > 7)
246 goto out_put;
247 }
248 lapb->mode = parms->mode; 248 lapb->mode = parms->mode;
249 lapb->window = parms->window; 249 lapb->window = parms->window;
250 } 250 }
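
The lapb_setparms() rewrite fixes real breakage: in the old compound test the modulo-8 clause `(parms->window < 1 || parms->window > 7)` applied unconditionally, so any extended-mode (modulo-128) window above 7 was rejected. The corrected per-mode bound, extracted as a predicate (the helper is mine; LAPB_EXTENDED comes from <linux/lapb.h>):

#include <linux/lapb.h>

/* The window must fit the sequence-number space of the selected mode. */
static int lapb_window_ok(unsigned int mode, unsigned int window)
{
	if (mode & LAPB_EXTENDED)		/* modulo-128 operation */
		return window >= 1 && window <= 127;
	return window >= 1 && window <= 7;	/* modulo-8 operation */
}
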
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index d6cfe84d521b..2652ead96c64 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -784,24 +784,20 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
784 copied += used; 784 copied += used;
785 len -= used; 785 len -= used;
786 786
787 if (used + offset < skb->len)
788 continue;
789
790 if (!(flags & MSG_PEEK)) { 787 if (!(flags & MSG_PEEK)) {
791 sk_eat_skb(sk, skb, 0); 788 sk_eat_skb(sk, skb, 0);
792 *seq = 0; 789 *seq = 0;
793 } 790 }
791
 792 /* For non-stream protocols we get one packet per recvmsg call */
793 if (sk->sk_type != SOCK_STREAM)
794 goto copy_uaddr;
795
796 /* Partial read */
797 if (used + offset < skb->len)
798 continue;
794 } while (len > 0); 799 } while (len > 0);
795 800
796 /*
797 * According to UNIX98, msg_name/msg_namelen are ignored
798 * on connected socket. -ANK
799 * But... af_llc still doesn't have separate sets of methods for
800 * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will
801 * eventually fix this tho :-) -acme
802 */
803 if (sk->sk_type == SOCK_DGRAM)
804 goto copy_uaddr;
805out: 801out:
806 release_sock(sk); 802 release_sock(sk);
807 return copied; 803 return copied;
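
The llc_ui_recvmsg() reordering pins down the socket-type semantics: consume the skb (unless MSG_PEEK) as soon as its data has been copied, return after exactly one packet for non-stream sockets, and only loop on a partial read for SOCK_STREAM. The decision distilled into a helper; this is a sketch, and the surrounding copy loop is elided:

#include <net/sock.h>
#include <linux/net.h>
#include <linux/socket.h>

/* Returns nonzero when the receive loop is done after this skb. */
static int recv_done_with_skb(struct sock *sk, struct sk_buff *skb, int flags)
{
	if (!(flags & MSG_PEEK))
		sk_eat_skb(sk, skb, 0);	/* copied: unlink and free the skb */

	/* datagram-style sockets hand back exactly one packet per call */
	return sk->sk_type != SOCK_STREAM;
}
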
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index bd242a49514a..d12413cff5bd 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -33,10 +33,9 @@ unsigned char llc_station_mac_sa[ETH_ALEN];
33 */ 33 */
34static struct llc_sap *llc_sap_alloc(void) 34static struct llc_sap *llc_sap_alloc(void)
35{ 35{
36 struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC); 36 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
37 37
38 if (sap) { 38 if (sap) {
39 memset(sap, 0, sizeof(*sap));
40 sap->state = LLC_SAP_STATE_ACTIVE; 39 sap->state = LLC_SAP_STATE_ACTIVE;
41 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); 40 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN);
42 rwlock_init(&sap->sk_list.lock); 41 rwlock_init(&sap->sk_list.lock);
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 20c4eb5c1ac6..61cb8cf7d153 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -51,10 +51,10 @@ void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim)
51{ 51{
52 struct sockaddr_llc *addr; 52 struct sockaddr_llc *addr;
53 53
54 if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */
55 return;
56 /* save primitive for use by the user. */ 54 /* save primitive for use by the user. */
57 addr = llc_ui_skb_cb(skb); 55 addr = llc_ui_skb_cb(skb);
56
57 memset(addr, 0, sizeof(*addr));
58 addr->sllc_family = sk->sk_family; 58 addr->sllc_family = sk->sk_family;
59 addr->sllc_arphrd = skb->dev->type; 59 addr->sllc_arphrd = skb->dev->type;
60 addr->sllc_test = prim == LLC_TEST_PRIM; 60 addr->sllc_test = prim == LLC_TEST_PRIM;
@@ -330,6 +330,9 @@ static void llc_sap_mcast(struct llc_sap *sap,
330 if (llc->laddr.lsap != laddr->lsap) 330 if (llc->laddr.lsap != laddr->lsap)
331 continue; 331 continue;
332 332
333 if (llc->dev != skb->dev)
334 continue;
335
333 skb1 = skb_clone(skb, GFP_ATOMIC); 336 skb1 = skb_clone(skb, GFP_ATOMIC);
334 if (!skb1) 337 if (!skb1)
335 break; 338 break;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 42a178aa30f9..a9894ddfd72a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -386,8 +386,8 @@ config NETFILTER_XT_MATCH_REALM
386 <file:Documentation/modules.txt>. If unsure, say `N'. 386 <file:Documentation/modules.txt>. If unsure, say `N'.
387 387
388config NETFILTER_XT_MATCH_SCTP 388config NETFILTER_XT_MATCH_SCTP
389 tristate '"sctp" protocol match support' 389 tristate '"sctp" protocol match support (EXPERIMENTAL)'
390 depends on NETFILTER_XTABLES 390 depends on NETFILTER_XTABLES && EXPERIMENTAL
391 help 391 help
392 With this option enabled, you will be able to use the 392 With this option enabled, you will be able to use the
393 `sctp' match in order to match on SCTP source/destination ports 393 `sctp' match in order to match on SCTP source/destination ports
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index af4845971f70..6527d4e048d8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
429 cb->args[0], *id); 429 cb->args[0], *id);
430 430
431 read_lock_bh(&nf_conntrack_lock); 431 read_lock_bh(&nf_conntrack_lock);
432 last = (struct nf_conn *)cb->args[1];
432 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 433 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
433restart: 434restart:
434 last = (struct nf_conn *)cb->args[1];
435 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { 435 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
436 h = (struct nf_conntrack_tuple_hash *) i; 436 h = (struct nf_conntrack_tuple_hash *) i;
437 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) 437 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
@@ -442,13 +442,10 @@ restart:
442 * then dump everything. */ 442 * then dump everything. */
443 if (l3proto && L3PROTO(ct) != l3proto) 443 if (l3proto && L3PROTO(ct) != l3proto)
444 continue; 444 continue;
445 if (last != NULL) { 445 if (cb->args[1]) {
446 if (ct == last) { 446 if (ct != last)
447 nf_ct_put(last);
448 cb->args[1] = 0;
449 last = NULL;
450 } else
451 continue; 447 continue;
448 cb->args[1] = 0;
452 } 449 }
453 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 450 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
454 cb->nlh->nlmsg_seq, 451 cb->nlh->nlmsg_seq,
@@ -459,17 +456,17 @@ restart:
459 goto out; 456 goto out;
460 } 457 }
461 } 458 }
462 if (last != NULL) { 459 if (cb->args[1]) {
463 nf_ct_put(last);
464 cb->args[1] = 0; 460 cb->args[1] = 0;
465 goto restart; 461 goto restart;
466 } 462 }
467 } 463 }
468out: 464out:
469 read_unlock_bh(&nf_conntrack_lock); 465 read_unlock_bh(&nf_conntrack_lock);
466 if (last)
467 nf_ct_put(last);
470 468
471 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); 469 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
472
473 return skb->len; 470 return skb->len;
474} 471}
475 472
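
ctnetlink_dump_table() is a resumable netlink dump: cb->args[0] holds the hash bucket and cb->args[1] a referenced pointer to the last entry dumped, so the next invocation can skip ahead. The fix hoists the `last` load out of the per-bucket restart loop and drops its reference exactly once, on the exit path, instead of inside the scan where a bucket rescan could put it twice. The general skeleton, with illustrative object and helper names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>

#define MY_HSIZE 64

struct my_obj {
	struct list_head list;
};

static struct list_head my_hash[MY_HSIZE];
static DEFINE_RWLOCK(my_table_lock);

/* stand-ins for the real fill/refcount helpers */
int fill_one(struct sk_buff *skb, struct netlink_callback *cb, struct my_obj *o);
void my_obj_hold(struct my_obj *o);
void my_obj_put(struct my_obj *o);

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_obj *obj, *last;

	read_lock_bh(&my_table_lock);
	last = (struct my_obj *)cb->args[1];	/* loaded once per invocation */
	for (; cb->args[0] < MY_HSIZE; cb->args[0]++) {
restart:
		list_for_each_entry(obj, &my_hash[cb->args[0]], list) {
			if (cb->args[1]) {
				if (obj != last)
					continue;	/* still skipping */
				cb->args[1] = 0;	/* resume point found */
			}
			if (fill_one(skb, cb, obj) < 0) {
				/* skb full: pin the resume point and stop */
				my_obj_hold(obj);
				cb->args[1] = (unsigned long)obj;
				goto out;
			}
		}
		if (cb->args[1]) {
			cb->args[1] = 0;	/* resume point is gone: */
			goto restart;		/* rescan this bucket fully */
		}
	}
out:
	read_unlock_bh(&my_table_lock);
	if (last)
		my_obj_put(last);	/* dropped once, on every exit path */
	return skb->len;
}
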
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5fcab2ef231f..4ef836699962 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -428,6 +428,8 @@ static struct file_operations ct_cpu_seq_fops = {
428 428
429/* Sysctl support */ 429/* Sysctl support */
430 430
431int nf_conntrack_checksum = 1;
432
431#ifdef CONFIG_SYSCTL 433#ifdef CONFIG_SYSCTL
432 434
433/* From nf_conntrack_core.c */ 435/* From nf_conntrack_core.c */
@@ -459,8 +461,6 @@ extern unsigned int nf_ct_generic_timeout;
459static int log_invalid_proto_min = 0; 461static int log_invalid_proto_min = 0;
460static int log_invalid_proto_max = 255; 462static int log_invalid_proto_max = 255;
461 463
462int nf_conntrack_checksum = 1;
463
464static struct ctl_table_header *nf_ct_sysctl_header; 464static struct ctl_table_header *nf_ct_sysctl_header;
465 465
466static ctl_table nf_ct_sysctl_table[] = { 466static ctl_table nf_ct_sysctl_table[] = {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bb6fcee452ca..662a869593bf 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -219,21 +219,20 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
219 219
220 switch (verdict & NF_VERDICT_MASK) { 220 switch (verdict & NF_VERDICT_MASK) {
221 case NF_ACCEPT: 221 case NF_ACCEPT:
222 case NF_STOP:
222 info->okfn(skb); 223 info->okfn(skb);
224 case NF_STOLEN:
223 break; 225 break;
224
225 case NF_QUEUE: 226 case NF_QUEUE:
226 if (!nf_queue(&skb, elem, info->pf, info->hook, 227 if (!nf_queue(&skb, elem, info->pf, info->hook,
227 info->indev, info->outdev, info->okfn, 228 info->indev, info->outdev, info->okfn,
228 verdict >> NF_VERDICT_BITS)) 229 verdict >> NF_VERDICT_BITS))
229 goto next_hook; 230 goto next_hook;
230 break; 231 break;
232 default:
233 kfree_skb(skb);
231 } 234 }
232 rcu_read_unlock(); 235 rcu_read_unlock();
233
234 if (verdict == NF_DROP)
235 kfree_skb(skb);
236
237 kfree(info); 236 kfree(info);
238 return; 237 return;
239} 238}
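
The nf_reinject() switch is reworked so every verdict has an explicit fate: NF_ACCEPT and NF_STOP hand the skb back via okfn() and then share NF_STOLEN's break (in all three cases the skb is no longer ours), while the default arm frees the packet, covering NF_DROP and any bogus verdict that the old trailing `if (verdict == NF_DROP)` would have leaked. The dispatch reduced to its shape, with queueing elided:

#include <linux/netfilter.h>
#include <linux/skbuff.h>

static void verdict_dispatch(struct sk_buff *skb, unsigned int verdict,
			     int (*okfn)(struct sk_buff *))
{
	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		okfn(skb);	/* packet re-enters the stack */
		/* fall through: the skb is handed off either way */
	case NF_STOLEN:
		break;		/* a hook owns the skb; don't touch it */
	default:
		kfree_skb(skb);	/* NF_DROP and anything unrecognized */
	}
}
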
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 61cdda4e5d3b..b59d3b2bde21 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -366,6 +366,9 @@ __nfulnl_send(struct nfulnl_instance *inst)
366 if (timer_pending(&inst->timer)) 366 if (timer_pending(&inst->timer))
367 del_timer(&inst->timer); 367 del_timer(&inst->timer);
368 368
369 if (!inst->skb)
370 return 0;
371
369 if (inst->qlen > 1) 372 if (inst->qlen > 1)
370 inst->lastnlh->nlmsg_type = NLMSG_DONE; 373 inst->lastnlh->nlmsg_type = NLMSG_DONE;
371 374
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index c2ce9c4011cc..de9537ad9a7c 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -57,6 +57,8 @@ static int checkentry_selinux(struct xt_secmark_target_info *info)
57{ 57{
58 int err; 58 int err;
59 struct xt_secmark_target_selinux_info *sel = &info->u.sel; 59 struct xt_secmark_target_selinux_info *sel = &info->u.sel;
60
61 sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0';
60 62
61 err = selinux_string_to_sid(sel->selctx, &sel->selsid); 63 err = selinux_string_to_sid(sel->selctx, &sel->selsid);
62 if (err) { 64 if (err) {
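
The xt_SECMARK fix is a reminder that match/target config blobs arrive verbatim from userspace, so an embedded fixed-size string may lack its terminating NUL; writing one before the first str*() call bounds every later read. The idiom, with an illustrative struct:

#include <linux/string.h>

#define CTX_MAX 256

struct target_info {
	char ctx[CTX_MAX];
};

static int check_info(struct target_info *info)
{
	/* userspace may have filled all CTX_MAX bytes without a NUL */
	info->ctx[CTX_MAX - 1] = '\0';
	return strlen(info->ctx) > 0;
}
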
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 5fe4c9df17f5..63a965467465 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/netfilter_bridge.h>
13#include <linux/netfilter/xt_physdev.h> 14#include <linux/netfilter/xt_physdev.h>
14#include <linux/netfilter/x_tables.h> 15#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter_bridge.h> 16#include <linux/netfilter_bridge.h>
@@ -113,6 +114,21 @@ checkentry(const char *tablename,
113 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || 114 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
114 info->bitmask & ~XT_PHYSDEV_OP_MASK) 115 info->bitmask & ~XT_PHYSDEV_OP_MASK)
115 return 0; 116 return 0;
117 if (brnf_deferred_hooks == 0 &&
118 info->bitmask & XT_PHYSDEV_OP_OUT &&
119 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
120 info->invert & XT_PHYSDEV_OP_BRIDGED) &&
121 hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
122 (1 << NF_IP_POST_ROUTING))) {
123 printk(KERN_WARNING "physdev match: using --physdev-out in the "
124 "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
125 "traffic is deprecated and breaks other things, it will "
126 "be removed in January 2007. See Documentation/"
127 "feature-removal-schedule.txt for details. This doesn't "
128 "affect you in case you're using it for purely bridged "
129 "traffic.\n");
130 brnf_deferred_hooks = 1;
131 }
116 return 1; 132 return 1;
117} 133}
118 134
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
index 3ac703b5cb8f..d2f5320a80bf 100644
--- a/net/netfilter/xt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -9,6 +9,8 @@
9#include <linux/skbuff.h> 9#include <linux/skbuff.h>
10#include <linux/if_ether.h> 10#include <linux/if_ether.h>
11#include <linux/if_packet.h> 11#include <linux/if_packet.h>
12#include <linux/in.h>
13#include <linux/ip.h>
12 14
13#include <linux/netfilter/xt_pkttype.h> 15#include <linux/netfilter/xt_pkttype.h>
14#include <linux/netfilter/x_tables.h> 16#include <linux/netfilter/x_tables.h>
@@ -28,9 +30,17 @@ static int match(const struct sk_buff *skb,
28 unsigned int protoff, 30 unsigned int protoff,
29 int *hotdrop) 31 int *hotdrop)
30{ 32{
33 u_int8_t type;
31 const struct xt_pkttype_info *info = matchinfo; 34 const struct xt_pkttype_info *info = matchinfo;
32 35
33 return (skb->pkt_type == info->pkttype) ^ info->invert; 36 if (skb->pkt_type == PACKET_LOOPBACK)
37 type = (MULTICAST(skb->nh.iph->daddr)
38 ? PACKET_MULTICAST
39 : PACKET_BROADCAST);
40 else
41 type = skb->pkt_type;
42
43 return (type == info->pkttype) ^ info->invert;
34} 44}
35 45
36static struct xt_match pkttype_match = { 46static struct xt_match pkttype_match = {
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 0ebb6ac2c8c7..275330fcdaaa 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -37,7 +37,7 @@ static int match(const struct sk_buff *skb,
37 37
38 return (skb_find_text((struct sk_buff *)skb, conf->from_offset, 38 return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
39 conf->to_offset, conf->config, &state) 39 conf->to_offset, conf->config, &state)
40 != UINT_MAX) && !conf->invert; 40 != UINT_MAX) ^ conf->invert;
41} 41}
42 42
43#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m) 43#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m)
@@ -55,7 +55,10 @@ static int checkentry(const char *tablename,
55 /* Damn, can't handle this case properly with iptables... */ 55 /* Damn, can't handle this case properly with iptables... */
56 if (conf->from_offset > conf->to_offset) 56 if (conf->from_offset > conf->to_offset)
57 return 0; 57 return 0;
58 58 if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
59 return 0;
60 if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
61 return 0;
59 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, 62 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
60 GFP_KERNEL, TS_AUTOLOAD); 63 GFP_KERNEL, TS_AUTOLOAD);
61 if (IS_ERR(ts_conf)) 64 if (IS_ERR(ts_conf))
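Two fixes land in xt_string: the match result is combined with the invert flag via XOR rather than "&& !conf->invert", so "-m string ! --string ..." now matches when the pattern is absent, and checkentry() rejects an unterminated algorithm name or an oversized pattern before textsearch_prepare() sees them. The XOR idiom in isolation:

    /* skb_find_text() returns the match offset, or UINT_MAX if absent. */
    int found = (pos != UINT_MAX);

    return found ^ invert;   /* invert=0: match on found;
                                invert=1: match on not-found */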
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 55c0adc8f115..8b85036ba8e3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -562,10 +562,9 @@ static int netlink_alloc_groups(struct sock *sk)
562 if (err) 562 if (err)
563 return err; 563 return err;
564 564
565 nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL); 565 nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
566 if (nlk->groups == NULL) 566 if (nlk->groups == NULL)
567 return -ENOMEM; 567 return -ENOMEM;
568 memset(nlk->groups, 0, NLGRPSZ(groups));
569 nlk->ngroups = groups; 568 nlk->ngroups = groups;
570 return 0; 569 return 0;
571} 570}
@@ -1274,8 +1273,7 @@ netlink_kernel_create(int unit, unsigned int groups,
1274 struct netlink_sock *nlk; 1273 struct netlink_sock *nlk;
1275 unsigned long *listeners = NULL; 1274 unsigned long *listeners = NULL;
1276 1275
1277 if (!nl_table) 1276 BUG_ON(!nl_table);
1278 return NULL;
1279 1277
1280 if (unit<0 || unit>=MAX_LINKS) 1278 if (unit<0 || unit>=MAX_LINKS)
1281 return NULL; 1279 return NULL;
@@ -1393,11 +1391,10 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1393 struct sock *sk; 1391 struct sock *sk;
1394 struct netlink_sock *nlk; 1392 struct netlink_sock *nlk;
1395 1393
1396 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1394 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1397 if (cb == NULL) 1395 if (cb == NULL)
1398 return -ENOBUFS; 1396 return -ENOBUFS;
1399 1397
1400 memset(cb, 0, sizeof(*cb));
1401 cb->dump = dump; 1398 cb->dump = dump;
1402 cb->done = done; 1399 cb->done = done;
1403 cb->nlh = nlh; 1400 cb->nlh = nlh;
@@ -1668,7 +1665,7 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
1668 struct nl_seq_iter *iter; 1665 struct nl_seq_iter *iter;
1669 int err; 1666 int err;
1670 1667
1671 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 1668 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1672 if (!iter) 1669 if (!iter)
1673 return -ENOMEM; 1670 return -ENOMEM;
1674 1671
@@ -1678,7 +1675,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
1678 return err; 1675 return err;
1679 } 1676 }
1680 1677
1681 memset(iter, 0, sizeof(*iter));
1682 seq = file->private_data; 1678 seq = file->private_data;
1683 seq->private = iter; 1679 seq->private = iter;
1684 return 0; 1680 return 0;
@@ -1747,14 +1743,9 @@ static int __init netlink_proto_init(void)
1747 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) 1743 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
1748 netlink_skb_parms_too_large(); 1744 netlink_skb_parms_too_large();
1749 1745
1750 nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL); 1746 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
1751 if (!nl_table) { 1747 if (!nl_table)
1752enomem: 1748 goto panic;
1753 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
1754 return -ENOMEM;
1755 }
1756
1757 memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);
1758 1749
1759 if (num_physpages >= (128 * 1024)) 1750 if (num_physpages >= (128 * 1024))
1760 max = num_physpages >> (21 - PAGE_SHIFT); 1751 max = num_physpages >> (21 - PAGE_SHIFT);
@@ -1774,7 +1765,7 @@ enomem:
1774 nl_pid_hash_free(nl_table[i].hash.table, 1765 nl_pid_hash_free(nl_table[i].hash.table,
1775 1 * sizeof(*hash->table)); 1766 1 * sizeof(*hash->table));
1776 kfree(nl_table); 1767 kfree(nl_table);
1777 goto enomem; 1768 goto panic;
1778 } 1769 }
1779 memset(hash->table, 0, 1 * sizeof(*hash->table)); 1770 memset(hash->table, 0, 1 * sizeof(*hash->table));
1780 hash->max_shift = order; 1771 hash->max_shift = order;
@@ -1791,6 +1782,8 @@ enomem:
1791 rtnetlink_init(); 1782 rtnetlink_init();
1792out: 1783out:
1793 return err; 1784 return err;
1785panic:
1786 panic("netlink_init: Cannot allocate nl_table\n");
1794} 1787}
1795 1788
1796core_initcall(netlink_proto_init); 1789core_initcall(netlink_proto_init);
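af_netlink.c shows the conversion this merge applies across most of net/: kmalloc()+memset(0) pairs collapse into kzalloc(), and zeroed array allocations into kcalloc(), which additionally guards the count-times-size multiplication against overflow. The before/after shape, sketched:

    /* before */
    p = kmalloc(n * sizeof(*p), GFP_KERNEL);
    if (!p)
            return -ENOMEM;
    memset(p, 0, n * sizeof(*p));

    /* after */
    p = kcalloc(n, sizeof(*p), GFP_KERNEL);  /* zeroed, overflow-checked */
    if (!p)
            return -ENOMEM;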
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 389a4119e1b4..1d50f801f181 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -66,6 +66,14 @@ static DEFINE_SPINLOCK(nr_list_lock);
66static const struct proto_ops nr_proto_ops; 66static const struct proto_ops nr_proto_ops;
67 67
68/* 68/*
69 * NETROM network devices are virtual network devices encapsulating NETROM
70 * frames into AX.25 which will be sent through an AX.25 device, so form a
71 * special "super class" of normal net devices; split their locks off into a
72 * separate class since they always nest.
73 */
74static struct lock_class_key nr_netdev_xmit_lock_key;
75
76/*
69 * Socket removal during an interrupt is now safe. 77 * Socket removal during an interrupt is now safe.
70 */ 78 */
71static void nr_remove_socket(struct sock *sk) 79static void nr_remove_socket(struct sock *sk)
@@ -986,18 +994,18 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
986 nr_make->vl = 0; 994 nr_make->vl = 0;
987 nr_make->state = NR_STATE_3; 995 nr_make->state = NR_STATE_3;
988 sk_acceptq_added(sk); 996 sk_acceptq_added(sk);
989
990 nr_insert_socket(make);
991
992 skb_queue_head(&sk->sk_receive_queue, skb); 997 skb_queue_head(&sk->sk_receive_queue, skb);
993 998
994 nr_start_heartbeat(make);
995 nr_start_idletimer(make);
996
997 if (!sock_flag(sk, SOCK_DEAD)) 999 if (!sock_flag(sk, SOCK_DEAD))
998 sk->sk_data_ready(sk, skb->len); 1000 sk->sk_data_ready(sk, skb->len);
999 1001
1000 bh_unlock_sock(sk); 1002 bh_unlock_sock(sk);
1003
1004 nr_insert_socket(make);
1005
1006 nr_start_heartbeat(make);
1007 nr_start_idletimer(make);
1008
1001 return 1; 1009 return 1;
1002} 1010}
1003 1011
@@ -1382,14 +1390,12 @@ static int __init nr_proto_init(void)
1382 return -1; 1390 return -1;
1383 } 1391 }
1384 1392
1385 dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1393 dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1386 if (dev_nr == NULL) { 1394 if (dev_nr == NULL) {
1387 printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); 1395 printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
1388 return -1; 1396 return -1;
1389 } 1397 }
1390 1398
1391 memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *));
1392
1393 for (i = 0; i < nr_ndevs; i++) { 1399 for (i = 0; i < nr_ndevs; i++) {
1394 char name[IFNAMSIZ]; 1400 char name[IFNAMSIZ];
1395 struct net_device *dev; 1401 struct net_device *dev;
@@ -1407,6 +1413,7 @@ static int __init nr_proto_init(void)
1407 free_netdev(dev); 1413 free_netdev(dev);
1408 goto fail; 1414 goto fail;
1409 } 1415 }
1416 lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key);
1410 dev_nr[i] = dev; 1417 dev_nr[i] = dev;
1411 } 1418 }
1412 1419
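NET/ROM devices transmit by recursing into an AX.25 device, so two _xmit_lock acquisitions nest and would trip lockdep's one-class-per-lock check; giving the virtual devices a private lock_class_key tells lockdep the nesting is deliberate. Reduced to its core (the ROSE diff below applies the identical treatment):

    static struct lock_class_key virt_xmit_lock_key;

    /* after register_netdev(dev) succeeds: */
    lockdep_set_class(&dev->_xmit_lock, &virt_xmit_lock_key);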
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 75b72d389ba9..ddba1c144260 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -138,8 +138,8 @@ static void nr_heartbeat_expiry(unsigned long param)
138 if (sock_flag(sk, SOCK_DESTROY) || 138 if (sock_flag(sk, SOCK_DESTROY) ||
139 (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { 139 (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
140 sock_hold(sk); 140 sock_hold(sk);
141 nr_destroy_socket(sk);
142 bh_unlock_sock(sk); 141 bh_unlock_sock(sk);
142 nr_destroy_socket(sk);
143 sock_put(sk); 143 sock_put(sk);
144 return; 144 return;
145 } 145 }
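The heartbeat-timer fix is purely an ordering change: nr_destroy_socket() must not run under the socket's bottom-half lock, so the unlock now precedes the destroy, with the sock_hold()/sock_put() pair keeping the socket alive across the window. Schematically:

    sock_hold(sk);           /* pin sk across the unlock */
    bh_unlock_sock(sk);      /* drop the BH lock first... */
    nr_destroy_socket(sk);   /* ...then tear the socket down */
    sock_put(sk);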
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f9cef3671593..4172a5235916 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -626,8 +626,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
626 if ((int)snaplen < 0) 626 if ((int)snaplen < 0)
627 snaplen = 0; 627 snaplen = 0;
628 } 628 }
629 if (snaplen > skb->len-skb->data_len)
630 snaplen = skb->len-skb->data_len;
631 629
632 spin_lock(&sk->sk_receive_queue.lock); 630 spin_lock(&sk->sk_receive_queue.lock);
633 h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head); 631 h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
@@ -644,7 +642,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
644 status &= ~TP_STATUS_LOSING; 642 status &= ~TP_STATUS_LOSING;
645 spin_unlock(&sk->sk_receive_queue.lock); 643 spin_unlock(&sk->sk_receive_queue.lock);
646 644
647 memcpy((u8*)h + macoff, skb->data, snaplen); 645 skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen);
648 646
649 h->tp_len = skb->len; 647 h->tp_len = skb->len;
650 h->tp_snaplen = snaplen; 648 h->tp_snaplen = snaplen;
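tpacket_rcv() copied with memcpy() from skb->data, which only covers the linear head of a paged skb and forced the snaplen clamp deleted above; skb_copy_bits() walks the page fragments as well, so the clamp goes and the full snaplen is captured. The difference, assuming a possibly-nonlinear skb:

    /* Wrong for nonlinear skbs: only skb_headlen() bytes sit at skb->data. */
    /* memcpy(dst, skb->data, len); */

    /* skb_copy_bits() gathers from the linear area and the frags. */
    if (skb_copy_bits(skb, 0, dst, len) < 0)
            return -EFAULT;          /* len exceeded skb->len */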
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d0a67bb31363..08a542855654 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -67,6 +67,14 @@ static struct proto_ops rose_proto_ops;
67ax25_address rose_callsign; 67ax25_address rose_callsign;
68 68
69/* 69/*
70 * ROSE network devices are virtual network devices encapsulating ROSE
71 * frames into AX.25 which will be sent through an AX.25 device, so form a
72 * special "super class" of normal net devices; split their locks off into a
73 * separate class since they always nest.
74 */
75static struct lock_class_key rose_netdev_xmit_lock_key;
76
77/*
70 * Convert a ROSE address into text. 78 * Convert a ROSE address into text.
71 */ 79 */
72const char *rose2asc(const rose_address *addr) 80const char *rose2asc(const rose_address *addr)
@@ -1490,14 +1498,13 @@ static int __init rose_proto_init(void)
1490 1498
1491 rose_callsign = null_ax25_address; 1499 rose_callsign = null_ax25_address;
1492 1500
1493 dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1501 dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1494 if (dev_rose == NULL) { 1502 if (dev_rose == NULL) {
1495 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); 1503 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1496 rc = -ENOMEM; 1504 rc = -ENOMEM;
1497 goto out_proto_unregister; 1505 goto out_proto_unregister;
1498 } 1506 }
1499 1507
1500 memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
1501 for (i = 0; i < rose_ndevs; i++) { 1508 for (i = 0; i < rose_ndevs; i++) {
1502 struct net_device *dev; 1509 struct net_device *dev;
1503 char name[IFNAMSIZ]; 1510 char name[IFNAMSIZ];
@@ -1516,6 +1523,7 @@ static int __init rose_proto_init(void)
1516 free_netdev(dev); 1523 free_netdev(dev);
1517 goto fail; 1524 goto fail;
1518 } 1525 }
1526 lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key);
1519 dev_rose[i] = dev; 1527 dev_rose[i] = dev;
1520 } 1528 }
1521 1529
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 573b572f8f91..93d2c55ad2d5 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -58,13 +58,12 @@ static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
58 _enter("%p",peer); 58 _enter("%p",peer);
59 59
60 /* allocate and initialise a connection record */ 60 /* allocate and initialise a connection record */
61 conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); 61 conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
62 if (!conn) { 62 if (!conn) {
63 _leave(" = -ENOMEM"); 63 _leave(" = -ENOMEM");
64 return -ENOMEM; 64 return -ENOMEM;
65 } 65 }
66 66
67 memset(conn, 0, sizeof(struct rxrpc_connection));
68 atomic_set(&conn->usage, 1); 67 atomic_set(&conn->usage, 1);
69 68
70 INIT_LIST_HEAD(&conn->link); 69 INIT_LIST_HEAD(&conn->link);
@@ -535,13 +534,12 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
535 return -EINVAL; 534 return -EINVAL;
536 } 535 }
537 536
538 msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags); 537 msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags);
539 if (!msg) { 538 if (!msg) {
540 _leave(" = -ENOMEM"); 539 _leave(" = -ENOMEM");
541 return -ENOMEM; 540 return -ENOMEM;
542 } 541 }
543 542
544 memset(msg, 0, sizeof(*msg));
545 atomic_set(&msg->usage, 1); 543 atomic_set(&msg->usage, 1);
546 544
547 INIT_LIST_HEAD(&msg->link); 545 INIT_LIST_HEAD(&msg->link);
diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c
index ed38f5b17c1b..8a275157a3bb 100644
--- a/net/rxrpc/peer.c
+++ b/net/rxrpc/peer.c
@@ -58,13 +58,12 @@ static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
58 _enter("%p,%08x", trans, ntohl(addr)); 58 _enter("%p,%08x", trans, ntohl(addr));
59 59
60 /* allocate and initialise a peer record */ 60 /* allocate and initialise a peer record */
61 peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); 61 peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
62 if (!peer) { 62 if (!peer) {
63 _leave(" = -ENOMEM"); 63 _leave(" = -ENOMEM");
64 return -ENOMEM; 64 return -ENOMEM;
65 } 65 }
66 66
67 memset(peer, 0, sizeof(struct rxrpc_peer));
68 atomic_set(&peer->usage, 1); 67 atomic_set(&peer->usage, 1);
69 68
70 INIT_LIST_HEAD(&peer->link); 69 INIT_LIST_HEAD(&peer->link);
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index dbe6105e83a5..465efc86fccf 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -68,11 +68,10 @@ int rxrpc_create_transport(unsigned short port,
68 68
69 _enter("%hu", port); 69 _enter("%hu", port);
70 70
71 trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); 71 trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
72 if (!trans) 72 if (!trans)
73 return -ENOMEM; 73 return -ENOMEM;
74 74
75 memset(trans, 0, sizeof(struct rxrpc_transport));
76 atomic_set(&trans->usage, 1); 75 atomic_set(&trans->usage, 1);
77 INIT_LIST_HEAD(&trans->services); 76 INIT_LIST_HEAD(&trans->services);
78 INIT_LIST_HEAD(&trans->link); 77 INIT_LIST_HEAD(&trans->link);
@@ -312,13 +311,12 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
312 311
313 _enter(""); 312 _enter("");
314 313
315 msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 314 msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
316 if (!msg) { 315 if (!msg) {
317 _leave(" = -ENOMEM"); 316 _leave(" = -ENOMEM");
318 return -ENOMEM; 317 return -ENOMEM;
319 } 318 }
320 319
321 memset(msg, 0, sizeof(*msg));
322 atomic_set(&msg->usage, 1); 320 atomic_set(&msg->usage, 1);
323 list_add_tail(&msg->link,msgq); 321 list_add_tail(&msg->link,msgq);
324 322
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5b9397b33238..a2587b52e531 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -250,15 +250,17 @@ tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
250 RTA_PUT(skb, a->order, 0, NULL); 250 RTA_PUT(skb, a->order, 0, NULL);
251 err = tcf_action_dump_1(skb, a, bind, ref); 251 err = tcf_action_dump_1(skb, a, bind, ref);
252 if (err < 0) 252 if (err < 0)
253 goto rtattr_failure; 253 goto errout;
254 r->rta_len = skb->tail - (u8*)r; 254 r->rta_len = skb->tail - (u8*)r;
255 } 255 }
256 256
257 return 0; 257 return 0;
258 258
259rtattr_failure: 259rtattr_failure:
260 err = -EINVAL;
261errout:
260 skb_trim(skb, b - skb->data); 262 skb_trim(skb, b - skb->data);
261 return -err; 263 return err;
262} 264}
263 265
264struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, 266struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
@@ -305,14 +307,14 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
305 goto err_mod; 307 goto err_mod;
306 } 308 }
307#endif 309#endif
310 *err = -ENOENT;
308 goto err_out; 311 goto err_out;
309 } 312 }
310 313
311 *err = -ENOMEM; 314 *err = -ENOMEM;
312 a = kmalloc(sizeof(*a), GFP_KERNEL); 315 a = kzalloc(sizeof(*a), GFP_KERNEL);
313 if (a == NULL) 316 if (a == NULL)
314 goto err_mod; 317 goto err_mod;
315 memset(a, 0, sizeof(*a));
316 318
317 /* backward compatibility for policer */ 319 /* backward compatibility for policer */
318 if (name == NULL) 320 if (name == NULL)
@@ -489,10 +491,9 @@ tcf_action_get_1(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int *err)
489 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]); 491 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]);
490 492
491 *err = -ENOMEM; 493 *err = -ENOMEM;
492 a = kmalloc(sizeof(struct tc_action), GFP_KERNEL); 494 a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
493 if (a == NULL) 495 if (a == NULL)
494 return NULL; 496 return NULL;
495 memset(a, 0, sizeof(struct tc_action));
496 497
497 *err = -EINVAL; 498 *err = -EINVAL;
498 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]); 499 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]);
@@ -528,12 +529,11 @@ static struct tc_action *create_a(int i)
528{ 529{
529 struct tc_action *act; 530 struct tc_action *act;
530 531
531 act = kmalloc(sizeof(*act), GFP_KERNEL); 532 act = kzalloc(sizeof(*act), GFP_KERNEL);
532 if (act == NULL) { 533 if (act == NULL) {
533 printk("create_a: failed to alloc!\n"); 534 printk("create_a: failed to alloc!\n");
534 return NULL; 535 return NULL;
535 } 536 }
536 memset(act, 0, sizeof(*act));
537 act->order = i; 537 act->order = i;
538 return act; 538 return act;
539} 539}
@@ -599,8 +599,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
599 return err; 599 return err;
600 600
601rtattr_failure: 601rtattr_failure:
602 module_put(a->ops->owner);
603nlmsg_failure: 602nlmsg_failure:
603 module_put(a->ops->owner);
604err_out: 604err_out:
605 kfree_skb(skb); 605 kfree_skb(skb);
606 kfree(a); 606 kfree(a);
@@ -776,7 +776,7 @@ replay:
776 return ret; 776 return ret;
777} 777}
778 778
779static char * 779static struct rtattr *
780find_dump_kind(struct nlmsghdr *n) 780find_dump_kind(struct nlmsghdr *n)
781{ 781{
782 struct rtattr *tb1, *tb2[TCA_ACT_MAX+1]; 782 struct rtattr *tb1, *tb2[TCA_ACT_MAX+1];
@@ -804,7 +804,7 @@ find_dump_kind(struct nlmsghdr *n)
804 return NULL; 804 return NULL;
805 kind = tb2[TCA_ACT_KIND-1]; 805 kind = tb2[TCA_ACT_KIND-1];
806 806
807 return (char *) RTA_DATA(kind); 807 return kind;
808} 808}
809 809
810static int 810static int
@@ -817,16 +817,15 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
817 struct tc_action a; 817 struct tc_action a;
818 int ret = 0; 818 int ret = 0;
819 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); 819 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
820 char *kind = find_dump_kind(cb->nlh); 820 struct rtattr *kind = find_dump_kind(cb->nlh);
821 821
822 if (kind == NULL) { 822 if (kind == NULL) {
823 printk("tc_dump_action: action bad kind\n"); 823 printk("tc_dump_action: action bad kind\n");
824 return 0; 824 return 0;
825 } 825 }
826 826
827 a_o = tc_lookup_action_n(kind); 827 a_o = tc_lookup_action(kind);
828 if (a_o == NULL) { 828 if (a_o == NULL) {
829 printk("failed to find %s\n", kind);
830 return 0; 829 return 0;
831 } 830 }
832 831
@@ -834,7 +833,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
834 a.ops = a_o; 833 a.ops = a_o;
835 834
836 if (a_o->walk == NULL) { 835 if (a_o->walk == NULL) {
837 printk("tc_dump_action: %s !capable of dumping table\n", kind); 836 printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind);
838 goto rtattr_failure; 837 goto rtattr_failure;
839 } 838 }
840 839
@@ -882,8 +881,6 @@ static int __init tc_action_init(void)
882 link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action; 881 link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action;
883 } 882 }
884 883
885 printk("TC classifier action (bugs to netdev@vger.kernel.org cc "
886 "hadi@cyberus.ca)\n");
887 return 0; 884 return 0;
888} 885}
889 886
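tcf_action_dump() used to return "-err" — negating an already-negative value into a positive, meaningless code — and funnelled two distinct failures through one label. Splitting the labels lets RTA_PUT() failures set -EINVAL while a callee's error propagates untouched; a sketch of the idiom, with emit_attrs() standing in for the real work:

    unsigned char *b = skb->tail;
    int err;

    err = emit_attrs(skb);             /* illustrative callee */
    if (err < 0)
            goto errout;               /* keep the callee's code */
    return 0;

    rtattr_failure:                    /* target of the RTA_PUT() macro */
            err = -EINVAL;             /* our own failure mode */
    errout:
            skb_trim(skb, b - skb->data);  /* undo partial output */
            return err;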
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 58b3a8652042..f257475e0e0c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -209,10 +209,9 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key); 209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key);
210 210
211 /* netlink spinlocks held above us - must use ATOMIC */ 211 /* netlink spinlocks held above us - must use ATOMIC */
212 opt = kmalloc(s, GFP_ATOMIC); 212 opt = kzalloc(s, GFP_ATOMIC);
213 if (opt == NULL) 213 if (opt == NULL)
214 return -ENOBUFS; 214 return -ENOBUFS;
215 memset(opt, 0, s);
216 215
217 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); 216 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key));
218 opt->index = p->index; 217 opt->index = p->index;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 47e00bd9625e..da905d7b4b40 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -196,10 +196,9 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
196 return ret; 196 return ret;
197 } 197 }
198 198
199 p = kmalloc(sizeof(*p), GFP_KERNEL); 199 p = kzalloc(sizeof(*p), GFP_KERNEL);
200 if (p == NULL) 200 if (p == NULL)
201 return -ENOMEM; 201 return -ENOMEM;
202 memset(p, 0, sizeof(*p));
203 202
204 ret = ACT_P_CREATED; 203 ret = ACT_P_CREATED;
205 p->refcnt = 1; 204 p->refcnt = 1;
@@ -429,11 +428,10 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
429 return p; 428 return p;
430 } 429 }
431 430
432 p = kmalloc(sizeof(*p), GFP_KERNEL); 431 p = kzalloc(sizeof(*p), GFP_KERNEL);
433 if (p == NULL) 432 if (p == NULL)
434 return NULL; 433 return NULL;
435 434
436 memset(p, 0, sizeof(*p));
437 p->refcnt = 1; 435 p->refcnt = 1;
438 spin_lock_init(&p->lock); 436 spin_lock_init(&p->lock);
439 p->stats_lock = &p->lock; 437 p->stats_lock = &p->lock;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 61507f006b11..86cac49a0531 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -178,19 +178,17 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
178 178
179 err = -ENOBUFS; 179 err = -ENOBUFS;
180 if (head == NULL) { 180 if (head == NULL) {
181 head = kmalloc(sizeof(*head), GFP_KERNEL); 181 head = kzalloc(sizeof(*head), GFP_KERNEL);
182 if (head == NULL) 182 if (head == NULL)
183 goto errout; 183 goto errout;
184 184
185 memset(head, 0, sizeof(*head));
186 INIT_LIST_HEAD(&head->flist); 185 INIT_LIST_HEAD(&head->flist);
187 tp->root = head; 186 tp->root = head;
188 } 187 }
189 188
190 f = kmalloc(sizeof(*f), GFP_KERNEL); 189 f = kzalloc(sizeof(*f), GFP_KERNEL);
191 if (f == NULL) 190 if (f == NULL)
192 goto errout; 191 goto errout;
193 memset(f, 0, sizeof(*f));
194 192
195 err = -EINVAL; 193 err = -EINVAL;
196 if (handle) 194 if (handle)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index d41de91fc4f6..e6973d9b686d 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -267,20 +267,18 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
267 return -EINVAL; 267 return -EINVAL;
268 268
269 if (head == NULL) { 269 if (head == NULL) {
270 head = kmalloc(sizeof(struct fw_head), GFP_KERNEL); 270 head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
271 if (head == NULL) 271 if (head == NULL)
272 return -ENOBUFS; 272 return -ENOBUFS;
273 memset(head, 0, sizeof(*head));
274 273
275 tcf_tree_lock(tp); 274 tcf_tree_lock(tp);
276 tp->root = head; 275 tp->root = head;
277 tcf_tree_unlock(tp); 276 tcf_tree_unlock(tp);
278 } 277 }
279 278
280 f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL); 279 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
281 if (f == NULL) 280 if (f == NULL)
282 return -ENOBUFS; 281 return -ENOBUFS;
283 memset(f, 0, sizeof(*f));
284 282
285 f->id = handle; 283 f->id = handle;
286 284
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index c2e71900f7bd..d3aea730d4c8 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -396,10 +396,9 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
396 h1 = to_hash(nhandle); 396 h1 = to_hash(nhandle);
397 if ((b = head->table[h1]) == NULL) { 397 if ((b = head->table[h1]) == NULL) {
398 err = -ENOBUFS; 398 err = -ENOBUFS;
399 b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL); 399 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
400 if (b == NULL) 400 if (b == NULL)
401 goto errout; 401 goto errout;
402 memset(b, 0, sizeof(*b));
403 402
404 tcf_tree_lock(tp); 403 tcf_tree_lock(tp);
405 head->table[h1] = b; 404 head->table[h1] = b;
@@ -475,20 +474,18 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
475 474
476 err = -ENOBUFS; 475 err = -ENOBUFS;
477 if (head == NULL) { 476 if (head == NULL) {
478 head = kmalloc(sizeof(struct route4_head), GFP_KERNEL); 477 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
479 if (head == NULL) 478 if (head == NULL)
480 goto errout; 479 goto errout;
481 memset(head, 0, sizeof(struct route4_head));
482 480
483 tcf_tree_lock(tp); 481 tcf_tree_lock(tp);
484 tp->root = head; 482 tp->root = head;
485 tcf_tree_unlock(tp); 483 tcf_tree_unlock(tp);
486 } 484 }
487 485
488 f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL); 486 f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
489 if (f == NULL) 487 if (f == NULL)
490 goto errout; 488 goto errout;
491 memset(f, 0, sizeof(*f));
492 489
493 err = route4_set_parms(tp, base, f, handle, head, tb, 490 err = route4_set_parms(tp, base, f, handle, head, tb,
494 tca[TCA_RATE-1], 1); 491 tca[TCA_RATE-1], 1);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index ba8741971629..6e230ecfba05 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -240,9 +240,8 @@ static int rsvp_init(struct tcf_proto *tp)
240{ 240{
241 struct rsvp_head *data; 241 struct rsvp_head *data;
242 242
243 data = kmalloc(sizeof(struct rsvp_head), GFP_KERNEL); 243 data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
244 if (data) { 244 if (data) {
245 memset(data, 0, sizeof(struct rsvp_head));
246 tp->root = data; 245 tp->root = data;
247 return 0; 246 return 0;
248 } 247 }
@@ -446,11 +445,10 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
446 goto errout2; 445 goto errout2;
447 446
448 err = -ENOBUFS; 447 err = -ENOBUFS;
449 f = kmalloc(sizeof(struct rsvp_filter), GFP_KERNEL); 448 f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
450 if (f == NULL) 449 if (f == NULL)
451 goto errout2; 450 goto errout2;
452 451
453 memset(f, 0, sizeof(*f));
454 h2 = 16; 452 h2 = 16;
455 if (tb[TCA_RSVP_SRC-1]) { 453 if (tb[TCA_RSVP_SRC-1]) {
456 err = -EINVAL; 454 err = -EINVAL;
@@ -532,10 +530,9 @@ insert:
532 /* No session found. Create new one. */ 530 /* No session found. Create new one. */
533 531
534 err = -ENOBUFS; 532 err = -ENOBUFS;
535 s = kmalloc(sizeof(struct rsvp_session), GFP_KERNEL); 533 s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
536 if (s == NULL) 534 if (s == NULL)
537 goto errout; 535 goto errout;
538 memset(s, 0, sizeof(*s));
539 memcpy(s->dst, dst, sizeof(s->dst)); 536 memcpy(s->dst, dst, sizeof(s->dst));
540 537
541 if (pinfo) { 538 if (pinfo) {
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 7870e7bb0bac..5af8a59e1503 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -148,11 +148,10 @@ static int tcindex_init(struct tcf_proto *tp)
148 struct tcindex_data *p; 148 struct tcindex_data *p;
149 149
150 DPRINTK("tcindex_init(tp %p)\n",tp); 150 DPRINTK("tcindex_init(tp %p)\n",tp);
151 p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL); 151 p = kzalloc(sizeof(struct tcindex_data),GFP_KERNEL);
152 if (!p) 152 if (!p)
153 return -ENOMEM; 153 return -ENOMEM;
154 154
155 memset(p, 0, sizeof(*p));
156 p->mask = 0xffff; 155 p->mask = 0xffff;
157 p->hash = DEFAULT_HASH_SIZE; 156 p->hash = DEFAULT_HASH_SIZE;
158 p->fall_through = 1; 157 p->fall_through = 1;
@@ -296,16 +295,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
296 err = -ENOMEM; 295 err = -ENOMEM;
297 if (!cp.perfect && !cp.h) { 296 if (!cp.perfect && !cp.h) {
298 if (valid_perfect_hash(&cp)) { 297 if (valid_perfect_hash(&cp)) {
299 cp.perfect = kmalloc(cp.hash * sizeof(*r), GFP_KERNEL); 298 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
300 if (!cp.perfect) 299 if (!cp.perfect)
301 goto errout; 300 goto errout;
302 memset(cp.perfect, 0, cp.hash * sizeof(*r));
303 balloc = 1; 301 balloc = 1;
304 } else { 302 } else {
305 cp.h = kmalloc(cp.hash * sizeof(f), GFP_KERNEL); 303 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
306 if (!cp.h) 304 if (!cp.h)
307 goto errout; 305 goto errout;
308 memset(cp.h, 0, cp.hash * sizeof(f));
309 balloc = 2; 306 balloc = 2;
310 } 307 }
311 } 308 }
@@ -316,10 +313,9 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
316 r = tcindex_lookup(&cp, handle) ? : &new_filter_result; 313 r = tcindex_lookup(&cp, handle) ? : &new_filter_result;
317 314
318 if (r == &new_filter_result) { 315 if (r == &new_filter_result) {
319 f = kmalloc(sizeof(*f), GFP_KERNEL); 316 f = kzalloc(sizeof(*f), GFP_KERNEL);
320 if (!f) 317 if (!f)
321 goto errout_alloc; 318 goto errout_alloc;
322 memset(f, 0, sizeof(*f));
323 } 319 }
324 320
325 if (tb[TCA_TCINDEX_CLASSID-1]) { 321 if (tb[TCA_TCINDEX_CLASSID-1]) {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d712edcd1bcf..0a6cfa0005be 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -307,23 +307,21 @@ static int u32_init(struct tcf_proto *tp)
307 if (tp_c->q == tp->q) 307 if (tp_c->q == tp->q)
308 break; 308 break;
309 309
310 root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL); 310 root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
311 if (root_ht == NULL) 311 if (root_ht == NULL)
312 return -ENOBUFS; 312 return -ENOBUFS;
313 313
314 memset(root_ht, 0, sizeof(*root_ht));
315 root_ht->divisor = 0; 314 root_ht->divisor = 0;
316 root_ht->refcnt++; 315 root_ht->refcnt++;
317 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; 316 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
318 root_ht->prio = tp->prio; 317 root_ht->prio = tp->prio;
319 318
320 if (tp_c == NULL) { 319 if (tp_c == NULL) {
321 tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL); 320 tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
322 if (tp_c == NULL) { 321 if (tp_c == NULL) {
323 kfree(root_ht); 322 kfree(root_ht);
324 return -ENOBUFS; 323 return -ENOBUFS;
325 } 324 }
326 memset(tp_c, 0, sizeof(*tp_c));
327 tp_c->q = tp->q; 325 tp_c->q = tp->q;
328 tp_c->next = u32_list; 326 tp_c->next = u32_list;
329 u32_list = tp_c; 327 u32_list = tp_c;
@@ -571,10 +569,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
571 if (handle == 0) 569 if (handle == 0)
572 return -ENOMEM; 570 return -ENOMEM;
573 } 571 }
574 ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); 572 ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
575 if (ht == NULL) 573 if (ht == NULL)
576 return -ENOBUFS; 574 return -ENOBUFS;
577 memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
578 ht->tp_c = tp_c; 575 ht->tp_c = tp_c;
579 ht->refcnt = 0; 576 ht->refcnt = 0;
580 ht->divisor = divisor; 577 ht->divisor = divisor;
@@ -617,18 +614,16 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
617 614
618 s = RTA_DATA(tb[TCA_U32_SEL-1]); 615 s = RTA_DATA(tb[TCA_U32_SEL-1]);
619 616
620 n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 617 n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
621 if (n == NULL) 618 if (n == NULL)
622 return -ENOBUFS; 619 return -ENOBUFS;
623 620
624 memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
625#ifdef CONFIG_CLS_U32_PERF 621#ifdef CONFIG_CLS_U32_PERF
626 n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 622 n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
627 if (n->pf == NULL) { 623 if (n->pf == NULL) {
628 kfree(n); 624 kfree(n);
629 return -ENOBUFS; 625 return -ENOBUFS;
630 } 626 }
631 memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64));
632#endif 627#endif
633 628
634 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); 629 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
@@ -801,7 +796,7 @@ static int __init init_u32(void)
801{ 796{
802 printk("u32 classifier\n"); 797 printk("u32 classifier\n");
803#ifdef CONFIG_CLS_U32_PERF 798#ifdef CONFIG_CLS_U32_PERF
804 printk(" Perfomance counters on\n"); 799 printk(" Performance counters on\n");
805#endif 800#endif
806#ifdef CONFIG_NET_CLS_POLICE 801#ifdef CONFIG_NET_CLS_POLICE
807 printk(" OLD policer on \n"); 802 printk(" OLD policer on \n");
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 698372954f4d..61e3b740ab1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -773,10 +773,9 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX) 773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
774 goto errout; 774 goto errout;
775 775
776 meta = kmalloc(sizeof(*meta), GFP_KERNEL); 776 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
777 if (meta == NULL) 777 if (meta == NULL)
778 goto errout; 778 goto errout;
779 memset(meta, 0, sizeof(*meta));
780 779
781 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left)); 780 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
782 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right)); 781 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 2405a86093a2..0fd0768a17c6 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -321,10 +321,9 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta,
321 list_len = RTA_PAYLOAD(rt_list); 321 list_len = RTA_PAYLOAD(rt_list);
322 matches_len = tree_hdr->nmatches * sizeof(*em); 322 matches_len = tree_hdr->nmatches * sizeof(*em);
323 323
324 tree->matches = kmalloc(matches_len, GFP_KERNEL); 324 tree->matches = kzalloc(matches_len, GFP_KERNEL);
325 if (tree->matches == NULL) 325 if (tree->matches == NULL)
326 goto errout; 326 goto errout;
327 memset(tree->matches, 0, matches_len);
328 327
329 /* We do not use rtattr_parse_nested here because the maximum 328 /* We do not use rtattr_parse_nested here because the maximum
330 * number of attributes is unknown. This saves us the allocation 329 * number of attributes is unknown. This saves us the allocation
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
index 5d3ae03e22a7..0ebc98e9be2d 100644
--- a/net/sched/estimator.c
+++ b/net/sched/estimator.c
@@ -139,11 +139,10 @@ int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct r
139 if (parm->interval < -2 || parm->interval > 3) 139 if (parm->interval < -2 || parm->interval > 3)
140 return -EINVAL; 140 return -EINVAL;
141 141
142 est = kmalloc(sizeof(*est), GFP_KERNEL); 142 est = kzalloc(sizeof(*est), GFP_KERNEL);
143 if (est == NULL) 143 if (est == NULL)
144 return -ENOBUFS; 144 return -ENOBUFS;
145 145
146 memset(est, 0, sizeof(*est));
147 est->interval = parm->interval + 2; 146 est->interval = parm->interval + 2;
148 est->stats = stats; 147 est->stats = stats;
149 est->stats_lock = stats_lock; 148 est->stats_lock = stats_lock;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c7844bacbbcb..a19eff12cf78 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -430,7 +430,7 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
430 } 430 }
431#endif 431#endif
432 432
433 err = -EINVAL; 433 err = -ENOENT;
434 if (ops == NULL) 434 if (ops == NULL)
435 goto err_out; 435 goto err_out;
436 436
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 80b7f6a8d008..bac881bfe362 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1926,10 +1926,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1926 } 1926 }
1927 1927
1928 err = -ENOBUFS; 1928 err = -ENOBUFS;
1929 cl = kmalloc(sizeof(*cl), GFP_KERNEL); 1929 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1930 if (cl == NULL) 1930 if (cl == NULL)
1931 goto failure; 1931 goto failure;
1932 memset(cl, 0, sizeof(*cl));
1933 cl->R_tab = rtab; 1932 cl->R_tab = rtab;
1934 rtab = NULL; 1933 rtab = NULL;
1935 cl->refcnt = 1; 1934 cl->refcnt = 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d735f51686a1..6f9151899795 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -238,9 +238,7 @@ void __netdev_watchdog_up(struct net_device *dev)
238 238
239static void dev_watchdog_up(struct net_device *dev) 239static void dev_watchdog_up(struct net_device *dev)
240{ 240{
241 netif_tx_lock_bh(dev);
242 __netdev_watchdog_up(dev); 241 __netdev_watchdog_up(dev);
243 netif_tx_unlock_bh(dev);
244} 242}
245 243
246static void dev_watchdog_down(struct net_device *dev) 244static void dev_watchdog_down(struct net_device *dev)
@@ -432,10 +430,9 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
432 size = QDISC_ALIGN(sizeof(*sch)); 430 size = QDISC_ALIGN(sizeof(*sch));
433 size += ops->priv_size + (QDISC_ALIGNTO - 1); 431 size += ops->priv_size + (QDISC_ALIGNTO - 1);
434 432
435 p = kmalloc(size, GFP_KERNEL); 433 p = kzalloc(size, GFP_KERNEL);
436 if (!p) 434 if (!p)
437 goto errout; 435 goto errout;
438 memset(p, 0, size);
439 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 436 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
440 sch->padded = (char *) sch - (char *) p; 437 sch->padded = (char *) sch - (char *) p;
441 438
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0cafdd5feb1b..18e81a8ffb01 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -406,10 +406,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
406 struct gred_sched_data *q; 406 struct gred_sched_data *q;
407 407
408 if (table->tab[dp] == NULL) { 408 if (table->tab[dp] == NULL) {
409 table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL); 409 table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
410 if (table->tab[dp] == NULL) 410 if (table->tab[dp] == NULL)
411 return -ENOMEM; 411 return -ENOMEM;
412 memset(table->tab[dp], 0, sizeof(*q));
413 } 412 }
414 413
415 q = table->tab[dp]; 414 q = table->tab[dp];
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6b1b4a981e88..6a6735a2ed35 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1123,10 +1123,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1123 if (rsc == NULL && fsc == NULL) 1123 if (rsc == NULL && fsc == NULL)
1124 return -EINVAL; 1124 return -EINVAL;
1125 1125
1126 cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL); 1126 cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
1127 if (cl == NULL) 1127 if (cl == NULL)
1128 return -ENOBUFS; 1128 return -ENOBUFS;
1129 memset(cl, 0, sizeof(struct hfsc_class));
1130 1129
1131 if (rsc != NULL) 1130 if (rsc != NULL)
1132 hfsc_change_rsc(cl, rsc, 0); 1131 hfsc_change_rsc(cl, rsc, 0);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 34afe41fa2f3..880a3394a51f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -196,7 +196,7 @@ struct htb_class
196 struct qdisc_rate_table *rate; /* rate table of the class itself */ 196 struct qdisc_rate_table *rate; /* rate table of the class itself */
197 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ 197 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
198 long buffer,cbuffer; /* token bucket depth/rate */ 198 long buffer,cbuffer; /* token bucket depth/rate */
199 long mbuffer; /* max wait time */ 199 psched_tdiff_t mbuffer; /* max wait time */
200 long tokens,ctokens; /* current number of tokens */ 200 long tokens,ctokens; /* current number of tokens */
201 psched_time_t t_c; /* checkpoint time */ 201 psched_time_t t_c; /* checkpoint time */
202}; 202};
@@ -1559,10 +1559,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1559 goto failure; 1559 goto failure;
1560 } 1560 }
1561 err = -ENOBUFS; 1561 err = -ENOBUFS;
1562 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1562 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1563 goto failure; 1563 goto failure;
1564 1564
1565 memset(cl, 0, sizeof(*cl));
1566 cl->refcnt = 1; 1565 cl->refcnt = 1;
1567 INIT_LIST_HEAD(&cl->sibling); 1566 INIT_LIST_HEAD(&cl->sibling);
1568 INIT_LIST_HEAD(&cl->hlist); 1567 INIT_LIST_HEAD(&cl->hlist);
@@ -1601,7 +1600,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1601 /* set class to be in HTB_CAN_SEND state */ 1600 /* set class to be in HTB_CAN_SEND state */
1602 cl->tokens = hopt->buffer; 1601 cl->tokens = hopt->buffer;
1603 cl->ctokens = hopt->cbuffer; 1602 cl->ctokens = hopt->cbuffer;
1604 cl->mbuffer = 60000000; /* 1min */ 1603 cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */
1605 PSCHED_GET_TIME(cl->t_c); 1604 PSCHED_GET_TIME(cl->t_c);
1606 cl->cmode = HTB_CAN_SEND; 1605 cl->cmode = HTB_CAN_SEND;
1607 1606
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c5bd8064e6d8..a08ec4c7c55d 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -148,7 +148,8 @@ static long tabledist(unsigned long mu, long sigma,
148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
149{ 149{
150 struct netem_sched_data *q = qdisc_priv(sch); 150 struct netem_sched_data *q = qdisc_priv(sch);
151 struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; 151 /* We don't fill cb now as skb_unshare() may invalidate it */
152 struct netem_skb_cb *cb;
152 struct sk_buff *skb2; 153 struct sk_buff *skb2;
153 int ret; 154 int ret;
154 int count = 1; 155 int count = 1;
@@ -200,6 +201,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
200 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 201 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
201 } 202 }
202 203
204 cb = (struct netem_skb_cb *)skb->cb;
203 if (q->gap == 0 /* not doing reordering */ 205 if (q->gap == 0 /* not doing reordering */
204 || q->counter < q->gap /* inside last reordering gap */ 206 || q->counter < q->gap /* inside last reordering gap */
205 || q->reorder < get_crandom(&q->reorder_cor)) { 207 || q->reorder < get_crandom(&q->reorder_cor)) {
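netem_enqueue() cached a pointer into skb->cb before the function's skb_unshare() call (the comment added above names it); unsharing may allocate a replacement skb, leaving that cached pointer dangling. The fix simply defers the dereference until the skb is final:

    /* Never cache pointers into an skb that may still be replaced. */
    skb = skb_unshare(skb, GFP_ATOMIC);
    if (!skb)
            return NET_XMIT_DROP;

    cb = (struct netem_skb_cb *)skb->cb;   /* now safe: skb is ours */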
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9d05e13e92f6..27329ce9c311 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -441,7 +441,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
441 /* If the primary path is changing, assume that the 441 /* If the primary path is changing, assume that the
442 * user wants to use this new path. 442 * user wants to use this new path.
443 */ 443 */
444 if (transport->state != SCTP_INACTIVE) 444 if ((transport->state == SCTP_ACTIVE) ||
445 (transport->state == SCTP_UNKNOWN))
445 asoc->peer.active_path = transport; 446 asoc->peer.active_path = transport;
446 447
447 /* 448 /*
@@ -532,11 +533,11 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
532 port = addr->v4.sin_port; 533 port = addr->v4.sin_port;
533 534
534 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", 535 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
535 " port: %d state:%s\n", 536 " port: %d state:%d\n",
536 asoc, 537 asoc,
537 addr, 538 addr,
538 addr->v4.sin_port, 539 addr->v4.sin_port,
539 peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE"); 540 peer_state);
540 541
541 /* Set the port if it has not been set yet. */ 542 /* Set the port if it has not been set yet. */
542 if (0 == asoc->peer.port) 543 if (0 == asoc->peer.port)
@@ -545,9 +546,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
545 /* Check to see if this is a duplicate. */ 546 /* Check to see if this is a duplicate. */
546 peer = sctp_assoc_lookup_paddr(asoc, addr); 547 peer = sctp_assoc_lookup_paddr(asoc, addr);
547 if (peer) { 548 if (peer) {
548 if (peer_state == SCTP_ACTIVE && 549 if (peer->state == SCTP_UNKNOWN) {
549 peer->state == SCTP_UNKNOWN) 550 if (peer_state == SCTP_ACTIVE)
550 peer->state = SCTP_ACTIVE; 551 peer->state = SCTP_ACTIVE;
552 if (peer_state == SCTP_UNCONFIRMED)
553 peer->state = SCTP_UNCONFIRMED;
554 }
551 return peer; 555 return peer;
552 } 556 }
553 557
@@ -739,7 +743,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
739 list_for_each(pos, &asoc->peer.transport_addr_list) { 743 list_for_each(pos, &asoc->peer.transport_addr_list) {
740 t = list_entry(pos, struct sctp_transport, transports); 744 t = list_entry(pos, struct sctp_transport, transports);
741 745
742 if (t->state == SCTP_INACTIVE) 746 if ((t->state == SCTP_INACTIVE) ||
747 (t->state == SCTP_UNCONFIRMED))
743 continue; 748 continue;
744 if (!first || t->last_time_heard > first->last_time_heard) { 749 if (!first || t->last_time_heard > first->last_time_heard) {
745 second = first; 750 second = first;
@@ -759,7 +764,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
759 * [If the primary is active but not most recent, bump the most 764 * [If the primary is active but not most recent, bump the most
760 * recently used transport.] 765 * recently used transport.]
761 */ 766 */
762 if (asoc->peer.primary_path->state != SCTP_INACTIVE && 767 if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
768 (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
763 first != asoc->peer.primary_path) { 769 first != asoc->peer.primary_path) {
764 second = first; 770 second = first;
765 first = asoc->peer.primary_path; 771 first = asoc->peer.primary_path;
@@ -1054,7 +1060,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
1054 transports); 1060 transports);
1055 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) 1061 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1056 sctp_assoc_add_peer(asoc, &trans->ipaddr, 1062 sctp_assoc_add_peer(asoc, &trans->ipaddr,
1057 GFP_ATOMIC, SCTP_ACTIVE); 1063 GFP_ATOMIC, trans->state);
1058 } 1064 }
1059 1065
1060 asoc->ctsn_ack_point = asoc->next_tsn - 1; 1066 asoc->ctsn_ack_point = asoc->next_tsn - 1;
@@ -1094,7 +1100,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1094 1100
1095 /* Try to find an active transport. */ 1101 /* Try to find an active transport. */
1096 1102
1097 if (t->state != SCTP_INACTIVE) { 1103 if ((t->state == SCTP_ACTIVE) ||
1104 (t->state == SCTP_UNKNOWN)) {
1098 break; 1105 break;
1099 } else { 1106 } else {
1100 /* Keep track of the next transport in case 1107 /* Keep track of the next transport in case
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 2b962627f631..2b9c12a170e5 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp)
146 146
147/* Add an address to the bind address list in the SCTP_bind_addr structure. */ 147/* Add an address to the bind address list in the SCTP_bind_addr structure. */
148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
149 gfp_t gfp) 149 __u8 use_as_src, gfp_t gfp)
150{ 150{
151 struct sctp_sockaddr_entry *addr; 151 struct sctp_sockaddr_entry *addr;
152 152
@@ -163,6 +163,8 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
163 if (!addr->a.v4.sin_port) 163 if (!addr->a.v4.sin_port)
164 addr->a.v4.sin_port = bp->port; 164 addr->a.v4.sin_port = bp->port;
165 165
166 addr->use_as_src = use_as_src;
167
166 INIT_LIST_HEAD(&addr->list); 168 INIT_LIST_HEAD(&addr->list);
167 list_add_tail(&addr->list, &bp->address_list); 169 list_add_tail(&addr->list, &bp->address_list);
168 SCTP_DBG_OBJCNT_INC(addr); 170 SCTP_DBG_OBJCNT_INC(addr);
@@ -274,7 +276,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
274 } 276 }
275 277
276 af->from_addr_param(&addr, rawaddr, port, 0); 278 af->from_addr_param(&addr, rawaddr, port, 0);
277 retval = sctp_add_bind_addr(bp, &addr, gfp); 279 retval = sctp_add_bind_addr(bp, &addr, 1, gfp);
278 if (retval) { 280 if (retval) {
279 /* Can't finish building the list, clean up. */ 281 /* Can't finish building the list, clean up. */
280 sctp_bind_addr_clean(bp); 282 sctp_bind_addr_clean(bp);
@@ -367,7 +369,7 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
367 (((AF_INET6 == addr->sa.sa_family) && 369 (((AF_INET6 == addr->sa.sa_family) &&
368 (flags & SCTP_ADDR6_ALLOWED) && 370 (flags & SCTP_ADDR6_ALLOWED) &&
369 (flags & SCTP_ADDR6_PEERSUPP)))) 371 (flags & SCTP_ADDR6_PEERSUPP))))
370 error = sctp_add_bind_addr(dest, addr, gfp); 372 error = sctp_add_bind_addr(dest, addr, 1, gfp);
371 } 373 }
372 374
373 return error; 375 return error;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 67bd53070ee0..ffda1d680529 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -158,6 +158,12 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
158void sctp_endpoint_free(struct sctp_endpoint *ep) 158void sctp_endpoint_free(struct sctp_endpoint *ep)
159{ 159{
160 ep->base.dead = 1; 160 ep->base.dead = 1;
161
162 ep->base.sk->sk_state = SCTP_SS_CLOSED;
163
164 /* Unlink this endpoint, so we can't find it again! */
165 sctp_unhash_endpoint(ep);
166
161 sctp_endpoint_put(ep); 167 sctp_endpoint_put(ep);
162} 168}
163 169
@@ -166,11 +172,6 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
166{ 172{
167 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
168 174
169 ep->base.sk->sk_state = SCTP_SS_CLOSED;
170
171 /* Unlink this endpoint, so we can't find it again! */
172 sctp_unhash_endpoint(ep);
173
174 /* Free up the HMAC transform. */ 175 /* Free up the HMAC transform. */
175 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); 176 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
176 177
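Moving the sk_state update and sctp_unhash_endpoint() from sctp_endpoint_destroy() up into sctp_endpoint_free() makes the endpoint unfindable the moment teardown starts, rather than when the last reference drops, closing the window in which a hash lookup could still return a dying endpoint. The split, with illustrative names:

    void obj_free(struct obj *o)              /* begin teardown */
    {
            o->dead = 1;
            unhash_obj(o);    /* lookups can no longer find us */
            obj_put(o);       /* drop the caller's reference */
    }

    static void obj_destroy(struct obj *o)    /* runs at refcount zero */
    {
            release_resources(o);
            kfree(o);
    }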
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 8ef08070c8b6..99c0cefc04e0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -290,7 +290,8 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
290 sctp_read_lock(addr_lock); 290 sctp_read_lock(addr_lock);
291 list_for_each(pos, &bp->address_list) { 291 list_for_each(pos, &bp->address_list) {
292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
293 if ((laddr->a.sa.sa_family == AF_INET6) && 293 if ((laddr->use_as_src) &&
294 (laddr->a.sa.sa_family == AF_INET6) &&
294 (scope <= sctp_scope(&laddr->a))) { 295 (scope <= sctp_scope(&laddr->a))) {
295 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 296 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
296 if (!baddr || (matchlen < bmatchlen)) { 297 if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e5faa351aaad..30b710c54e64 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -691,7 +691,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
691 691
692 if (!new_transport) { 692 if (!new_transport) {
693 new_transport = asoc->peer.active_path; 693 new_transport = asoc->peer.active_path;
694 } else if (new_transport->state == SCTP_INACTIVE) { 694 } else if ((new_transport->state == SCTP_INACTIVE) ||
695 (new_transport->state == SCTP_UNCONFIRMED)) {
695 /* If the chunk is Heartbeat or Heartbeat Ack, 696 /* If the chunk is Heartbeat or Heartbeat Ack,
696 * send it to chunk->transport, even if it's 697 * send it to chunk->transport, even if it's
697 * inactive. 698 * inactive.
@@ -848,7 +849,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
848 */ 849 */
849 new_transport = chunk->transport; 850 new_transport = chunk->transport;
850 if (!new_transport || 851 if (!new_transport ||
851 new_transport->state == SCTP_INACTIVE) 852 ((new_transport->state == SCTP_INACTIVE) ||
853 (new_transport->state == SCTP_UNCONFIRMED)))
852 new_transport = asoc->peer.active_path; 854 new_transport = asoc->peer.active_path;
853 855
854 /* Change packets if necessary. */ 856 /* Change packets if necessary. */
@@ -1464,7 +1466,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1464 /* Mark the destination transport address as 1466 /* Mark the destination transport address as
1465 * active if it is not so marked. 1467 * active if it is not so marked.
1466 */ 1468 */
1467 if (transport->state == SCTP_INACTIVE) { 1469 if ((transport->state == SCTP_INACTIVE) ||
1470 (transport->state == SCTP_UNCONFIRMED)) {
1468 sctp_assoc_control_transport( 1471 sctp_assoc_control_transport(
1469 transport->asoc, 1472 transport->asoc,
1470 transport, 1473 transport,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 816c033d7886..1ab03a27a76e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -240,7 +240,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
240 (((AF_INET6 == addr->a.sa.sa_family) && 240 (((AF_INET6 == addr->a.sa.sa_family) &&
241 (copy_flags & SCTP_ADDR6_ALLOWED) && 241 (copy_flags & SCTP_ADDR6_ALLOWED) &&
242 (copy_flags & SCTP_ADDR6_PEERSUPP)))) { 242 (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
243 error = sctp_add_bind_addr(bp, &addr->a, 243 error = sctp_add_bind_addr(bp, &addr->a, 1,
244 GFP_ATOMIC); 244 GFP_ATOMIC);
245 if (error) 245 if (error)
246 goto end_copy; 246 goto end_copy;
@@ -486,6 +486,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
486 list_for_each(pos, &bp->address_list) { 486 list_for_each(pos, &bp->address_list) {
487 laddr = list_entry(pos, struct sctp_sockaddr_entry, 487 laddr = list_entry(pos, struct sctp_sockaddr_entry,
488 list); 488 list);
489 if (!laddr->use_as_src)
490 continue;
489 sctp_v4_dst_saddr(&dst_saddr, dst, bp->port); 491 sctp_v4_dst_saddr(&dst_saddr, dst, bp->port);
490 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 492 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
491 goto out_unlock; 493 goto out_unlock;
@@ -506,7 +508,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
506 list_for_each(pos, &bp->address_list) { 508 list_for_each(pos, &bp->address_list) {
507 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 509 laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
508 510
509 if (AF_INET == laddr->a.sa.sa_family) { 511 if ((laddr->use_as_src) &&
512 (AF_INET == laddr->a.sa.sa_family)) {
510 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 513 fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
511 if (!ip_route_output_key(&rt, &fl)) { 514 if (!ip_route_output_key(&rt, &fl)) {
512 dst = &rt->u.dst; 515 dst = &rt->u.dst;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2a8773691695..17b509282cf2 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -806,38 +806,26 @@ no_mem:
806 806
807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ 807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, 808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
809 const struct sctp_chunk *chunk, 809 const struct msghdr *msg,
810 const struct msghdr *msg) 810 size_t paylen)
811{ 811{
812 struct sctp_chunk *retval; 812 struct sctp_chunk *retval;
813 void *payload = NULL, *payoff; 813 void *payload = NULL;
814 size_t paylen = 0; 814 int err;
815 struct iovec *iov = NULL;
816 int iovlen = 0;
817
818 if (msg) {
819 iov = msg->msg_iov;
820 iovlen = msg->msg_iovlen;
821 paylen = get_user_iov_size(iov, iovlen);
822 }
823 815
824 retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); 816 retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
825 if (!retval) 817 if (!retval)
826 goto err_chunk; 818 goto err_chunk;
827 819
828 if (paylen) { 820 if (paylen) {
829 /* Put the msg_iov together into payload. */ 821 /* Put the msg_iov together into payload. */
830 payload = kmalloc(paylen, GFP_ATOMIC); 822 payload = kmalloc(paylen, GFP_KERNEL);
831 if (!payload) 823 if (!payload)
832 goto err_payload; 824 goto err_payload;
833 payoff = payload;
834 825
835 for (; iovlen > 0; --iovlen) { 826 err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
836 if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) 827 if (err < 0)
837 goto err_copy; 828 goto err_copy;
838 payoff += iov->iov_len;
839 iov++;
840 }
841 } 829 }
842 830
843 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); 831 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
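The rewrite above replaces an open-coded copy_from_user() loop with memcpy_fromiovec(), which walks the user iovec (advancing it as it copies) and returns 0 on success or -EFAULT on a fault. Since the function now runs in process context only, GFP_KERNEL is safe. A sketch of the idiom:

    #include <linux/socket.h>   /* memcpy_fromiovec() */

    payload = kmalloc(paylen, GFP_KERNEL);      /* may sleep */
    if (payload && memcpy_fromiovec(payload, msg->msg_iov, paylen) < 0) {
            kfree(payload);                     /* fault in user space */
            payload = NULL;
    }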
@@ -1493,7 +1481,7 @@ no_hmac:
1493 1481
1494 /* Also, add the destination address. */ 1482 /* Also, add the destination address. */
1495 if (list_empty(&retval->base.bind_addr.address_list)) { 1483 if (list_empty(&retval->base.bind_addr.address_list)) {
1496 sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1484 sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
1497 GFP_ATOMIC); 1485 GFP_ATOMIC);
1498 } 1486 }
1499 1487
@@ -2017,7 +2005,7 @@ static int sctp_process_param(struct sctp_association *asoc,
2017 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0); 2005 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);
2018 scope = sctp_scope(peer_addr); 2006 scope = sctp_scope(peer_addr);
2019 if (sctp_in_scope(&addr, scope)) 2007 if (sctp_in_scope(&addr, scope))
2020 if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE)) 2008 if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
2021 return 0; 2009 return 0;
2022 break; 2010 break;
2023 2011
@@ -2418,7 +2406,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc,
2418 * Due to Resource Shortage'. 2406 * Due to Resource Shortage'.
2419 */ 2407 */
2420 2408
2421 peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE); 2409 peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
2422 if (!peer) 2410 if (!peer)
2423 return SCTP_ERROR_RSRC_LOW; 2411 return SCTP_ERROR_RSRC_LOW;
2424 2412
@@ -2565,6 +2553,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2565 union sctp_addr_param *addr_param; 2553 union sctp_addr_param *addr_param;
2566 struct list_head *pos; 2554 struct list_head *pos;
2567 struct sctp_transport *transport; 2555 struct sctp_transport *transport;
2556 struct sctp_sockaddr_entry *saddr;
2568 int retval = 0; 2557 int retval = 0;
2569 2558
2570 addr_param = (union sctp_addr_param *) 2559 addr_param = (union sctp_addr_param *)
@@ -2578,7 +2567,11 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2578 case SCTP_PARAM_ADD_IP: 2567 case SCTP_PARAM_ADD_IP:
2579 sctp_local_bh_disable(); 2568 sctp_local_bh_disable();
2580 sctp_write_lock(&asoc->base.addr_lock); 2569 sctp_write_lock(&asoc->base.addr_lock);
2581 retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC); 2570 list_for_each(pos, &bp->address_list) {
2571 saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
2572 if (sctp_cmp_addr_exact(&saddr->a, &addr))
2573 saddr->use_as_src = 1;
2574 }
2582 sctp_write_unlock(&asoc->base.addr_lock); 2575 sctp_write_unlock(&asoc->base.addr_lock);
2583 sctp_local_bh_enable(); 2576 sctp_local_bh_enable();
2584 break; 2577 break;
@@ -2591,6 +2584,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2591 list_for_each(pos, &asoc->peer.transport_addr_list) { 2584 list_for_each(pos, &asoc->peer.transport_addr_list) {
2592 transport = list_entry(pos, struct sctp_transport, 2585 transport = list_entry(pos, struct sctp_transport,
2593 transports); 2586 transports);
2587 dst_release(transport->dst);
2594 sctp_transport_route(transport, NULL, 2588 sctp_transport_route(transport, NULL,
2595 sctp_sk(asoc->base.sk)); 2589 sctp_sk(asoc->base.sk));
2596 } 2590 }
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c5beb2ad7ef7..9c10bdec1afe 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -430,7 +430,11 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
430 /* The check for association's overall error counter exceeding the 430 /* The check for association's overall error counter exceeding the
431 * threshold is done in the state function. 431 * threshold is done in the state function.
432 */ 432 */
433 asoc->overall_error_count++; 433 /* When probing UNCONFIRMED addresses, the association overall
434 * error count is NOT incremented
435 */
436 if (transport->state != SCTP_UNCONFIRMED)
437 asoc->overall_error_count++;
434 438
435 if (transport->state != SCTP_INACTIVE && 439 if (transport->state != SCTP_INACTIVE &&
436 (transport->error_count++ >= transport->pathmaxrxt)) { 440 (transport->error_count++ >= transport->pathmaxrxt)) {
@@ -610,7 +614,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
610 /* Mark the destination transport address as active if it is not so 614 /* Mark the destination transport address as active if it is not so
611 * marked. 615 * marked.
612 */ 616 */
613 if (t->state == SCTP_INACTIVE) 617 if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
614 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, 618 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
615 SCTP_HEARTBEAT_SUCCESS); 619 SCTP_HEARTBEAT_SUCCESS);
616 620
@@ -620,6 +624,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
620 */ 624 */
621 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; 625 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
622 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 626 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
627
628 /* Update the heartbeat timer. */
629 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
630 sctp_transport_hold(t);
623} 631}
624 632
625/* Helper function to do a transport reset at the expiry of the heartbeat 633
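The heartbeat-timer update relies on mod_timer()'s return value: it returns 1 when it modified an already-pending timer and 0 when it armed an inactive one. In the latter case the timer now holds a new reference on the transport, taken with sctp_transport_hold() and expected to be dropped by the timer handler via sctp_transport_put():

    if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
            sctp_transport_hold(t);     /* timer was idle: pin the transport */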
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9e58144f4851..5b5ae7958322 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -846,6 +846,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); 846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
847 hbinfo.daddr = transport->ipaddr; 847 hbinfo.daddr = transport->ipaddr;
848 hbinfo.sent_at = jiffies; 848 hbinfo.sent_at = jiffies;
849 hbinfo.hb_nonce = transport->hb_nonce;
849 850
850 /* Send a heartbeat to our peer. */ 851 /* Send a heartbeat to our peer. */
851 paylen = sizeof(sctp_sender_hb_info_t); 852 paylen = sizeof(sctp_sender_hb_info_t);
@@ -1048,6 +1049,10 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1048 return SCTP_DISPOSITION_DISCARD; 1049 return SCTP_DISPOSITION_DISCARD;
1049 } 1050 }
1050 1051
1052 /* Validate the 64-bit random nonce. */
1053 if (hbinfo->hb_nonce != link->hb_nonce)
1054 return SCTP_DISPOSITION_DISCARD;
1055
1051 max_interval = link->hbinterval + link->rto; 1056 max_interval = link->hbinterval + link->rto;
1052 1057
1053 /* Check if the timestamp looks valid. */ 1058 /* Check if the timestamp looks valid. */
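The new hb_nonce guards against forged HEARTBEAT-ACKs confirming an address the peer never actually answered from. The scheme, split across this file and transport.c below: a 64-bit random value is drawn when the transport is created, embedded in each HEARTBEAT, and compared on the returning ACK:

    /* transport setup (transport.c below) */
    get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

    /* HEARTBEAT send: embed it */
    hbinfo.hb_nonce = transport->hb_nonce;

    /* HEARTBEAT-ACK receive: discard on mismatch */
    if (hbinfo->hb_nonce != link->hb_nonce)
            return SCTP_DISPOSITION_DISCARD;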
@@ -4026,18 +4031,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
4026 * from its upper layer, but retransmits data to the far end 4031 * from its upper layer, but retransmits data to the far end
4027 * if necessary to fill gaps. 4032 * if necessary to fill gaps.
4028 */ 4033 */
4029 struct msghdr *msg = arg; 4034 struct sctp_chunk *abort = arg;
4030 struct sctp_chunk *abort;
4031 sctp_disposition_t retval; 4035 sctp_disposition_t retval;
4032 4036
4033 retval = SCTP_DISPOSITION_CONSUME; 4037 retval = SCTP_DISPOSITION_CONSUME;
4034 4038
4035 /* Generate ABORT chunk to send the peer. */ 4039 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4036 abort = sctp_make_abort_user(asoc, NULL, msg);
4037 if (!abort)
4038 retval = SCTP_DISPOSITION_NOMEM;
4039 else
4040 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4041 4040
4042 /* Even if we can't send the ABORT due to low memory, delete the 4041 /* Even if we can't send the ABORT due to low memory, delete the
4043 * TCB. This is a departure from our typical NOMEM handling. 4042 * TCB. This is a departure from our typical NOMEM handling.
@@ -4161,8 +4160,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4161 void *arg, 4160 void *arg,
4162 sctp_cmd_seq_t *commands) 4161 sctp_cmd_seq_t *commands)
4163{ 4162{
4164 struct msghdr *msg = arg; 4163 struct sctp_chunk *abort = arg;
4165 struct sctp_chunk *abort;
4166 sctp_disposition_t retval; 4164 sctp_disposition_t retval;
4167 4165
4168 /* Stop T1-init timer */ 4166 /* Stop T1-init timer */
@@ -4170,12 +4168,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4170 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 4168 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
4171 retval = SCTP_DISPOSITION_CONSUME; 4169 retval = SCTP_DISPOSITION_CONSUME;
4172 4170
4173 /* Generate ABORT chunk to send the peer */ 4171 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4174 abort = sctp_make_abort_user(asoc, NULL, msg);
4175 if (!abort)
4176 retval = SCTP_DISPOSITION_NOMEM;
4177 else
4178 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4179 4172
4180 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 4173 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
4181 SCTP_STATE(SCTP_STATE_CLOSED)); 4174 SCTP_STATE(SCTP_STATE_CLOSED));
@@ -5278,7 +5271,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5278 datalen -= sizeof(sctp_data_chunk_t); 5271 datalen -= sizeof(sctp_data_chunk_t);
5279 5272
5280 deliver = SCTP_CMD_CHUNK_ULP; 5273 deliver = SCTP_CMD_CHUNK_ULP;
5281 chunk->data_accepted = 1;
5282 5274
5283 /* Think about partial delivery. */ 5275 /* Think about partial delivery. */
5284 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { 5276 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5357,6 +5349,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5357 if (SCTP_CMD_CHUNK_ULP == deliver) 5349 if (SCTP_CMD_CHUNK_ULP == deliver)
5358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 5350 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
5359 5351
5352 chunk->data_accepted = 1;
5353
5360 /* Note: Some chunks may get overcounted if we drop them, or 5354 /* Note: Some chunks may get overcounted if we drop them, or
5361 * if we renege and the chunk arrives again. 5355 * if we renege and the chunk arrives again.
5362 */ 5356 */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0a2c71d0d8aa..dab15949958e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -369,7 +369,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
369 369
370 /* Use GFP_ATOMIC since BHs are disabled. */ 370 /* Use GFP_ATOMIC since BHs are disabled. */
371 addr->v4.sin_port = ntohs(addr->v4.sin_port); 371 addr->v4.sin_port = ntohs(addr->v4.sin_port);
372 ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC); 372 ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
373 addr->v4.sin_port = htons(addr->v4.sin_port); 373 addr->v4.sin_port = htons(addr->v4.sin_port);
374 sctp_write_unlock(&ep->base.addr_lock); 374 sctp_write_unlock(&ep->base.addr_lock);
375 sctp_local_bh_enable(); 375 sctp_local_bh_enable();
@@ -491,6 +491,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
491 struct sctp_chunk *chunk; 491 struct sctp_chunk *chunk;
492 struct sctp_sockaddr_entry *laddr; 492 struct sctp_sockaddr_entry *laddr;
493 union sctp_addr *addr; 493 union sctp_addr *addr;
494 union sctp_addr saveaddr;
494 void *addr_buf; 495 void *addr_buf;
495 struct sctp_af *af; 496 struct sctp_af *af;
496 struct list_head *pos; 497 struct list_head *pos;
@@ -558,14 +559,26 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
558 } 559 }
559 560
560 retval = sctp_send_asconf(asoc, chunk); 561 retval = sctp_send_asconf(asoc, chunk);
562 if (retval)
563 goto out;
561 564
562 /* FIXME: After sending the add address ASCONF chunk, we 565 /* Add the new addresses to the bind address list with
563 * cannot append the address to the association's binding 566 * use_as_src set to 0.
564 * address list, because the new address may be used as the
565 * source of a message sent to the peer before the ASCONF
566 * chunk is received by the peer. So we should wait until
567 * ASCONF_ACK is received.
568 */ 567 */
568 sctp_local_bh_disable();
569 sctp_write_lock(&asoc->base.addr_lock);
570 addr_buf = addrs;
571 for (i = 0; i < addrcnt; i++) {
572 addr = (union sctp_addr *)addr_buf;
573 af = sctp_get_af_specific(addr->v4.sin_family);
574 memcpy(&saveaddr, addr, af->sockaddr_len);
575 saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
576 retval = sctp_add_bind_addr(bp, &saveaddr, 0,
577 GFP_ATOMIC);
578 addr_buf += af->sockaddr_len;
579 }
580 sctp_write_unlock(&asoc->base.addr_lock);
581 sctp_local_bh_enable();
569 } 582 }
570 583
571out: 584out:
@@ -676,12 +689,15 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
676 struct sctp_sock *sp; 689 struct sctp_sock *sp;
677 struct sctp_endpoint *ep; 690 struct sctp_endpoint *ep;
678 struct sctp_association *asoc; 691 struct sctp_association *asoc;
692 struct sctp_transport *transport;
679 struct sctp_bind_addr *bp; 693 struct sctp_bind_addr *bp;
680 struct sctp_chunk *chunk; 694 struct sctp_chunk *chunk;
681 union sctp_addr *laddr; 695 union sctp_addr *laddr;
696 union sctp_addr saveaddr;
682 void *addr_buf; 697 void *addr_buf;
683 struct sctp_af *af; 698 struct sctp_af *af;
684 struct list_head *pos; 699 struct list_head *pos, *pos1;
700 struct sctp_sockaddr_entry *saddr;
685 int i; 701 int i;
686 int retval = 0; 702 int retval = 0;
687 703
@@ -748,14 +764,42 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
748 goto out; 764 goto out;
749 } 765 }
750 766
751 retval = sctp_send_asconf(asoc, chunk); 767 /* Reset use_as_src flag for the addresses in the bind address
768 * list that are to be deleted.
769 */
770 sctp_local_bh_disable();
771 sctp_write_lock(&asoc->base.addr_lock);
772 addr_buf = addrs;
773 for (i = 0; i < addrcnt; i++) {
774 laddr = (union sctp_addr *)addr_buf;
775 af = sctp_get_af_specific(laddr->v4.sin_family);
776 memcpy(&saveaddr, laddr, af->sockaddr_len);
777 saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
778 list_for_each(pos1, &bp->address_list) {
779 saddr = list_entry(pos1,
780 struct sctp_sockaddr_entry,
781 list);
782 if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))
783 saddr->use_as_src = 0;
784 }
785 addr_buf += af->sockaddr_len;
786 }
787 sctp_write_unlock(&asoc->base.addr_lock);
788 sctp_local_bh_enable();
752 789
753 /* FIXME: After sending the delete address ASCONF chunk, we 790 /* Update the route and saddr entries for all the transports
754 * cannot remove the addresses from the association's bind 791 * as some of the addresses in the bind address list are
755 * address list, because there may be packets sent to 792 * about to be deleted and cannot be used as source addresses.
756 * the deleted addresses, so we should wait until ASCONF_ACK
757 * packet is received.
758 */ 793 */
794 list_for_each(pos1, &asoc->peer.transport_addr_list) {
795 transport = list_entry(pos1, struct sctp_transport,
796 transports);
797 dst_release(transport->dst);
798 sctp_transport_route(transport, NULL,
799 sctp_sk(asoc->base.sk));
800 }
801
802 retval = sctp_send_asconf(asoc, chunk);
759 } 803 }
760out: 804out:
761 return retval; 805 return retval;
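Taken together, the two socket.c hunks and the earlier sm_make_chunk.c hunk give each locally bound address a small lifecycle under ADD-IP, sketched here as comments:

    /* bindx(ADD):  sctp_add_bind_addr(bp, addr, 0, ...)   use_as_src = 0   */
    /* ASCONF-ACK:  saddr->use_as_src = 1                  valid source     */
    /* bindx(DEL):  saddr->use_as_src = 0, re-route all transports,
     *              and only then send the ASCONF, so nothing is sourced
     *              from an address the peer is about to stop accepting.    */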
@@ -1245,9 +1289,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1245 } 1289 }
1246 } 1290 }
1247 1291
1248 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) 1292 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1249 sctp_primitive_ABORT(asoc, NULL); 1293 struct sctp_chunk *chunk;
1250 else 1294
1295 chunk = sctp_make_abort_user(asoc, NULL, 0);
1296 if (chunk)
1297 sctp_primitive_ABORT(asoc, chunk);
1298 } else
1251 sctp_primitive_SHUTDOWN(asoc, NULL); 1299 sctp_primitive_SHUTDOWN(asoc, NULL);
1252 } 1300 }
1253 1301
@@ -1476,8 +1524,16 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1476 goto out_unlock; 1524 goto out_unlock;
1477 } 1525 }
1478 if (sinfo_flags & SCTP_ABORT) { 1526 if (sinfo_flags & SCTP_ABORT) {
1527 struct sctp_chunk *chunk;
1528
1529 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1530 if (!chunk) {
1531 err = -ENOMEM;
1532 goto out_unlock;
1533 }
1534
1479 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); 1535 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
1480 sctp_primitive_ABORT(asoc, msg); 1536 sctp_primitive_ABORT(asoc, chunk);
1481 err = 0; 1537 err = 0;
1482 goto out_unlock; 1538 goto out_unlock;
1483 } 1539 }
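With sctp_make_abort_user() reworked, the ABORT chunk is now built in process context (where the GFP_KERNEL allocation and the iovec copy are legal), and the finished chunk, rather than the raw msghdr, is handed down to the state machine:

    chunk = sctp_make_abort_user(asoc, msg, msg_len);
    if (!chunk) {
            err = -ENOMEM;
            goto out_unlock;
    }
    sctp_primitive_ABORT(asoc, chunk);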
@@ -4977,7 +5033,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
4977/* Caller must hold hashbucket lock for this tb with local BH disabled */ 5033/* Caller must hold hashbucket lock for this tb with local BH disabled */
4978static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 5034static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
4979{ 5035{
4980 if (hlist_empty(&pp->owner)) { 5036 if (pp && hlist_empty(&pp->owner)) {
4981 if (pp->next) 5037 if (pp->next)
4982 pp->next->pprev = pp->pprev; 5038 pp->next->pprev = pp->pprev;
4983 *(pp->pprev) = pp->next; 5039 *(pp->pprev) = pp->next;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 160f62ad1cc5..2763aa93de1a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -49,6 +49,7 @@
49 */ 49 */
50 50
51#include <linux/types.h> 51#include <linux/types.h>
52#include <linux/random.h>
52#include <net/sctp/sctp.h> 53#include <net/sctp/sctp.h>
53#include <net/sctp/sm.h> 54#include <net/sctp/sm.h>
54 55
@@ -85,7 +86,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
85 86
86 peer->init_sent_count = 0; 87 peer->init_sent_count = 0;
87 88
88 peer->state = SCTP_ACTIVE;
89 peer->param_flags = SPP_HB_DISABLE | 89 peer->param_flags = SPP_HB_DISABLE |
90 SPP_PMTUD_ENABLE | 90 SPP_PMTUD_ENABLE |
91 SPP_SACKDELAY_ENABLE; 91 SPP_SACKDELAY_ENABLE;
@@ -109,6 +109,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
109 peer->hb_timer.function = sctp_generate_heartbeat_event; 109 peer->hb_timer.function = sctp_generate_heartbeat_event;
110 peer->hb_timer.data = (unsigned long)peer; 110 peer->hb_timer.data = (unsigned long)peer;
111 111
112 /* Initialize the 64-bit random nonce sent with heartbeat. */
113 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
114
112 atomic_set(&peer->refcnt, 1); 115 atomic_set(&peer->refcnt, 1);
113 peer->dead = 0; 116 peer->dead = 0;
114 117
@@ -517,7 +520,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
517unsigned long sctp_transport_timeout(struct sctp_transport *t) 520unsigned long sctp_transport_timeout(struct sctp_transport *t)
518{ 521{
519 unsigned long timeout; 522 unsigned long timeout;
520 timeout = t->hbinterval + t->rto + sctp_jitter(t->rto); 523 timeout = t->rto + sctp_jitter(t->rto);
524 if (t->state != SCTP_UNCONFIRMED)
525 timeout += t->hbinterval;
521 timeout += jiffies; 526 timeout += jiffies;
522 return timeout; 527 return timeout;
523} 528}
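sctp_transport_timeout() now distinguishes unconfirmed transports: they are probed on the RTO (plus jitter) alone, so address confirmation is not delayed by a full heartbeat interval:

    timeout = t->rto + sctp_jitter(t->rto);
    if (t->state != SCTP_UNCONFIRMED)
            timeout += t->hbinterval;   /* confirmed: normal HB cadence */
    timeout += jiffies;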
diff --git a/net/socket.c b/net/socket.c
index b4848ce0d6ac..6d261bf206fc 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1178,7 +1178,8 @@ static int __sock_create(int family, int type, int protocol, struct socket **res
1178 */ 1178 */
1179 1179
1180 if (!(sock = sock_alloc())) { 1180 if (!(sock = sock_alloc())) {
1181 printk(KERN_WARNING "socket: no more sockets\n"); 1181 if (net_ratelimit())
1182 printk(KERN_WARNING "socket: no more sockets\n");
1182 err = -ENFILE; /* Not exactly a match, but it's the 1183 err = -ENFILE; /* Not exactly a match, but it's the
1183 closest POSIX thing */ 1184 closest POSIX thing */
1184 goto out; 1185 goto out;
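Wrapping the warning in net_ratelimit() is the standard idiom for any log message an unprivileged user can trigger at will (here, by exhausting the socket table), preventing a printk flood:

    if (net_ratelimit())
            printk(KERN_WARNING "socket: no more sockets\n");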
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 519ebc17c028..ef1cf5b476c8 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -225,9 +225,8 @@ gss_alloc_context(void)
225{ 225{
226 struct gss_cl_ctx *ctx; 226 struct gss_cl_ctx *ctx;
227 227
228 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 228 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
229 if (ctx != NULL) { 229 if (ctx != NULL) {
230 memset(ctx, 0, sizeof(*ctx));
231 ctx->gc_proc = RPC_GSS_PROC_DATA; 230 ctx->gc_proc = RPC_GSS_PROC_DATA;
232 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */ 231 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
233 spin_lock_init(&ctx->gc_seq_lock); 232 spin_lock_init(&ctx->gc_seq_lock);
@@ -391,9 +390,8 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
391{ 390{
392 struct gss_upcall_msg *gss_msg; 391 struct gss_upcall_msg *gss_msg;
393 392
394 gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL); 393 gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
395 if (gss_msg != NULL) { 394 if (gss_msg != NULL) {
396 memset(gss_msg, 0, sizeof(*gss_msg));
397 INIT_LIST_HEAD(&gss_msg->list); 395 INIT_LIST_HEAD(&gss_msg->list);
398 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); 396 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
399 init_waitqueue_head(&gss_msg->waitqueue); 397 init_waitqueue_head(&gss_msg->waitqueue);
@@ -720,8 +718,7 @@ gss_destroy(struct rpc_auth *auth)
720 auth, auth->au_flavor); 718 auth, auth->au_flavor);
721 719
722 gss_auth = container_of(auth, struct gss_auth, rpc_auth); 720 gss_auth = container_of(auth, struct gss_auth, rpc_auth);
723 rpc_unlink(gss_auth->path); 721 rpc_unlink(gss_auth->dentry);
724 dput(gss_auth->dentry);
725 gss_auth->dentry = NULL; 722 gss_auth->dentry = NULL;
726 gss_mech_put(gss_auth->mech); 723 gss_mech_put(gss_auth->mech);
727 724
@@ -776,10 +773,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
776 dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", 773 dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
777 acred->uid, auth->au_flavor); 774 acred->uid, auth->au_flavor);
778 775
779 if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) 776 if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
780 goto out_err; 777 goto out_err;
781 778
782 memset(cred, 0, sizeof(*cred));
783 atomic_set(&cred->gc_count, 1); 779 atomic_set(&cred->gc_count, 1);
784 cred->gc_uid = acred->uid; 780 cred->gc_uid = acred->uid;
785 /* 781 /*
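This file starts a long run of mechanical conversions that recur through the sunrpc and tipc hunks below: a kmalloc()+memset() pair becomes a single zeroing allocation. The pattern, shown once:

    /* before */
    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
    if (ctx != NULL)
            memset(ctx, 0, sizeof(*ctx));

    /* after: one call, same semantics */
    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

    /* array form, with multiplication-overflow checking */
    new = kcalloc(n, sizeof(*new), GFP_KERNEL);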
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index b8714a87b34c..70e1e53a632b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -129,9 +129,8 @@ gss_import_sec_context_kerberos(const void *p,
129 const void *end = (const void *)((const char *)p + len); 129 const void *end = (const void *)((const char *)p + len);
130 struct krb5_ctx *ctx; 130 struct krb5_ctx *ctx;
131 131
132 if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 132 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
133 goto out_err; 133 goto out_err;
134 memset(ctx, 0, sizeof(*ctx));
135 134
136 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 135 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
137 if (IS_ERR(p)) 136 if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index d88468d21c37..3db745379d06 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -237,9 +237,8 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
237 struct gss_api_mech *mech, 237 struct gss_api_mech *mech,
238 struct gss_ctx **ctx_id) 238 struct gss_ctx **ctx_id)
239{ 239{
240 if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL))) 240 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
241 return GSS_S_FAILURE; 241 return GSS_S_FAILURE;
242 memset(*ctx_id, 0, sizeof(**ctx_id));
243 (*ctx_id)->mech_type = gss_mech_get(mech); 242 (*ctx_id)->mech_type = gss_mech_get(mech);
244 243
245 return mech->gm_ops 244 return mech->gm_ops
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 3d0432aa45c1..88dcb52d171b 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -152,9 +152,8 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
152 const void *end = (const void *)((const char *)p + len); 152 const void *end = (const void *)((const char *)p + len);
153 struct spkm3_ctx *ctx; 153 struct spkm3_ctx *ctx;
154 154
155 if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 155 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
156 goto out_err; 156 goto out_err;
157 memset(ctx, 0, sizeof(*ctx));
158 157
159 p = simple_get_netobj(p, end, &ctx->ctx_id); 158 p = simple_get_netobj(p, end, &ctx->ctx_id);
160 if (IS_ERR(p)) 159 if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index af0d7ce74686..854a983ccf26 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -90,10 +90,9 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
90int 90int
91decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen) 91decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
92{ 92{
93 if (!(out->data = kmalloc(explen,GFP_KERNEL))) 93 if (!(out->data = kzalloc(explen,GFP_KERNEL)))
94 return 0; 94 return 0;
95 out->len = explen; 95 out->len = explen;
96 memset(out->data, 0, explen);
97 memcpy(out->data, in, enclen); 96 memcpy(out->data, in, enclen);
98 return 1; 97 return 1;
99} 98}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7026b0866b7b..00cb388ece03 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -71,7 +71,12 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
71 new = detail->alloc(); 71 new = detail->alloc();
72 if (!new) 72 if (!new)
73 return NULL; 73 return NULL;
74 /* must fully initialise 'new', else
75 * things may go wrong if we need to
76 * cache_put it soon.
77 */
74 cache_init(new); 78 cache_init(new);
79 detail->init(new, key);
75 80
76 write_lock(&detail->hash_lock); 81 write_lock(&detail->hash_lock);
77 82
@@ -85,7 +90,6 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
85 return tmp; 90 return tmp;
86 } 91 }
87 } 92 }
88 detail->init(new, key);
89 new->next = *head; 93 new->next = *head;
90 *head = new; 94 *head = new;
91 detail->entries++; 95 detail->entries++;
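The reordering matters because of the duplicate path: if an identical entry is found under hash_lock, the freshly allocated 'new' is released again via cache_put(), which (per the comment added above) touches fields that only detail->init() sets up. Hence both initialisation steps now run before the lock is taken:

    cache_init(new);
    detail->init(new, key);     /* fully initialise before publishing */
    write_lock(&detail->hash_lock);
    /* ... on duplicate: release 'new' safely and return the old entry ... */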
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index aa8965e9d307..3e19d321067a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -125,10 +125,9 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
125 goto out_err; 125 goto out_err;
126 126
127 err = -ENOMEM; 127 err = -ENOMEM;
128 clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); 128 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
129 if (!clnt) 129 if (!clnt)
130 goto out_err; 130 goto out_err;
131 memset(clnt, 0, sizeof(*clnt));
132 atomic_set(&clnt->cl_users, 0); 131 atomic_set(&clnt->cl_users, 0);
133 atomic_set(&clnt->cl_count, 1); 132 atomic_set(&clnt->cl_count, 1);
134 clnt->cl_parent = clnt; 133 clnt->cl_parent = clnt;
@@ -184,8 +183,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
184 183
185out_no_auth: 184out_no_auth:
186 if (!IS_ERR(clnt->cl_dentry)) { 185 if (!IS_ERR(clnt->cl_dentry)) {
187 rpc_rmdir(clnt->cl_pathname); 186 rpc_rmdir(clnt->cl_dentry);
188 dput(clnt->cl_dentry);
189 rpc_put_mount(); 187 rpc_put_mount();
190 } 188 }
191out_no_path: 189out_no_path:
@@ -252,10 +250,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
252 new->cl_autobind = 0; 250 new->cl_autobind = 0;
253 new->cl_oneshot = 0; 251 new->cl_oneshot = 0;
254 new->cl_dead = 0; 252 new->cl_dead = 0;
255 if (!IS_ERR(new->cl_dentry)) { 253 if (!IS_ERR(new->cl_dentry))
256 dget(new->cl_dentry); 254 dget(new->cl_dentry);
257 rpc_get_mount();
258 }
259 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 255 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
260 if (new->cl_auth) 256 if (new->cl_auth)
261 atomic_inc(&new->cl_auth->au_count); 257 atomic_inc(&new->cl_auth->au_count);
@@ -318,11 +314,15 @@ rpc_destroy_client(struct rpc_clnt *clnt)
318 clnt->cl_auth = NULL; 314 clnt->cl_auth = NULL;
319 } 315 }
320 if (clnt->cl_parent != clnt) { 316 if (clnt->cl_parent != clnt) {
317 if (!IS_ERR(clnt->cl_dentry))
318 dput(clnt->cl_dentry);
321 rpc_destroy_client(clnt->cl_parent); 319 rpc_destroy_client(clnt->cl_parent);
322 goto out_free; 320 goto out_free;
323 } 321 }
324 if (clnt->cl_pathname[0]) 322 if (!IS_ERR(clnt->cl_dentry)) {
325 rpc_rmdir(clnt->cl_pathname); 323 rpc_rmdir(clnt->cl_dentry);
324 rpc_put_mount();
325 }
326 if (clnt->cl_xprt) { 326 if (clnt->cl_xprt) {
327 xprt_destroy(clnt->cl_xprt); 327 xprt_destroy(clnt->cl_xprt);
328 clnt->cl_xprt = NULL; 328 clnt->cl_xprt = NULL;
@@ -332,10 +332,6 @@ rpc_destroy_client(struct rpc_clnt *clnt)
332out_free: 332out_free:
333 rpc_free_iostats(clnt->cl_metrics); 333 rpc_free_iostats(clnt->cl_metrics);
334 clnt->cl_metrics = NULL; 334 clnt->cl_metrics = NULL;
335 if (!IS_ERR(clnt->cl_dentry)) {
336 dput(clnt->cl_dentry);
337 rpc_put_mount();
338 }
339 kfree(clnt); 335 kfree(clnt);
340 return 0; 336 return 0;
341} 337}
@@ -922,26 +918,43 @@ call_transmit(struct rpc_task *task)
922 task->tk_status = xprt_prepare_transmit(task); 918 task->tk_status = xprt_prepare_transmit(task);
923 if (task->tk_status != 0) 919 if (task->tk_status != 0)
924 return; 920 return;
921 task->tk_action = call_transmit_status;
925 /* Encode here so that rpcsec_gss can use correct sequence number. */ 922 /* Encode here so that rpcsec_gss can use correct sequence number. */
926 if (rpc_task_need_encode(task)) { 923 if (rpc_task_need_encode(task)) {
927 task->tk_rqstp->rq_bytes_sent = 0; 924 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
928 call_encode(task); 925 call_encode(task);
929 /* Did the encode result in an error condition? */ 926 /* Did the encode result in an error condition? */
930 if (task->tk_status != 0) 927 if (task->tk_status != 0)
931 goto out_nosend; 928 return;
932 } 929 }
933 task->tk_action = call_transmit_status;
934 xprt_transmit(task); 930 xprt_transmit(task);
935 if (task->tk_status < 0) 931 if (task->tk_status < 0)
936 return; 932 return;
937 if (!task->tk_msg.rpc_proc->p_decode) { 933 /*
938 task->tk_action = rpc_exit_task; 934 * On success, ensure that we call xprt_end_transmit() before sleeping
939 rpc_wake_up_task(task); 935 * in order to allow access to the socket to other RPC requests.
940 } 936 */
941 return; 937 call_transmit_status(task);
942out_nosend: 938 if (task->tk_msg.rpc_proc->p_decode != NULL)
943 /* release socket write lock before attempting to handle error */ 939 return;
944 xprt_abort_transmit(task); 940 task->tk_action = rpc_exit_task;
941 rpc_wake_up_task(task);
942}
943
944/*
945 * 5a. Handle cleanup after a transmission
946 */
947static void
948call_transmit_status(struct rpc_task *task)
949{
950 task->tk_action = call_status;
951 /*
952 * Special case: if we've been waiting on the socket's write_space()
953 * callback, then don't call xprt_end_transmit().
954 */
955 if (task->tk_status == -EAGAIN)
956 return;
957 xprt_end_transmit(task);
945 rpc_task_force_reencode(task); 958 rpc_task_force_reencode(task);
946} 959}
947 960
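The refactor changes when the transport write lock is given back: xprt_end_transmit() (the renamed xprt_abort_transmit(), see the xprt.c hunks below) is now called on the success path too, before the task sleeps waiting for its reply, so other requests can use the socket in the meantime. The resulting flow:

    xprt_transmit(task);
    if (task->tk_status < 0)
            return;                 /* error: handled via call_status */
    call_transmit_status(task);     /* calls xprt_end_transmit() unless we
                                     * are parked on write_space() (-EAGAIN) */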
@@ -993,18 +1006,7 @@ call_status(struct rpc_task *task)
993} 1006}
994 1007
995/* 1008/*
996 * 6a. Handle transmission errors. 1009 * 6a. Handle RPC timeout
997 */
998static void
999call_transmit_status(struct rpc_task *task)
1000{
1001 if (task->tk_status != -EAGAIN)
1002 rpc_task_force_reencode(task);
1003 call_status(task);
1004}
1005
1006/*
1007 * 6b. Handle RPC timeout
1008 * We do not release the request slot, so we keep using the 1010 * We do not release the request slot, so we keep using the
1009 * same XID for all retransmits. 1011 * same XID for all retransmits.
1010 */ 1012 */
@@ -1179,6 +1181,17 @@ call_verify(struct rpc_task *task)
1179 u32 *p = iov->iov_base, n; 1181 u32 *p = iov->iov_base, n;
1180 int error = -EACCES; 1182 int error = -EACCES;
1181 1183
1184 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1185 /* RFC-1014 says that the representation of XDR data must be a
1186 * multiple of four bytes
1187 * - if it isn't, pointer subtraction in the NFS client may give
1188 * undefined results
1189 */
1190 printk(KERN_WARNING
1191 "call_verify: XDR representation not a multiple of"
1192 " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len);
1193 goto out_eio;
1194 }
1182 if ((len -= 3) < 0) 1195 if ((len -= 3) < 0)
1183 goto out_overflow; 1196 goto out_overflow;
1184 p += 1; /* skip XID */ 1197 p += 1; /* skip XID */
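The new sanity check enforces RFC 1014's rule that XDR data is a sequence of 32-bit units: a reply whose total length is not a multiple of four is malformed by construction, and rejecting it up front avoids undefined pointer arithmetic later in the NFS client:

    if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0)  /* len % 4 != 0 */
            goto out_eio;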
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index dc6cb93c8830..0b1a1ac8a4bc 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -539,6 +539,7 @@ repeat:
539 rpc_close_pipes(dentry->d_inode); 539 rpc_close_pipes(dentry->d_inode);
540 simple_unlink(dir, dentry); 540 simple_unlink(dir, dentry);
541 } 541 }
542 inode_dir_notify(dir, DN_DELETE);
542 dput(dentry); 543 dput(dentry);
543 } while (n); 544 } while (n);
544 goto repeat; 545 goto repeat;
@@ -610,8 +611,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
610 int error; 611 int error;
611 612
612 shrink_dcache_parent(dentry); 613 shrink_dcache_parent(dentry);
613 if (dentry->d_inode) 614 if (d_unhashed(dentry))
614 rpc_close_pipes(dentry->d_inode); 615 return 0;
615 if ((error = simple_rmdir(dir, dentry)) != 0) 616 if ((error = simple_rmdir(dir, dentry)) != 0)
616 return error; 617 return error;
617 if (!error) { 618 if (!error) {
@@ -667,10 +668,11 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
667 RPCAUTH_info, RPCAUTH_EOF); 668 RPCAUTH_info, RPCAUTH_EOF);
668 if (error) 669 if (error)
669 goto err_depopulate; 670 goto err_depopulate;
671 dget(dentry);
670out: 672out:
671 mutex_unlock(&dir->i_mutex); 673 mutex_unlock(&dir->i_mutex);
672 rpc_release_path(&nd); 674 rpc_release_path(&nd);
673 return dget(dentry); 675 return dentry;
674err_depopulate: 676err_depopulate:
675 rpc_depopulate(dentry); 677 rpc_depopulate(dentry);
676 __rpc_rmdir(dir, dentry); 678 __rpc_rmdir(dir, dentry);
@@ -683,28 +685,20 @@ err_dput:
683} 685}
684 686
685int 687int
686rpc_rmdir(char *path) 688rpc_rmdir(struct dentry *dentry)
687{ 689{
688 struct nameidata nd; 690 struct dentry *parent;
689 struct dentry *dentry;
690 struct inode *dir; 691 struct inode *dir;
691 int error; 692 int error;
692 693
693 if ((error = rpc_lookup_parent(path, &nd)) != 0) 694 parent = dget_parent(dentry);
694 return error; 695 dir = parent->d_inode;
695 dir = nd.dentry->d_inode;
696 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 696 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
697 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
698 if (IS_ERR(dentry)) {
699 error = PTR_ERR(dentry);
700 goto out_release;
701 }
702 rpc_depopulate(dentry); 697 rpc_depopulate(dentry);
703 error = __rpc_rmdir(dir, dentry); 698 error = __rpc_rmdir(dir, dentry);
704 dput(dentry); 699 dput(dentry);
705out_release:
706 mutex_unlock(&dir->i_mutex); 700 mutex_unlock(&dir->i_mutex);
707 rpc_release_path(&nd); 701 dput(parent);
708 return error; 702 return error;
709} 703}
710 704
@@ -731,10 +725,11 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
731 rpci->flags = flags; 725 rpci->flags = flags;
732 rpci->ops = ops; 726 rpci->ops = ops;
733 inode_dir_notify(dir, DN_CREATE); 727 inode_dir_notify(dir, DN_CREATE);
728 dget(dentry);
734out: 729out:
735 mutex_unlock(&dir->i_mutex); 730 mutex_unlock(&dir->i_mutex);
736 rpc_release_path(&nd); 731 rpc_release_path(&nd);
737 return dget(dentry); 732 return dentry;
738err_dput: 733err_dput:
739 dput(dentry); 734 dput(dentry);
740 dentry = ERR_PTR(-ENOMEM); 735 dentry = ERR_PTR(-ENOMEM);
@@ -744,32 +739,26 @@ err_dput:
744} 739}
745 740
746int 741int
747rpc_unlink(char *path) 742rpc_unlink(struct dentry *dentry)
748{ 743{
749 struct nameidata nd; 744 struct dentry *parent;
750 struct dentry *dentry;
751 struct inode *dir; 745 struct inode *dir;
752 int error; 746 int error = 0;
753 747
754 if ((error = rpc_lookup_parent(path, &nd)) != 0) 748 parent = dget_parent(dentry);
755 return error; 749 dir = parent->d_inode;
756 dir = nd.dentry->d_inode;
757 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 750 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
758 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); 751 if (!d_unhashed(dentry)) {
759 if (IS_ERR(dentry)) { 752 d_drop(dentry);
760 error = PTR_ERR(dentry); 753 if (dentry->d_inode) {
761 goto out_release; 754 rpc_close_pipes(dentry->d_inode);
762 } 755 error = simple_unlink(dir, dentry);
763 d_drop(dentry); 756 }
764 if (dentry->d_inode) { 757 inode_dir_notify(dir, DN_DELETE);
765 rpc_close_pipes(dentry->d_inode);
766 error = simple_unlink(dir, dentry);
767 } 758 }
768 dput(dentry); 759 dput(dentry);
769 inode_dir_notify(dir, DN_DELETE);
770out_release:
771 mutex_unlock(&dir->i_mutex); 760 mutex_unlock(&dir->i_mutex);
772 rpc_release_path(&nd); 761 dput(parent);
773 return error; 762 return error;
774} 763}
775 764
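rpc_rmdir() and rpc_unlink() now operate on the dentry that rpc_mkdir()/rpc_mkpipe() return (and dget() before returning), instead of re-walking a path string. A sketch of the new usage:

    struct dentry *d = rpc_mkpipe(path, private, ops, flags);
    if (!IS_ERR(d)) {
            /* ... use the pipe ... */
            rpc_unlink(d);  /* unhashes, closes pipes, drops the dentry */
    }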
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 15c2db26767b..bd98124c3a64 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -114,13 +114,8 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
114 */ 114 */
115struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) 115struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
116{ 116{
117 unsigned int ops = clnt->cl_maxproc;
118 size_t size = ops * sizeof(struct rpc_iostats);
119 struct rpc_iostats *new; 117 struct rpc_iostats *new;
120 118 new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
121 new = kmalloc(size, GFP_KERNEL);
122 if (new)
123 memset(new, 0 , size);
124 return new; 119 return new;
125} 120}
126EXPORT_SYMBOL(rpc_alloc_iostats); 121EXPORT_SYMBOL(rpc_alloc_iostats);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 01ba60a49572..b76a227dd3ad 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,9 +32,8 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
32 int vers; 32 int vers;
33 unsigned int xdrsize; 33 unsigned int xdrsize;
34 34
35 if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL))) 35 if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
36 return NULL; 36 return NULL;
37 memset(serv, 0, sizeof(*serv));
38 serv->sv_name = prog->pg_name; 37 serv->sv_name = prog->pg_name;
39 serv->sv_program = prog; 38 serv->sv_program = prog;
40 serv->sv_nrthreads = 1; 39 serv->sv_nrthreads = 1;
@@ -159,11 +158,10 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
159 struct svc_rqst *rqstp; 158 struct svc_rqst *rqstp;
160 int error = -ENOMEM; 159 int error = -ENOMEM;
161 160
162 rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL); 161 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
163 if (!rqstp) 162 if (!rqstp)
164 goto out; 163 goto out;
165 164
166 memset(rqstp, 0, sizeof(*rqstp));
167 init_waitqueue_head(&rqstp->rq_wait); 165 init_waitqueue_head(&rqstp->rq_wait);
168 166
169 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) 167 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a27905a0ad27..d9a95732df46 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1322,11 +1322,10 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1322 struct sock *inet; 1322 struct sock *inet;
1323 1323
1324 dprintk("svc: svc_setup_socket %p\n", sock); 1324 dprintk("svc: svc_setup_socket %p\n", sock);
1325 if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) { 1325 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
1326 *errp = -ENOMEM; 1326 *errp = -ENOMEM;
1327 return NULL; 1327 return NULL;
1328 } 1328 }
1329 memset(svsk, 0, sizeof(*svsk));
1330 1329
1331 inet = sock->sk; 1330 inet = sock->sk;
1332 1331
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 49174f0d0a3e..6ac45103a272 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -191,7 +191,6 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
191 do { 191 do {
192 /* Are any pointers crossing a page boundary? */ 192 /* Are any pointers crossing a page boundary? */
193 if (pgto_base == 0) { 193 if (pgto_base == 0) {
194 flush_dcache_page(*pgto);
195 pgto_base = PAGE_CACHE_SIZE; 194 pgto_base = PAGE_CACHE_SIZE;
196 pgto--; 195 pgto--;
197 } 196 }
@@ -211,11 +210,11 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
211 vto = kmap_atomic(*pgto, KM_USER0); 210 vto = kmap_atomic(*pgto, KM_USER0);
212 vfrom = kmap_atomic(*pgfrom, KM_USER1); 211 vfrom = kmap_atomic(*pgfrom, KM_USER1);
213 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 212 memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
213 flush_dcache_page(*pgto);
214 kunmap_atomic(vfrom, KM_USER1); 214 kunmap_atomic(vfrom, KM_USER1);
215 kunmap_atomic(vto, KM_USER0); 215 kunmap_atomic(vto, KM_USER0);
216 216
217 } while ((len -= copy) != 0); 217 } while ((len -= copy) != 0);
218 flush_dcache_page(*pgto);
219} 218}
220 219
221/* 220/*
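The xdr.c change fixes flush ordering: flush_dcache_page() only makes sense once new data has been written into the page, so it now runs immediately after each memmove() into the destination, while the mapping is current, rather than being deferred to the page-boundary bookkeeping and a final flush after the loop:

    memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
    flush_dcache_page(*pgto);   /* flush what was just written */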
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 02060d0e7be8..e8c2bc4977f3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -707,12 +707,9 @@ out_unlock:
707 return err; 707 return err;
708} 708}
709 709
710void 710void xprt_end_transmit(struct rpc_task *task)
711xprt_abort_transmit(struct rpc_task *task)
712{ 711{
713 struct rpc_xprt *xprt = task->tk_xprt; 712 xprt_release_write(task->tk_xprt, task);
714
715 xprt_release_write(xprt, task);
716} 713}
717 714
718/** 715/**
@@ -761,8 +758,6 @@ void xprt_transmit(struct rpc_task *task)
761 task->tk_status = -ENOTCONN; 758 task->tk_status = -ENOTCONN;
762 else if (!req->rq_received) 759 else if (!req->rq_received)
763 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); 760 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
764
765 xprt->ops->release_xprt(xprt, task);
766 spin_unlock_bh(&xprt->transport_lock); 761 spin_unlock_bh(&xprt->transport_lock);
767 return; 762 return;
768 } 763 }
@@ -772,18 +767,8 @@ void xprt_transmit(struct rpc_task *task)
772 * schedq, and being picked up by a parallel run of rpciod(). 767 * schedq, and being picked up by a parallel run of rpciod().
773 */ 768 */
774 task->tk_status = status; 769 task->tk_status = status;
775 770 if (status == -ECONNREFUSED)
776 switch (status) {
777 case -ECONNREFUSED:
778 rpc_sleep_on(&xprt->sending, task, NULL, NULL); 771 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
779 case -EAGAIN:
780 case -ENOTCONN:
781 return;
782 default:
783 break;
784 }
785 xprt_release_write(xprt, task);
786 return;
787} 772}
788 773
789static inline void do_xprt_reserve(struct rpc_task *task) 774static inline void do_xprt_reserve(struct rpc_task *task)
@@ -908,9 +893,8 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
908 struct rpc_xprt *xprt; 893 struct rpc_xprt *xprt;
909 struct rpc_rqst *req; 894 struct rpc_rqst *req;
910 895
911 if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) 896 if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
912 return ERR_PTR(-ENOMEM); 897 return ERR_PTR(-ENOMEM);
913 memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
914 898
915 xprt->addr = *ap; 899 xprt->addr = *ap;
916 900
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 21006b109101..441bd53f5eca 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -414,6 +414,33 @@ static int xs_tcp_send_request(struct rpc_task *task)
414} 414}
415 415
416/** 416/**
417 * xs_tcp_release_xprt - clean up after a tcp transmission
418 * @xprt: transport
419 * @task: rpc task
420 *
421 * This cleans up if an error causes us to abort the transmission of a request.
422 * In this case, the socket may need to be reset in order to avoid confusing
423 * the server.
424 */
425static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
426{
427 struct rpc_rqst *req;
428
429 if (task != xprt->snd_task)
430 return;
431 if (task == NULL)
432 goto out_release;
433 req = task->tk_rqstp;
434 if (req->rq_bytes_sent == 0)
435 goto out_release;
436 if (req->rq_bytes_sent == req->rq_snd_buf.len)
437 goto out_release;
438 set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
439out_release:
440 xprt_release_xprt(xprt, task);
441}
442
443/**
417 * xs_close - close a socket 444 * xs_close - close a socket
418 * @xprt: transport 445 * @xprt: transport
419 * 446 *
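TCP needs its own release hook because RPC records are framed inside the byte stream: a request aborted after a partial send leaves half a record in flight, and the only way to resynchronise with the server is to mark the socket for closing so the next request reconnects first. The core of the new hook:

    req = task->tk_rqstp;
    if (req->rq_bytes_sent != 0 &&
        req->rq_bytes_sent != req->rq_snd_buf.len)
            set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
    xprt_release_xprt(xprt, task);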
@@ -1250,7 +1277,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
1250 1277
1251static struct rpc_xprt_ops xs_tcp_ops = { 1278static struct rpc_xprt_ops xs_tcp_ops = {
1252 .reserve_xprt = xprt_reserve_xprt, 1279 .reserve_xprt = xprt_reserve_xprt,
1253 .release_xprt = xprt_release_xprt, 1280 .release_xprt = xs_tcp_release_xprt,
1254 .set_port = xs_set_port, 1281 .set_port = xs_set_port,
1255 .connect = xs_connect, 1282 .connect = xs_connect,
1256 .buf_alloc = rpc_malloc, 1283 .buf_alloc = rpc_malloc,
@@ -1276,10 +1303,9 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1276 1303
1277 xprt->max_reqs = xprt_udp_slot_table_entries; 1304 xprt->max_reqs = xprt_udp_slot_table_entries;
1278 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1305 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
1279 xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1306 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
1280 if (xprt->slot == NULL) 1307 if (xprt->slot == NULL)
1281 return -ENOMEM; 1308 return -ENOMEM;
1282 memset(xprt->slot, 0, slot_table_size);
1283 1309
1284 xprt->prot = IPPROTO_UDP; 1310 xprt->prot = IPPROTO_UDP;
1285 xprt->port = xs_get_random_port(); 1311 xprt->port = xs_get_random_port();
@@ -1318,10 +1344,9 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1318 1344
1319 xprt->max_reqs = xprt_tcp_slot_table_entries; 1345 xprt->max_reqs = xprt_tcp_slot_table_entries;
1320 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1346 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
1321 xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1347 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
1322 if (xprt->slot == NULL) 1348 if (xprt->slot == NULL)
1323 return -ENOMEM; 1349 return -ENOMEM;
1324 memset(xprt->slot, 0, slot_table_size);
1325 1350
1326 xprt->prot = IPPROTO_TCP; 1351 xprt->prot = IPPROTO_TCP;
1327 xprt->port = xs_get_random_port(); 1352 xprt->port = xs_get_random_port();
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 7ef17a449cfd..75a5968c2139 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -665,11 +665,9 @@ int tipc_bearer_init(void)
665 int res; 665 int res;
666 666
667 write_lock_bh(&tipc_net_lock); 667 write_lock_bh(&tipc_net_lock);
668 tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC); 668 tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
669 media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC); 669 media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
670 if (tipc_bearers && media_list) { 670 if (tipc_bearers && media_list) {
671 memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
672 memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
673 res = TIPC_OK; 671 res = TIPC_OK;
674 } else { 672 } else {
675 kfree(tipc_bearers); 673 kfree(tipc_bearers);
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 1dcb6940e338..b46b5188a9fd 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -57,29 +57,25 @@ struct cluster *tipc_cltr_create(u32 addr)
57 struct _zone *z_ptr; 57 struct _zone *z_ptr;
58 struct cluster *c_ptr; 58 struct cluster *c_ptr;
59 int max_nodes; 59 int max_nodes;
60 int alloc;
61 60
62 c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC); 61 c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
63 if (c_ptr == NULL) { 62 if (c_ptr == NULL) {
64 warn("Cluster creation failure, no memory\n"); 63 warn("Cluster creation failure, no memory\n");
65 return NULL; 64 return NULL;
66 } 65 }
67 memset(c_ptr, 0, sizeof(*c_ptr));
68 66
69 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 67 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
70 if (in_own_cluster(addr)) 68 if (in_own_cluster(addr))
71 max_nodes = LOWEST_SLAVE + tipc_max_slaves; 69 max_nodes = LOWEST_SLAVE + tipc_max_slaves;
72 else 70 else
73 max_nodes = tipc_max_nodes + 1; 71 max_nodes = tipc_max_nodes + 1;
74 alloc = sizeof(void *) * (max_nodes + 1);
75 72
76 c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC); 73 c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC);
77 if (c_ptr->nodes == NULL) { 74 if (c_ptr->nodes == NULL) {
78 warn("Cluster creation failure, no memory for node area\n"); 75 warn("Cluster creation failure, no memory for node area\n");
79 kfree(c_ptr); 76 kfree(c_ptr);
80 return NULL; 77 return NULL;
81 } 78 }
82 memset(c_ptr->nodes, 0, alloc);
83 79
84 if (in_own_cluster(addr)) 80 if (in_own_cluster(addr))
85 tipc_local_nodes = c_ptr->nodes; 81 tipc_local_nodes = c_ptr->nodes;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 2b8441203120..ee94de92ae99 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -295,7 +295,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
295{ 295{
296 struct link_req *req; 296 struct link_req *req;
297 297
298 req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC); 298 req = kmalloc(sizeof(*req), GFP_ATOMIC);
299 if (!req) 299 if (!req)
300 return NULL; 300 return NULL;
301 301
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c10e18a49b96..693f02eca6d6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -417,12 +417,11 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
417 struct tipc_msg *msg; 417 struct tipc_msg *msg;
418 char *if_name; 418 char *if_name;
419 419
420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC); 420 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
421 if (!l_ptr) { 421 if (!l_ptr) {
422 warn("Link creation failed, no memory\n"); 422 warn("Link creation failed, no memory\n");
423 return NULL; 423 return NULL;
424 } 424 }
425 memset(l_ptr, 0, sizeof(*l_ptr));
426 425
427 l_ptr->addr = peer; 426 l_ptr->addr = peer;
428 if_name = strchr(b_ptr->publ.name, ':') + 1; 427 if_name = strchr(b_ptr->publ.name, ':') + 1;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index a6926ff07bcc..049242ea5c38 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -117,14 +117,12 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
117 u32 scope, u32 node, u32 port_ref, 117 u32 scope, u32 node, u32 port_ref,
118 u32 key) 118 u32 key)
119{ 119{
120 struct publication *publ = 120 struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
122 if (publ == NULL) { 121 if (publ == NULL) {
123 warn("Publication creation failure, no memory\n"); 122 warn("Publication creation failure, no memory\n");
124 return NULL; 123 return NULL;
125 } 124 }
126 125
127 memset(publ, 0, sizeof(*publ));
128 publ->type = type; 126 publ->type = type;
129 publ->lower = lower; 127 publ->lower = lower;
130 publ->upper = upper; 128 publ->upper = upper;
@@ -144,11 +142,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
144 142
145static struct sub_seq *tipc_subseq_alloc(u32 cnt) 143static struct sub_seq *tipc_subseq_alloc(u32 cnt)
146{ 144{
147 u32 sz = cnt * sizeof(struct sub_seq); 145 struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
149
150 if (sseq)
151 memset(sseq, 0, sz);
152 return sseq; 146 return sseq;
153} 147}
154 148
@@ -160,8 +154,7 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
160 154
161static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) 155static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
162{ 156{
163 struct name_seq *nseq = 157 struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
165 struct sub_seq *sseq = tipc_subseq_alloc(1); 158 struct sub_seq *sseq = tipc_subseq_alloc(1);
166 159
167 if (!nseq || !sseq) { 160 if (!nseq || !sseq) {
@@ -171,7 +164,6 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
171 return NULL; 164 return NULL;
172 } 165 }
173 166
174 memset(nseq, 0, sizeof(*nseq));
175 spin_lock_init(&nseq->lock); 167 spin_lock_init(&nseq->lock);
176 nseq->type = type; 168 nseq->type = type;
177 nseq->sseqs = sseq; 169 nseq->sseqs = sseq;
@@ -1060,7 +1052,7 @@ int tipc_nametbl_init(void)
1060{ 1052{
1061 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1053 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
1062 1054
1063 table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC); 1055 table.types = kmalloc(array_size, GFP_ATOMIC);
1064 if (!table.types) 1056 if (!table.types)
1065 return -ENOMEM; 1057 return -ENOMEM;
1066 1058
diff --git a/net/tipc/net.c b/net/tipc/net.c
index e5a359ab4930..a991bf8a7f74 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -160,14 +160,11 @@ void tipc_net_send_external_routes(u32 dest)
160 160
161static int net_init(void) 161static int net_init(void)
162{ 162{
163 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
164
165 memset(&tipc_net, 0, sizeof(tipc_net)); 163 memset(&tipc_net, 0, sizeof(tipc_net));
166 tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC); 164 tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
167 if (!tipc_net.zones) { 165 if (!tipc_net.zones) {
168 return -ENOMEM; 166 return -ENOMEM;
169 } 167 }
170 memset(tipc_net.zones, 0, sz);
171 return TIPC_OK; 168 return TIPC_OK;
172} 169}
173 170
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 3251c8d8e53c..b9c8c6b9e94f 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -226,12 +226,11 @@ u32 tipc_createport_raw(void *usr_handle,
 	struct tipc_msg *msg;
 	u32 ref;
 
-	p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
+	p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
 	if (!p_ptr) {
 		warn("Port creation failed, no memory\n");
 		return 0;
 	}
-	memset(p_ptr, 0, sizeof(*p_ptr));
 	ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
 	if (!ref) {
 		warn("Port creation failed, reference table exhausted\n");
@@ -1058,7 +1057,7 @@ int tipc_createport(u32 user_ref,
 	struct port *p_ptr;
 	u32 ref;
 
-	up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
+	up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
 	if (!up_ptr) {
 		warn("Port creation failed, no memory\n");
 		return -ENOMEM;
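Two different fixes land in port.c: tipc_createport_raw() needs zeroed memory and moves to kzalloc(), while tipc_createport() keeps plain kmalloc() and only drops the cast, presumably because the caller fills in every field of *up_ptr itself. The cast on kmalloc()'s void * return is redundant in C and, under pre-C99 implicit declarations, could even mask a missing <linux/slab.h>. A minimal sketch (struct and field names hypothetical):

	#include <linux/slab.h>
	#include <linux/types.h>

	struct user_port_sketch {
		u32 ref;
		void *usr_handle;
	};

	static struct user_port_sketch *up_alloc(void)
	{
		/* void * converts implicitly; no cast, no hidden mismatch. */
		struct user_port_sketch *up = kmalloc(sizeof(*up), GFP_ATOMIC);

		if (up) {
			up->ref = 0;		/* every field set explicitly... */
			up->usr_handle = NULL;	/* ...so kzalloc() isn't needed */
		}
		return up;
	}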
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 596d3c8ff750..e6d6ae22ea49 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -79,7 +79,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
 	while (sz < requested_size) {
 		sz <<= 1;
 	}
-	table = (struct reference *)vmalloc(sz * sizeof(struct reference));
+	table = vmalloc(sz * sizeof(*table));
 	if (table == NULL)
 		return -ENOMEM;
 
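The ref.c change swaps sizeof(struct reference) for sizeof(*table), tying the allocation size to the pointer's own type: if the element type is ever renamed or changed, the size expression cannot silently fall out of sync. A sketch of the idiom (types hypothetical):

	#include <linux/vmalloc.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct ref_entry {
		void *object;
		u32 data;
	};

	static struct ref_entry *ref_table;

	static int ref_table_alloc(u32 nr_entries)
	{
		/* sizeof(*ref_table) follows the pointer's type automatically. */
		ref_table = vmalloc(nr_entries * sizeof(*ref_table));
		if (ref_table == NULL)
			return -ENOMEM;
		return 0;
	}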
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index e19b4bcd67ec..c51600ba5f4a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -393,12 +393,11 @@ static void subscr_named_msg_event(void *usr_handle,
 
 	/* Create subscriber object */
 
-	subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
+	subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
 	if (subscriber == NULL) {
 		warn("Subscriber rejected, no memory\n");
 		return;
 	}
-	memset(subscriber, 0, sizeof(struct subscriber));
 	INIT_LIST_HEAD(&subscriber->subscription_list);
 	INIT_LIST_HEAD(&subscriber->subscriber_list);
 	subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 1e3ae57c7228..04d1b9be9c51 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -82,9 +82,8 @@ static int reg_init(void)
 
 	spin_lock_bh(&reg_lock);
 	if (!users) {
-		users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
+		users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
 		if (users) {
-			memset(users, 0, USER_LIST_SIZE);
 			for (i = 1; i <= MAX_USERID; i++) {
 				users[i].next = i - 1;
 			}
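The loop that survives the conversion is worth a second look: users[i].next = i - 1 threads every slot onto an intrusive free list keyed by array index, with slot 0 as the terminator, so handing out a user ID is a constant-time pop. A self-contained sketch of the idiom (all names hypothetical, not TIPC's actual fields):

	#define MAX_ID	64

	struct id_slot {
		int next;	/* index of next free slot; 0 terminates */
		void *owner;	/* non-NULL once the ID is handed out */
	};

	static struct id_slot slots[MAX_ID + 1];
	static int first_free;

	static void id_pool_init(void)
	{
		int i;

		for (i = 1; i <= MAX_ID; i++)
			slots[i].next = i - 1;	/* chain MAX_ID -> ... -> 1 -> 0 */
		first_free = MAX_ID;
	}

	static int id_alloc(void *owner)
	{
		int id = first_free;

		if (id) {
			first_free = slots[id].next;
			slots[id].owner = owner;
		}
		return id;	/* 0 means the pool is exhausted */
	}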
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 316c4872ff5b..f5b00ea2d5ac 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -52,13 +52,12 @@ struct _zone *tipc_zone_create(u32 addr)
 		return NULL;
 	}
 
-	z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
+	z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC);
 	if (!z_ptr) {
 		warn("Zone creation failed, insufficient memory\n");
 		return NULL;
 	}
 
-	memset(z_ptr, 0, sizeof(*z_ptr));
 	z_num = tipc_zone(addr);
 	z_ptr->addr = tipc_addr(z_num, 0, 0);
 	tipc_net.zones[z_num] = z_ptr;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f70475bfb62a..de6ec519272e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -128,23 +128,17 @@ static atomic_t unix_nr_socks = ATOMIC_INIT(0);
 #define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
 
 #ifdef CONFIG_SECURITY_NETWORK
-static void unix_get_peersec_dgram(struct sk_buff *skb)
+static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 {
-	int err;
-
-	err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb),
-					       UNIXSECLEN(skb));
-	if (err)
-		*(UNIXSECDATA(skb)) = NULL;
+	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
 }
 
 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 {
-	scm->secdata = *UNIXSECDATA(skb);
-	scm->seclen = *UNIXSECLEN(skb);
+	scm->secid = *UNIXSID(skb);
 }
 #else
-static inline void unix_get_peersec_dgram(struct sk_buff *skb)
+static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 { }
 
 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
@@ -663,11 +657,10 @@ static int unix_autobind(struct socket *sock)
 		goto out;
 
 	err = -ENOMEM;
-	addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
+	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
 	if (!addr)
 		goto out;
 
-	memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
 	addr->name->sun_family = AF_UNIX;
 	atomic_set(&addr->refcnt, 1);
 
@@ -1323,8 +1316,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
 	if (siocb->scm->fp)
 		unix_attach_fds(siocb->scm, skb);
-
-	unix_get_peersec_dgram(skb);
+	unix_get_secdata(siocb->scm, skb);
 
 	skb->h.raw = skb->data;
 	err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
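The af_unix change replaces the out-of-band (secdata pointer, length) pair stashed in the skb with a fixed-size u32 secid copied by value, so nothing can dangle if the sender's security blob goes away before the datagram is delivered; unix_set_secdata() copies it back into the scm_cookie on receive for eventual delivery via the SCM_SECURITY ancillary message. A rough sketch of the by-value round trip (the cb-layout macro below is an assumption for illustration, not the tree's actual UNIXSID definition):

	#include <linux/skbuff.h>
	#include <linux/types.h>

	/* Assumed layout: a u32 secid lives at a fixed spot in skb->cb[]. */
	#define SKB_SECID(skb)	((u32 *)&((skb)->cb[0]))

	static void stash_secid(struct sk_buff *skb, u32 sender_secid)
	{
		*SKB_SECID(skb) = sender_secid;	/* plain copy, nothing to free */
	}

	static u32 fetch_secid(const struct sk_buff *skb)
	{
		return *SKB_SECID(skb);
	}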
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index a690cf773b6a..6f39faa15832 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -370,12 +370,11 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
 	 * used by the ioctl call to read call information
 	 * and to execute commands.
 	 */
-	if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
+	if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
 		wanpipe_kill_sock_irq (newsk);
 		release_device(dev);
 		return -ENOMEM;
 	}
-	memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
 	memcpy(mbox_ptr,skb->data,skb->len);
 
 	/* Register the lcn on which incoming call came
@@ -507,11 +506,10 @@ static struct sock *wanpipe_alloc_socket(void)
 	if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL)
 		return NULL;
 
-	if ((wan_opt = kmalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) {
+	if ((wan_opt = kzalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) {
 		sk_free(sk);
 		return NULL;
 	}
-	memset(wan_opt, 0x00, sizeof(struct wanpipe_opt));
 
 	wp_sk(sk) = wan_opt;
 
@@ -2011,10 +2009,9 @@ static int set_ioctl_cmd (struct sock *sk, void *arg)
 
 	dev_put(dev);
 
-	if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
+	if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
-	memset(mbox_ptr, 0, sizeof(mbox_cmd_t));
 	wp_sk(sk)->mbox = mbox_ptr;
 
 	wanpipe_link_driver(dev,sk);
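All three wanpipe allocations keep GFP_ATOMIC through the conversion, which is the detail to preserve: these paths run in contexts that must not sleep, so only atomic allocation is safe there, while the wanrouter ioctl path below runs in process context and can use GFP_KERNEL. A sketch of the rule of thumb (helper name hypothetical):

	#include <linux/slab.h>

	/*
	 * may_sleep: nonzero only in process context with no spinlocks held.
	 * GFP_KERNEL can block to reclaim memory; GFP_ATOMIC never sleeps
	 * but fails more readily under memory pressure.
	 */
	static void *ctx_zalloc(size_t size, int may_sleep)
	{
		return kzalloc(size, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}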
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index ad8e8a797790..9479659277ae 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -642,18 +642,16 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
 
 	if (cnf->config_id == WANCONFIG_MPPP) {
 #ifdef CONFIG_WANPIPE_MULTPPP
-		pppdev = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
+		pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL);
 		err = -ENOBUFS;
 		if (pppdev == NULL)
 			goto out;
-		memset(pppdev, 0, sizeof(struct ppp_device));
-		pppdev->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+		pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
 		if (pppdev->dev == NULL) {
 			kfree(pppdev);
 			err = -ENOBUFS;
 			goto out;
 		}
-		memset(pppdev->dev, 0, sizeof(struct net_device));
 		err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf);
 		dev = pppdev->dev;
 #else
@@ -663,11 +661,10 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
 		goto out;
 #endif
 	} else {
-		dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+		dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
 		err = -ENOBUFS;
 		if (dev == NULL)
 			goto out;
-		memset(dev, 0, sizeof(struct net_device));
 		err = wandev->new_if(wandev, dev, cnf);
 	}
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 405b741dff43..3da67ca2c3ce 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -307,10 +307,9 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
 {
 	struct xfrm_policy *policy;
 
-	policy = kmalloc(sizeof(struct xfrm_policy), gfp);
+	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 
 	if (policy) {
-		memset(policy, 0, sizeof(struct xfrm_policy));
 		atomic_set(&policy->refcnt, 1);
 		rwlock_init(&policy->lock);
 		init_timer(&policy->timer);
@@ -1135,12 +1134,33 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 }
 EXPORT_SYMBOL(__xfrm_route_forward);
 
+/* Optimize later using cookies and generation ids. */
+
 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 {
-	/* If it is marked obsolete, which is how we even get here,
-	 * then we have purged it from the policy bundle list and we
-	 * did that for a good reason.
+	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
+	 * to "-1" to force all XFRM destinations to get validated by
+	 * dst_ops->check on every use.  We do this because when a
+	 * normal route referenced by an XFRM dst is obsoleted we do
+	 * not go looking around for all parent referencing XFRM dsts
+	 * so that we can invalidate them.  It is just too much work.
+	 * Instead we make the checks here on every use.  For example:
+	 *
+	 *	XFRM dst A --> IPv4 dst X
+	 *
+	 * X is the "xdst->route" of A (X is also the "dst->path" of A
+	 * in this example).  If X is marked obsolete, "A" will not
+	 * notice.  That's what we are validating here via the
+	 * stale_bundle() check.
+	 *
+	 * When a policy's bundle is pruned, we dst_free() the XFRM
+	 * dst which causes its ->obsolete field to be set to a
+	 * positive non-zero integer.  If an XFRM dst has been pruned
+	 * like this, we want to force a new route lookup.
 	 */
+	if (dst->obsolete < 0 && !stale_bundle(dst))
+		return dst;
+
 	return NULL;
 }
 
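For context on how the new check is reached: the generic dst layer re-validates any cached entry whose ->obsolete is nonzero through its ops->check hook, roughly as below (mirroring include/net/dst.h of this era). Because XFRM bundles are created with obsolete = -1, every use funnels into xfrm_dst_check() above: a live bundle (negative obsolete, not stale) is returned intact, while a pruned (positive obsolete) or stale bundle yields NULL and forces a fresh route lookup.

	static inline struct dst_entry *dst_check(struct dst_entry *dst,
						  u32 cookie)
	{
		if (dst->obsolete)
			dst = dst->ops->check(dst, cookie);
		return dst;
	}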
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 43f00fc28a3d..0021aad5db43 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -194,10 +194,9 @@ struct xfrm_state *xfrm_state_alloc(void)
 {
 	struct xfrm_state *x;
 
-	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
+	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
 
 	if (x) {
-		memset(x, 0, sizeof(struct xfrm_state));
 		atomic_set(&x->refcnt, 1);
 		atomic_set(&x->tunnel_users, 0);
 		INIT_LIST_HEAD(&x->bydst);