Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig | 1
-rw-r--r--  net/Makefile | 1
-rw-r--r--  net/atm/common.c | 3
-rw-r--r--  net/bridge/br_if.c | 2
-rw-r--r--  net/bridge/br_netfilter.c | 29
-rw-r--r--  net/bridge/br_netlink.c | 14
-rw-r--r--  net/bridge/br_sysfs_br.c | 234
-rw-r--r--  net/bridge/br_sysfs_if.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_ip.c | 1
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 1
-rw-r--r--  net/core/dev.c | 19
-rw-r--r--  net/core/dst.c | 9
-rw-r--r--  net/core/fib_rules.c | 14
-rw-r--r--  net/core/neighbour.c | 29
-rw-r--r--  net/core/net-sysfs.c | 175
-rw-r--r--  net/core/rtnetlink.c | 23
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/dccp/ccids/ccid3.c | 5
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dccp/ipv6.c | 2
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 14
-rw-r--r--  net/decnet/dn_table.c | 11
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 6
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/datagram.c | 2
-rw-r--r--  net/ipv4/devinet.c | 14
-rw-r--r--  net/ipv4/fib_semantics.c | 14
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 19
-rw-r--r--  net/ipv4/inet_hashtables.c | 2
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 4
-rw-r--r--  net/ipv4/ip_gre.c | 3
-rw-r--r--  net/ipv4/ipip.c | 3
-rw-r--r--  net/ipv4/netfilter/Kconfig | 26
-rw-r--r--  net/ipv4/netfilter/Makefile | 1
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 40
-rw-r--r--  net/ipv4/netfilter/ip_nat_core.c | 12
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_tcp.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_udp.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_nat_rule.c | 32
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 40
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 15
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c | 13
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 16
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_NETMAP.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_REDIRECT.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_SAME.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_TCPMSS.c | 207
-rw-r--r--  net/ipv4/netfilter/ipt_TOS.c | 11
-rw-r--r--  net/ipv4/netfilter/ipt_TTL.c | 11
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 18
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_ah.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_ecn.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_iprange.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_owner.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_tos.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_ttl.c | 11
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 12
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_tcp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c | 6
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 5
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 105
-rw-r--r--  net/ipv4/tcp_ipv4.c | 18
-rw-r--r--  net/ipv4/tcp_output.c | 3
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 57
-rw-r--r--  net/ipv4/xfrm4_policy.c | 51
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 70
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/inet6_hashtables.c | 4
-rw-r--r--  net/ipv6/ip6_tunnel.c | 3
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/ipv6/mip6.c | 26
-rw-r--r--  net/ipv6/netfilter/Kconfig | 8
-rw-r--r--  net/ipv6/netfilter/Makefile | 1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 12
-rw-r--r--  net/ipv6/netfilter/ip6t_HL.c | 17
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 15
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6t_ah.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_eui64.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_frag.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_hbh.c | 1
-rw-r--r--  net/ipv6/netfilter/ip6t_hl.c | 11
-rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_mh.c | 108
-rw-r--r--  net/ipv6/netfilter/ip6t_owner.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_rt.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c | 19
-rw-r--r--  net/ipv6/raw.c | 15
-rw-r--r--  net/ipv6/route.c | 33
-rw-r--r--  net/ipv6/sit.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 42
-rw-r--r--  net/ipv6/xfrm6_policy.c | 46
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/irda/irias_object.c | 40
-rw-r--r--  net/irda/irlan/irlan_common.c | 23
-rw-r--r--  net/iucv/Kconfig | 15
-rw-r--r--  net/iucv/Makefile | 6
-rw-r--r--  net/iucv/af_iucv.c | 1077
-rw-r--r--  net/iucv/iucv.c | 1619
-rw-r--r--  net/key/af_key.c | 422
-rw-r--r--  net/netfilter/Kconfig | 39
-rw-r--r--  net/netfilter/Makefile | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 40
-rw-r--r--  net/netfilter/nf_conntrack_sane.c | 242
-rw-r--r--  net/netfilter/xt_CLASSIFY.c | 4
-rw-r--r--  net/netfilter/xt_CONNMARK.c | 5
-rw-r--r--  net/netfilter/xt_CONNSECMARK.c | 6
-rw-r--r--  net/netfilter/xt_MARK.c | 8
-rw-r--r--  net/netfilter/xt_SECMARK.c | 4
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 296
-rw-r--r--  net/netfilter/xt_hashlimit.c | 1
-rw-r--r--  net/packet/af_packet.c | 79
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_prio.c | 15
-rw-r--r--  net/sched/sch_sfq.c | 2
-rw-r--r--  net/socket.c | 29
-rw-r--r--  net/wanrouter/wanmain.c | 17
-rw-r--r--  net/x25/Makefile | 2
-rw-r--r--  net/x25/af_x25.c | 32
-rw-r--r--  net/x25/sysctl_net_x25.c | 8
-rw-r--r--  net/x25/x25_dev.c | 13
-rw-r--r--  net/x25/x25_forward.c | 163
-rw-r--r--  net/x25/x25_proc.c | 98
-rw-r--r--  net/x25/x25_route.c | 3
-rw-r--r--  net/xfrm/Kconfig | 26
-rw-r--r--  net/xfrm/xfrm_policy.c | 231
-rw-r--r--  net/xfrm/xfrm_state.c | 184
-rw-r--r--  net/xfrm/xfrm_user.c | 173
150 files changed, 5926 insertions(+), 1086 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index 7dfc94920697..915657832d94 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -37,6 +37,7 @@ config NETDEBUG
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/xfrm/Kconfig"
+source "net/iucv/Kconfig"
 
 config INET
 	bool "TCP/IP networking"
diff --git a/net/Makefile b/net/Makefile
index ad4d14f4bb29..4854ac506313 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_IP_SCTP) += sctp/
 obj-$(CONFIG_IEEE80211)	+= ieee80211/
 obj-$(CONFIG_TIPC)		+= tipc/
 obj-$(CONFIG_NETLABEL)		+= netlabel/
+obj-$(CONFIG_IUCV)		+= iucv/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
diff --git a/net/atm/common.c b/net/atm/common.c
index fbabff494468..a2878e92c3ab 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -816,7 +816,8 @@ static void __exit atm_exit(void)
 	proto_unregister(&vcc_proto);
 }
 
-module_init(atm_init);
+subsys_initcall(atm_init);
+
 module_exit(atm_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 55bb2634c088..2b7c2c7dad48 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -286,7 +286,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	kobject_init(&p->kobj);
 	kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
 	p->kobj.ktype = &brport_ktype;
-	p->kobj.parent = &(dev->class_dev.kobj);
+	p->kobj.parent = &(dev->dev.kobj);
 	p->kobj.kset = NULL;
 
 	return p;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index ea3337ad0edc..a25fa8cb5284 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -949,44 +949,29 @@ static ctl_table brnf_net_table[] = {
 };
 #endif
 
-int br_netfilter_init(void)
+int __init br_netfilter_init(void)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++) {
-		int ret;
-
-		if ((ret = nf_register_hook(&br_nf_ops[i])) >= 0)
-			continue;
-
-		while (i--)
-			nf_unregister_hook(&br_nf_ops[i]);
-
-		return ret;
-	}
-
+	int ret;
+
+	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
+	if (ret < 0)
+		return ret;
 #ifdef CONFIG_SYSCTL
 	brnf_sysctl_header = register_sysctl_table(brnf_net_table, 0);
 	if (brnf_sysctl_header == NULL) {
 		printk(KERN_WARNING
 		       "br_netfilter: can't register to sysctl.\n");
-		for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++)
-			nf_unregister_hook(&br_nf_ops[i]);
-		return -EFAULT;
+		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
+		return -ENOMEM;
 	}
 #endif
-
 	printk(KERN_NOTICE "Bridge firewalling registered\n");
-
 	return 0;
 }
 
 void br_netfilter_fini(void)
 {
-	int i;
-
-	for (i = ARRAY_SIZE(br_nf_ops) - 1; i >= 0; i--)
-		nf_unregister_hook(&br_nf_ops[i]);
+	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
 #ifdef CONFIG_SYSCTL
 	unregister_sysctl_table(brnf_sysctl_header);
 #endif
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a9139682c49b..7d68b24b5654 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -45,7 +45,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
 
 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
 	if (nlh == NULL)
-		return -ENOBUFS;
+		return -EMSGSIZE;
 
 	hdr = nlmsg_data(nlh);
 	hdr->ifi_family = AF_BRIDGE;
@@ -72,7 +72,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
-	return nlmsg_cancel(skb, nlh);
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
 }
 
 /*
@@ -89,9 +90,12 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
 		goto errout;
 
 	err = br_fill_ifinfo(skb, port, 0, 0, event, 0);
-	/* failure implies BUG in br_nlmsg_size() */
-	BUG_ON(err < 0);
-
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
 	err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
 errout:
 	if (err < 0)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index de9d1a9473f2..ce10464716a7 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -21,18 +21,17 @@
 
 #include "br_private.h"
 
-#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
-#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define to_dev(obj)	container_of(obj, struct device, kobj)
 #define to_bridge(cd)	((struct net_bridge *)(to_net_dev(cd)->priv))
 
 /*
  * Common code for storing bridge parameters.
  */
-static ssize_t store_bridge_parm(struct class_device *cd,
+static ssize_t store_bridge_parm(struct device *d,
 				 const char *buf, size_t len,
 				 void (*set)(struct net_bridge *, unsigned long))
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	char *endp;
 	unsigned long val;
 
@@ -50,9 +49,10 @@ static ssize_t store_bridge_parm(struct class_device *cd,
 }
 
 
-static ssize_t show_forward_delay(struct class_device *cd, char *buf)
+static ssize_t show_forward_delay(struct device *d,
+				  struct device_attribute *attr, char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
 }
 
@@ -64,18 +64,20 @@ static void set_forward_delay(struct net_bridge *br, unsigned long val)
 	br->bridge_forward_delay = delay;
 }
 
-static ssize_t store_forward_delay(struct class_device *cd, const char *buf,
-				   size_t len)
+static ssize_t store_forward_delay(struct device *d,
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
 {
-	return store_bridge_parm(cd, buf, len, set_forward_delay);
+	return store_bridge_parm(d, buf, len, set_forward_delay);
 }
-static CLASS_DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
 		   show_forward_delay, store_forward_delay);
 
-static ssize_t show_hello_time(struct class_device *cd, char *buf)
+static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
+			       char *buf)
 {
 	return sprintf(buf, "%lu\n",
-		       jiffies_to_clock_t(to_bridge(cd)->hello_time));
+		       jiffies_to_clock_t(to_bridge(d)->hello_time));
 }
 
 static void set_hello_time(struct net_bridge *br, unsigned long val)
@@ -86,19 +88,20 @@ static void set_hello_time(struct net_bridge *br, unsigned long val)
 	br->bridge_hello_time = t;
 }
 
-static ssize_t store_hello_time(struct class_device *cd, const char *buf,
+static ssize_t store_hello_time(struct device *d,
+				struct device_attribute *attr, const char *buf,
 				size_t len)
 {
-	return store_bridge_parm(cd, buf, len, set_hello_time);
+	return store_bridge_parm(d, buf, len, set_hello_time);
 }
+static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
+		   store_hello_time);
 
-static CLASS_DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
-			 store_hello_time);
-
-static ssize_t show_max_age(struct class_device *cd, char *buf)
+static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
+			    char *buf)
 {
 	return sprintf(buf, "%lu\n",
-		       jiffies_to_clock_t(to_bridge(cd)->max_age));
+		       jiffies_to_clock_t(to_bridge(d)->max_age));
 }
 
 static void set_max_age(struct net_bridge *br, unsigned long val)
@@ -109,18 +112,17 @@ static void set_max_age(struct net_bridge *br, unsigned long val)
 	br->bridge_max_age = t;
 }
 
-static ssize_t store_max_age(struct class_device *cd, const char *buf,
-			     size_t len)
+static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
+			     const char *buf, size_t len)
 {
-	return store_bridge_parm(cd, buf, len, set_max_age);
+	return store_bridge_parm(d, buf, len, set_max_age);
 }
+static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
 
-static CLASS_DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age,
-			 store_max_age);
-
-static ssize_t show_ageing_time(struct class_device *cd, char *buf)
+static ssize_t show_ageing_time(struct device *d,
+				struct device_attribute *attr, char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
 }
 
@@ -129,17 +131,19 @@ static void set_ageing_time(struct net_bridge *br, unsigned long val)
 	br->ageing_time = clock_t_to_jiffies(val);
 }
 
-static ssize_t store_ageing_time(struct class_device *cd, const char *buf,
-				 size_t len)
+static ssize_t store_ageing_time(struct device *d,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len)
 {
-	return store_bridge_parm(cd, buf, len, set_ageing_time);
+	return store_bridge_parm(d, buf, len, set_ageing_time);
 }
+static DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time,
+		   store_ageing_time);
 
-static CLASS_DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time,
-			 store_ageing_time);
-static ssize_t show_stp_state(struct class_device *cd, char *buf)
+static ssize_t show_stp_state(struct device *d,
+			      struct device_attribute *attr, char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%d\n", br->stp_enabled);
 }
 
@@ -148,18 +152,19 @@ static void set_stp_state(struct net_bridge *br, unsigned long val)
 	br->stp_enabled = val;
 }
 
-static ssize_t store_stp_state(struct class_device *cd,
-			       const char *buf, size_t len)
+static ssize_t store_stp_state(struct device *d,
+			       struct device_attribute *attr, const char *buf,
+			       size_t len)
 {
-	return store_bridge_parm(cd, buf, len, set_stp_state);
+	return store_bridge_parm(d, buf, len, set_stp_state);
 }
+static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
+		   store_stp_state);
 
-static CLASS_DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
-			 store_stp_state);
-
-static ssize_t show_priority(struct class_device *cd, char *buf)
+static ssize_t show_priority(struct device *d, struct device_attribute *attr,
+			     char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%d\n",
 		       (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]);
 }
@@ -169,92 +174,107 @@ static void set_priority(struct net_bridge *br, unsigned long val)
 	br_stp_set_bridge_priority(br, (u16) val);
 }
 
-static ssize_t store_priority(struct class_device *cd,
+static ssize_t store_priority(struct device *d, struct device_attribute *attr,
 			       const char *buf, size_t len)
 {
-	return store_bridge_parm(cd, buf, len, set_priority);
+	return store_bridge_parm(d, buf, len, set_priority);
 }
-static CLASS_DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority,
-			 store_priority);
+static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority, store_priority);
 
-static ssize_t show_root_id(struct class_device *cd, char *buf)
+static ssize_t show_root_id(struct device *d, struct device_attribute *attr,
+			    char *buf)
 {
-	return br_show_bridge_id(buf, &to_bridge(cd)->designated_root);
+	return br_show_bridge_id(buf, &to_bridge(d)->designated_root);
 }
-static CLASS_DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL);
+static DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL);
 
-static ssize_t show_bridge_id(struct class_device *cd, char *buf)
+static ssize_t show_bridge_id(struct device *d, struct device_attribute *attr,
+			      char *buf)
 {
-	return br_show_bridge_id(buf, &to_bridge(cd)->bridge_id);
+	return br_show_bridge_id(buf, &to_bridge(d)->bridge_id);
 }
-static CLASS_DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL);
+static DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL);
 
-static ssize_t show_root_port(struct class_device *cd, char *buf)
+static ssize_t show_root_port(struct device *d, struct device_attribute *attr,
+			      char *buf)
 {
-	return sprintf(buf, "%d\n", to_bridge(cd)->root_port);
+	return sprintf(buf, "%d\n", to_bridge(d)->root_port);
 }
-static CLASS_DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL);
+static DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL);
 
-static ssize_t show_root_path_cost(struct class_device *cd, char *buf)
+static ssize_t show_root_path_cost(struct device *d,
+				   struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", to_bridge(cd)->root_path_cost);
+	return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost);
 }
-static CLASS_DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL);
+static DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL);
 
-static ssize_t show_topology_change(struct class_device *cd, char *buf)
+static ssize_t show_topology_change(struct device *d,
+				    struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", to_bridge(cd)->topology_change);
+	return sprintf(buf, "%d\n", to_bridge(d)->topology_change);
 }
-static CLASS_DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL);
+static DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL);
 
-static ssize_t show_topology_change_detected(struct class_device *cd, char *buf)
+static ssize_t show_topology_change_detected(struct device *d,
+					     struct device_attribute *attr,
+					     char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%d\n", br->topology_change_detected);
 }
-static CLASS_DEVICE_ATTR(topology_change_detected, S_IRUGO, show_topology_change_detected, NULL);
+static DEVICE_ATTR(topology_change_detected, S_IRUGO,
+		   show_topology_change_detected, NULL);
 
-static ssize_t show_hello_timer(struct class_device *cd, char *buf)
+static ssize_t show_hello_timer(struct device *d,
+				struct device_attribute *attr, char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer));
 }
-static CLASS_DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL);
+static DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL);
 
-static ssize_t show_tcn_timer(struct class_device *cd, char *buf)
+static ssize_t show_tcn_timer(struct device *d, struct device_attribute *attr,
+			      char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer));
 }
-static CLASS_DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL);
+static DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL);
 
-static ssize_t show_topology_change_timer(struct class_device *cd, char *buf)
+static ssize_t show_topology_change_timer(struct device *d,
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer));
 }
-static CLASS_DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer, NULL);
+static DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer,
+		   NULL);
 
-static ssize_t show_gc_timer(struct class_device *cd, char *buf)
+static ssize_t show_gc_timer(struct device *d, struct device_attribute *attr,
+			     char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer));
 }
-static CLASS_DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL);
+static DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL);
 
-static ssize_t show_group_addr(struct class_device *cd, char *buf)
+static ssize_t show_group_addr(struct device *d,
+			       struct device_attribute *attr, char *buf)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
 		       br->group_addr[0], br->group_addr[1],
 		       br->group_addr[2], br->group_addr[3],
 		       br->group_addr[4], br->group_addr[5]);
 }
 
-static ssize_t store_group_addr(struct class_device *cd, const char *buf,
-				size_t len)
+static ssize_t store_group_addr(struct device *d,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
 {
-	struct net_bridge *br = to_bridge(cd);
+	struct net_bridge *br = to_bridge(d);
 	unsigned new_addr[6];
 	int i;
 
@@ -286,28 +306,28 @@ static ssize_t store_group_addr(struct class_device *cd, const char *buf,
 	return len;
 }
 
-static CLASS_DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR,
 		   show_group_addr, store_group_addr);
 
 
 static struct attribute *bridge_attrs[] = {
-	&class_device_attr_forward_delay.attr,
-	&class_device_attr_hello_time.attr,
-	&class_device_attr_max_age.attr,
-	&class_device_attr_ageing_time.attr,
-	&class_device_attr_stp_state.attr,
-	&class_device_attr_priority.attr,
-	&class_device_attr_bridge_id.attr,
-	&class_device_attr_root_id.attr,
-	&class_device_attr_root_path_cost.attr,
-	&class_device_attr_root_port.attr,
-	&class_device_attr_topology_change.attr,
-	&class_device_attr_topology_change_detected.attr,
-	&class_device_attr_hello_timer.attr,
-	&class_device_attr_tcn_timer.attr,
-	&class_device_attr_topology_change_timer.attr,
-	&class_device_attr_gc_timer.attr,
-	&class_device_attr_group_addr.attr,
+	&dev_attr_forward_delay.attr,
+	&dev_attr_hello_time.attr,
+	&dev_attr_max_age.attr,
+	&dev_attr_ageing_time.attr,
+	&dev_attr_stp_state.attr,
+	&dev_attr_priority.attr,
+	&dev_attr_bridge_id.attr,
+	&dev_attr_root_id.attr,
+	&dev_attr_root_path_cost.attr,
+	&dev_attr_root_port.attr,
+	&dev_attr_topology_change.attr,
+	&dev_attr_topology_change_detected.attr,
+	&dev_attr_hello_timer.attr,
+	&dev_attr_tcn_timer.attr,
+	&dev_attr_topology_change_timer.attr,
+	&dev_attr_gc_timer.attr,
+	&dev_attr_group_addr.attr,
 	NULL
 };
 
@@ -325,8 +345,8 @@ static struct attribute_group bridge_group = {
 static ssize_t brforward_read(struct kobject *kobj, char *buf,
 			      loff_t off, size_t count)
 {
-	struct class_device *cdev = to_class_dev(kobj);
-	struct net_bridge *br = to_bridge(cdev);
+	struct device *dev = to_dev(kobj);
+	struct net_bridge *br = to_bridge(dev);
 	int n;
 
 	/* must read whole records */
@@ -363,7 +383,7 @@ static struct bin_attribute bridge_forward = {
  */
 int br_sysfs_addbr(struct net_device *dev)
 {
-	struct kobject *brobj = &dev->class_dev.kobj;
+	struct kobject *brobj = &dev->dev.kobj;
 	struct net_bridge *br = netdev_priv(dev);
 	int err;
 
@@ -395,9 +415,9 @@ int br_sysfs_addbr(struct net_device *dev)
 	}
 	return 0;
  out3:
-	sysfs_remove_bin_file(&dev->class_dev.kobj, &bridge_forward);
+	sysfs_remove_bin_file(&dev->dev.kobj, &bridge_forward);
  out2:
-	sysfs_remove_group(&dev->class_dev.kobj, &bridge_group);
+	sysfs_remove_group(&dev->dev.kobj, &bridge_group);
  out1:
 	return err;
 
@@ -405,7 +425,7 @@ int br_sysfs_addbr(struct net_device *dev)
 
 void br_sysfs_delbr(struct net_device *dev)
 {
-	struct kobject *kobj = &dev->class_dev.kobj;
+	struct kobject *kobj = &dev->dev.kobj;
 	struct net_bridge *br = netdev_priv(dev);
 
 	kobject_unregister(&br->ifobj);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index c51c9e42aeb3..0bc2aef8f9f3 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -211,7 +211,7 @@ int br_sysfs_addif(struct net_bridge_port *p)
 	struct brport_attribute **a;
 	int err;
 
-	err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj,
+	err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
 				SYSFS_BRIDGE_PORT_LINK);
 	if (err)
 		goto out2;
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index e4c642448e1b..6afa4d017d4a 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -93,6 +93,7 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
 		return -EINVAL;
 	if (info->protocol != IPPROTO_TCP &&
 	    info->protocol != IPPROTO_UDP &&
+	    info->protocol != IPPROTO_UDPLITE &&
 	    info->protocol != IPPROTO_SCTP &&
 	    info->protocol != IPPROTO_DCCP)
 		return -EINVAL;
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index a184f879f253..985df82e427b 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -96,6 +96,7 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
 		       NIPQUAD(ih->daddr), ih->tos, ih->protocol);
 		if (ih->protocol == IPPROTO_TCP ||
 		    ih->protocol == IPPROTO_UDP ||
+		    ih->protocol == IPPROTO_UDPLITE ||
 		    ih->protocol == IPPROTO_SCTP ||
 		    ih->protocol == IPPROTO_DCCP) {
 			struct tcpudphdr _ports, *pptr;
diff --git a/net/core/dev.c b/net/core/dev.c
index e660cb57e42a..1e94a1b9a0f4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -751,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname)
 	else
 		strlcpy(dev->name, newname, IFNAMSIZ);
 
-	err = class_device_rename(&dev->class_dev, dev->name);
+	err = device_rename(&dev->dev, dev->name);
 	if (!err) {
 		hlist_del(&dev->name_hlist);
 		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
@@ -3221,8 +3221,8 @@ void free_netdev(struct net_device *dev)
 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
 	dev->reg_state = NETREG_RELEASED;
 
-	/* will free via class release */
-	class_device_put(&dev->class_dev);
+	/* will free via device release */
+	put_device(&dev->dev);
 #else
 	kfree((char *)dev - dev->padded);
 #endif
@@ -3247,7 +3247,7 @@ void synchronize_net(void)
  *	unregister_netdev() instead of this.
  */
 
-int unregister_netdevice(struct net_device *dev)
+void unregister_netdevice(struct net_device *dev)
 {
 	struct net_device *d, **dp;
 
@@ -3258,7 +3258,9 @@ int unregister_netdevice(struct net_device *dev)
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
 				  "was registered\n", dev->name, dev);
-		return -ENODEV;
+
+		WARN_ON(1);
+		return;
 	}
 
 	BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -3280,11 +3282,7 @@ int unregister_netdevice(struct net_device *dev)
 			break;
 		}
 	}
-	if (!d) {
-		printk(KERN_ERR "unregister net_device: '%s' not found\n",
-		       dev->name);
-		return -ENODEV;
-	}
+	BUG_ON(!d);
 
 	dev->reg_state = NETREG_UNREGISTERING;
 
@@ -3316,7 +3314,6 @@ int unregister_netdevice(struct net_device *dev)
 	synchronize_net();
 
 	dev_put(dev);
-	return 0;
 }
 
 /**
diff --git a/net/core/dst.c b/net/core/dst.c
index 836ec6606925..1a53fb39b7e0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -99,7 +99,14 @@ static void dst_run_gc(unsigned long dummy)
 	printk("dst_total: %d/%d %ld\n",
 	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
 #endif
-	mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
+	/* if the next desired timer is more than 4 seconds in the future
+	 * then round the timer to whole seconds
+	 */
+	if (dst_gc_timer_expires > 4*HZ)
+		mod_timer(&dst_gc_timer,
+			round_jiffies(jiffies + dst_gc_timer_expires));
+	else
+		mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
 
 out:
 	spin_unlock(&dst_lock);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1df6cd4568d3..215f1bff048f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -331,7 +331,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
 	if (nlh == NULL)
-		return -1;
+		return -EMSGSIZE;
 
 	frh = nlmsg_data(nlh);
 	frh->table = rule->table;
@@ -359,7 +359,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
-	return nlmsg_cancel(skb, nlh);
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
 }
 
 int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
@@ -405,9 +406,12 @@ static void notify_rule_change(int event, struct fib_rule *rule,
 		goto errout;
 
 	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
-	/* failure implies BUG in fib_rule_nlmsg_size() */
-	BUG_ON(err < 0);
-
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
 	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
 errout:
 	if (err < 0)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e7300b6b4079..054d46493d2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -696,7 +696,10 @@ next_elt:
 	if (!expire)
 		expire = 1;
 
-	mod_timer(&tbl->gc_timer, now + expire);
+	if (expire>HZ)
+		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
+	else
+		mod_timer(&tbl->gc_timer, now + expire);
 
 	write_unlock(&tbl->lock);
 }
@@ -1637,7 +1640,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
 	if (nlh == NULL)
-		return -ENOBUFS;
+		return -EMSGSIZE;
 
 	ndtmsg = nlmsg_data(nlh);
 
@@ -1706,7 +1709,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
 
 nla_put_failure:
 	read_unlock_bh(&tbl->lock);
-	return nlmsg_cancel(skb, nlh);
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
 }
 
 static int neightbl_fill_param_info(struct sk_buff *skb,
@@ -1720,7 +1724,7 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
 	if (nlh == NULL)
-		return -ENOBUFS;
+		return -EMSGSIZE;
 
 	ndtmsg = nlmsg_data(nlh);
 
@@ -1737,7 +1741,8 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
 	return nlmsg_end(skb, nlh);
 errout:
 	read_unlock_bh(&tbl->lock);
-	return nlmsg_cancel(skb, nlh);
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
 }
 
 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
@@ -1955,7 +1960,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
 	if (nlh == NULL)
-		return -ENOBUFS;
+		return -EMSGSIZE;
 
 	ndm = nlmsg_data(nlh);
 	ndm->ndm_family = neigh->ops->family;
@@ -1987,7 +1992,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
-	return nlmsg_cancel(skb, nlh);
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
 }
 
 
@@ -2429,9 +2435,12 @@ static void __neigh_notify(struct neighbour *n, int type, int flags)
 		goto errout;
 
 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
-	/* failure implies BUG in neigh_nlmsg_size() */
-	BUG_ON(err < 0);
-
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
 	err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
 errout:
 	if (err < 0)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f47f319bb7dc..44db095a8f7e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -18,9 +18,6 @@
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
 
-#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
-#define to_net_dev(class) container_of(class, struct net_device, class_dev)
-
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
@@ -32,10 +29,11 @@ static inline int dev_isalive(const struct net_device *dev)
 }
 
 /* use same locking rules as GIF* ioctl's */
-static ssize_t netdev_show(const struct class_device *cd, char *buf,
+static ssize_t netdev_show(const struct device *dev,
+			   struct device_attribute *attr, char *buf,
 			   ssize_t (*format)(const struct net_device *, char *))
 {
-	struct net_device *net = to_net_dev(cd);
+	struct net_device *net = to_net_dev(dev);
 	ssize_t ret = -EINVAL;
 
 	read_lock(&dev_base_lock);
@@ -52,14 +50,15 @@ static ssize_t format_##field(const struct net_device *net, char *buf) \
 { \
 	return sprintf(buf, format_string, net->field); \
 } \
-static ssize_t show_##field(struct class_device *cd, char *buf) \
+static ssize_t show_##field(struct device *dev, \
+			    struct device_attribute *attr, char *buf) \
 { \
-	return netdev_show(cd, buf, format_##field); \
+	return netdev_show(dev, attr, buf, format_##field); \
 }
 
 
 /* use same locking and permission rules as SIF* ioctl's */
-static ssize_t netdev_store(struct class_device *dev,
+static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t len,
 			    int (*set)(struct net_device *, unsigned long))
 {
@@ -104,7 +103,8 @@ static ssize_t format_addr(char *buf, const unsigned char *addr, int len)
 	return cp - buf;
 }
 
-static ssize_t show_address(struct class_device *dev, char *buf)
+static ssize_t show_address(struct device *dev, struct device_attribute *attr,
+			    char *buf)
 {
 	struct net_device *net = to_net_dev(dev);
 	ssize_t ret = -EINVAL;
@@ -116,7 +116,8 @@ static ssize_t show_address(struct class_device *dev, char *buf)
 	return ret;
 }
 
-static ssize_t show_broadcast(struct class_device *dev, char *buf)
+static ssize_t show_broadcast(struct device *dev,
+			      struct device_attribute *attr, char *buf)
 {
 	struct net_device *net = to_net_dev(dev);
 	if (dev_isalive(net))
@@ -124,7 +125,8 @@ static ssize_t show_broadcast(struct class_device *dev, char *buf)
 	return -EINVAL;
 }
 
-static ssize_t show_carrier(struct class_device *dev, char *buf)
+static ssize_t show_carrier(struct device *dev,
+			    struct device_attribute *attr, char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
 	if (netif_running(netdev)) {
@@ -133,7 +135,8 @@ static ssize_t show_carrier(struct class_device *dev, char *buf)
 	return -EINVAL;
 }
 
-static ssize_t show_dormant(struct class_device *dev, char *buf)
+static ssize_t show_dormant(struct device *dev,
+			    struct device_attribute *attr, char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
 
@@ -153,7 +156,8 @@ static const char *operstates[] = {
153 "up" 156 "up"
154}; 157};
155 158
156static ssize_t show_operstate(struct class_device *dev, char *buf) 159static ssize_t show_operstate(struct device *dev,
160 struct device_attribute *attr, char *buf)
157{ 161{
158 const struct net_device *netdev = to_net_dev(dev); 162 const struct net_device *netdev = to_net_dev(dev);
159 unsigned char operstate; 163 unsigned char operstate;
@@ -178,9 +182,10 @@ static int change_mtu(struct net_device *net, unsigned long new_mtu)
 	return dev_set_mtu(net, (int) new_mtu);
 }
 
-static ssize_t store_mtu(struct class_device *dev, const char *buf, size_t len)
+static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t len)
 {
-	return netdev_store(dev, buf, len, change_mtu);
+	return netdev_store(dev, attr, buf, len, change_mtu);
 }
 
 NETDEVICE_SHOW(flags, fmt_hex);
@@ -190,9 +195,10 @@ static int change_flags(struct net_device *net, unsigned long new_flags)
 	return dev_change_flags(net, (unsigned) new_flags);
 }
 
-static ssize_t store_flags(struct class_device *dev, const char *buf, size_t len)
+static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t len)
 {
-	return netdev_store(dev, buf, len, change_flags);
+	return netdev_store(dev, attr, buf, len, change_flags);
 }
 
 NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
@@ -203,9 +209,11 @@ static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
 	return 0;
 }
 
-static ssize_t store_tx_queue_len(struct class_device *dev, const char *buf, size_t len)
+static ssize_t store_tx_queue_len(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
 {
-	return netdev_store(dev, buf, len, change_tx_queue_len);
+	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
 }
 
 NETDEVICE_SHOW(weight, fmt_dec);
@@ -216,12 +224,13 @@ static int change_weight(struct net_device *net, unsigned long new_weight)
 	return 0;
 }
 
-static ssize_t store_weight(struct class_device *dev, const char *buf, size_t len)
+static ssize_t store_weight(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t len)
 {
-	return netdev_store(dev, buf, len, change_weight);
+	return netdev_store(dev, attr, buf, len, change_weight);
 }
 
-static struct class_device_attribute net_class_attributes[] = {
+static struct device_attribute net_class_attributes[] = {
 	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
 	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
 	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
@@ -242,10 +251,11 @@ static struct class_device_attribute net_class_attributes[] = {
 };
 
 /* Show a given an attribute in the statistics group */
-static ssize_t netstat_show(const struct class_device *cd, char *buf,
+static ssize_t netstat_show(const struct device *d,
+			    struct device_attribute *attr, char *buf,
 			    unsigned long offset)
 {
-	struct net_device *dev = to_net_dev(cd);
+	struct net_device *dev = to_net_dev(d);
 	struct net_device_stats *stats;
 	ssize_t ret = -EINVAL;
 
@@ -265,12 +275,13 @@ static ssize_t netstat_show(const struct class_device *cd, char *buf,
 
 /* generate a read-only statistics attribute */
 #define NETSTAT_ENTRY(name) \
-static ssize_t show_##name(struct class_device *cd, char *buf) \
+static ssize_t show_##name(struct device *d, \
+			   struct device_attribute *attr, char *buf) \
 { \
-	return netstat_show(cd, buf, \
+	return netstat_show(d, attr, buf, \
 			    offsetof(struct net_device_stats, name)); \
 } \
-static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 
 NETSTAT_ENTRY(rx_packets);
 NETSTAT_ENTRY(tx_packets);
@@ -297,29 +308,29 @@ NETSTAT_ENTRY(rx_compressed);
 NETSTAT_ENTRY(tx_compressed);
 
 static struct attribute *netstat_attrs[] = {
-	&class_device_attr_rx_packets.attr,
-	&class_device_attr_tx_packets.attr,
-	&class_device_attr_rx_bytes.attr,
-	&class_device_attr_tx_bytes.attr,
-	&class_device_attr_rx_errors.attr,
-	&class_device_attr_tx_errors.attr,
-	&class_device_attr_rx_dropped.attr,
-	&class_device_attr_tx_dropped.attr,
-	&class_device_attr_multicast.attr,
-	&class_device_attr_collisions.attr,
-	&class_device_attr_rx_length_errors.attr,
-	&class_device_attr_rx_over_errors.attr,
-	&class_device_attr_rx_crc_errors.attr,
-	&class_device_attr_rx_frame_errors.attr,
-	&class_device_attr_rx_fifo_errors.attr,
-	&class_device_attr_rx_missed_errors.attr,
-	&class_device_attr_tx_aborted_errors.attr,
-	&class_device_attr_tx_carrier_errors.attr,
-	&class_device_attr_tx_fifo_errors.attr,
-	&class_device_attr_tx_heartbeat_errors.attr,
-	&class_device_attr_tx_window_errors.attr,
-	&class_device_attr_rx_compressed.attr,
-	&class_device_attr_tx_compressed.attr,
+	&dev_attr_rx_packets.attr,
+	&dev_attr_tx_packets.attr,
+	&dev_attr_rx_bytes.attr,
+	&dev_attr_tx_bytes.attr,
+	&dev_attr_rx_errors.attr,
+	&dev_attr_tx_errors.attr,
+	&dev_attr_rx_dropped.attr,
+	&dev_attr_tx_dropped.attr,
+	&dev_attr_multicast.attr,
+	&dev_attr_collisions.attr,
+	&dev_attr_rx_length_errors.attr,
+	&dev_attr_rx_over_errors.attr,
+	&dev_attr_rx_crc_errors.attr,
+	&dev_attr_rx_frame_errors.attr,
+	&dev_attr_rx_fifo_errors.attr,
+	&dev_attr_rx_missed_errors.attr,
+	&dev_attr_tx_aborted_errors.attr,
+	&dev_attr_tx_carrier_errors.attr,
+	&dev_attr_tx_fifo_errors.attr,
+	&dev_attr_tx_heartbeat_errors.attr,
+	&dev_attr_tx_window_errors.attr,
+	&dev_attr_rx_compressed.attr,
+	&dev_attr_tx_compressed.attr,
 	NULL
 };
 
@@ -331,11 +342,11 @@ static struct attribute_group netstat_group = {
 
 #ifdef WIRELESS_EXT
 /* helper function that does all the locking etc for wireless stats */
-static ssize_t wireless_show(struct class_device *cd, char *buf,
+static ssize_t wireless_show(struct device *d, char *buf,
 			     ssize_t (*format)(const struct iw_statistics *,
 					       char *))
 {
-	struct net_device *dev = to_net_dev(cd);
+	struct net_device *dev = to_net_dev(d);
 	const struct iw_statistics *iw = NULL;
 	ssize_t ret = -EINVAL;
 
@@ -358,11 +369,12 @@ static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
 { \
 	return sprintf(buf, format_string, iw->field); \
 } \
-static ssize_t show_iw_##name(struct class_device *cd, char *buf) \
+static ssize_t show_iw_##name(struct device *d, \
+			      struct device_attribute *attr, char *buf) \
 { \
-	return wireless_show(cd, buf, format_iw_##name); \
+	return wireless_show(d, buf, format_iw_##name); \
 } \
-static CLASS_DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
+static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
 
 WIRELESS_SHOW(status, status, fmt_hex);
 WIRELESS_SHOW(link, qual.qual, fmt_dec);
@@ -376,16 +388,16 @@ WIRELESS_SHOW(retries, discard.retries, fmt_dec);
 WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
 
 static struct attribute *wireless_attrs[] = {
-	&class_device_attr_status.attr,
-	&class_device_attr_link.attr,
-	&class_device_attr_level.attr,
-	&class_device_attr_noise.attr,
-	&class_device_attr_nwid.attr,
-	&class_device_attr_crypt.attr,
-	&class_device_attr_fragment.attr,
-	&class_device_attr_retries.attr,
-	&class_device_attr_misc.attr,
-	&class_device_attr_beacon.attr,
+	&dev_attr_status.attr,
+	&dev_attr_link.attr,
+	&dev_attr_level.attr,
+	&dev_attr_noise.attr,
+	&dev_attr_nwid.attr,
+	&dev_attr_crypt.attr,
+	&dev_attr_fragment.attr,
+	&dev_attr_retries.attr,
+	&dev_attr_misc.attr,
+	&dev_attr_beacon.attr,
 	NULL
 };
 
@@ -396,10 +408,10 @@ static struct attribute_group wireless_group = {
 #endif
 
 #ifdef CONFIG_HOTPLUG
-static int netdev_uevent(struct class_device *cd, char **envp,
+static int netdev_uevent(struct device *d, char **envp,
 			 int num_envp, char *buf, int size)
 {
-	struct net_device *dev = to_net_dev(cd);
+	struct net_device *dev = to_net_dev(d);
 	int i = 0;
 	int n;
 
@@ -419,12 +431,11 @@ static int netdev_uevent(struct class_device *cd, char **envp,
 
 /*
  * netdev_release -- destroy and free a dead device.
- * Called when last reference to class_device kobject is gone.
+ * Called when last reference to device kobject is gone.
  */
-static void netdev_release(struct class_device *cd)
+static void netdev_release(struct device *d)
 {
-	struct net_device *dev
-		= container_of(cd, struct net_device, class_dev);
+	struct net_device *dev = to_net_dev(d);
 
 	BUG_ON(dev->reg_state != NETREG_RELEASED);
 
@@ -433,31 +444,31 @@ static void netdev_release(struct class_device *cd)
 
 static struct class net_class = {
 	.name = "net",
-	.release = netdev_release,
-	.class_dev_attrs = net_class_attributes,
+	.dev_release = netdev_release,
+	.dev_attrs = net_class_attributes,
 #ifdef CONFIG_HOTPLUG
-	.uevent = netdev_uevent,
+	.dev_uevent = netdev_uevent,
 #endif
 };
 
 void netdev_unregister_sysfs(struct net_device * net)
 {
-	class_device_del(&(net->class_dev));
+	device_del(&(net->dev));
 }
 
 /* Create sysfs entries for network device. */
 int netdev_register_sysfs(struct net_device *net)
 {
-	struct class_device *class_dev = &(net->class_dev);
+	struct device *dev = &(net->dev);
 	struct attribute_group **groups = net->sysfs_groups;
 
-	class_device_initialize(class_dev);
-	class_dev->class = &net_class;
-	class_dev->class_data = net;
-	class_dev->groups = groups;
+	device_initialize(dev);
+	dev->class = &net_class;
+	dev->platform_data = net;
+	dev->groups = groups;
 
 	BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
-	strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE);
+	strlcpy(dev->bus_id, net->name, BUS_ID_SIZE);
 
 	if (net->get_stats)
 		*groups++ = &netstat_group;
@@ -467,7 +478,7 @@ int netdev_register_sysfs(struct net_device *net)
467 *groups++ = &wireless_group; 478 *groups++ = &wireless_group;
468#endif 479#endif
469 480
470 return class_device_add(class_dev); 481 return device_add(dev);
471} 482}
472 483
473int netdev_sysfs_init(void) 484int netdev_sysfs_init(void)
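The net-sysfs.c hunks above convert the network class from the old class_device interface to a plain struct device embedded in struct net_device. A condensed restatement of the converted registration path, stitched together from the hunks (not verbatim; the wireless group wiring and the BUILD_BUG_ON size check are as shown above):

int netdev_register_sysfs(struct net_device *net)
{
	struct device *dev = &net->dev;		/* was &net->class_dev */
	struct attribute_group **groups = net->sysfs_groups;

	device_initialize(dev);			/* was class_device_initialize() */
	dev->class = &net_class;		/* .dev_release / .dev_attrs / .dev_uevent */
	dev->platform_data = net;		/* was class_dev->class_data */
	dev->groups = groups;
	strlcpy(dev->bus_id, net->name, BUS_ID_SIZE);

	if (net->get_stats)
		*groups++ = &netstat_group;	/* wireless_group added under the ifdefs above */

	return device_add(dev);			/* was class_device_add() */
}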
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e76539a5eb5e..9bf9ae05f157 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -320,7 +320,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
320 320
321 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 321 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
322 if (nlh == NULL) 322 if (nlh == NULL)
323 return -ENOBUFS; 323 return -EMSGSIZE;
324 324
325 ifm = nlmsg_data(nlh); 325 ifm = nlmsg_data(nlh);
326 ifm->ifi_family = AF_UNSPEC; 326 ifm->ifi_family = AF_UNSPEC;
@@ -384,7 +384,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
384 return nlmsg_end(skb, nlh); 384 return nlmsg_end(skb, nlh);
385 385
386nla_put_failure: 386nla_put_failure:
387 return nlmsg_cancel(skb, nlh); 387 nlmsg_cancel(skb, nlh);
388 return -EMSGSIZE;
388} 389}
389 390
390static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 391static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -633,9 +634,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
633 634
634 err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK, 635 err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK,
635 NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0); 636 NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0);
636 /* failure impilies BUG in if_nlmsg_size or wireless_rtnetlink_get */ 637 if (err < 0) {
637 BUG_ON(err < 0); 638 /* -EMSGSIZE implies BUG in if_nlmsg_size */
638 639 WARN_ON(err == -EMSGSIZE);
640 kfree_skb(nskb);
641 goto errout;
642 }
639 err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); 643 err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
640errout: 644errout:
641 kfree(iw_buf); 645 kfree(iw_buf);
@@ -678,9 +682,12 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
678 goto errout; 682 goto errout;
679 683
680 err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0); 684 err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0);
681 /* failure implies BUG in if_nlmsg_size() */ 685 if (err < 0) {
682 BUG_ON(err < 0); 686 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
683 687 WARN_ON(err == -EMSGSIZE);
688 kfree_skb(skb);
689 goto errout;
690 }
684 err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); 691 err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
685errout: 692errout:
686 if (err < 0) 693 if (err < 0)
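Both rtnetlink.c hunks apply the same convention change: fill functions report an undersized skb as -EMSGSIZE, both when nlmsg_put() cannot reserve the header and on the nla_put_failure path (where nlmsg_cancel() no longer doubles as the return value), and callers handle the failure instead of calling BUG(). The caller-side shape, condensed from the rtmsg_ifinfo() hunk above with the identifiers used in the patch:

	err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0);
	if (err < 0) {
		/* -EMSGSIZE would mean if_nlmsg_size() under-estimated the
		 * message, hence the WARN_ON rather than a BUG(). */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);		/* drop the half-built message */
		goto errout;
	}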
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index de7801d589e7..f3404ae9f190 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,7 +268,7 @@ nodata:
268struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 268struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
269 unsigned int length, gfp_t gfp_mask) 269 unsigned int length, gfp_t gfp_mask)
270{ 270{
271 int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1; 271 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
272 struct sk_buff *skb; 272 struct sk_buff *skb;
273 273
274 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node); 274 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 40402c59506a..5c452a3ec4d1 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -479,7 +479,8 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
479 479
480 ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, " 480 ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
481 "R_sample=%dus, X=%u\n", dccp_role(sk), 481 "R_sample=%dus, X=%u\n", dccp_role(sk),
482 sk, hctx->ccid3hctx_s, w_init, 482 sk, hctx->ccid3hctx_s,
483 (unsigned long long)w_init,
483 (int)r_sample, 484 (int)r_sample,
484 (unsigned)(hctx->ccid3hctx_x >> 6)); 485 (unsigned)(hctx->ccid3hctx_x >> 6));
485 486
@@ -1005,7 +1006,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1005 DCCP_BUG_ON(r_sample < 0); 1006 DCCP_BUG_ON(r_sample < 0);
1006 if (unlikely(r_sample <= t_elapsed)) 1007 if (unlikely(r_sample <= t_elapsed))
1007 DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n", 1008 DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
1008 r_sample, t_elapsed); 1009 (long)r_sample, (long)t_elapsed);
1009 else 1010 else
1010 r_sample -= t_elapsed; 1011 r_sample -= t_elapsed;
1011 CCID3_RTT_SANITY_CHECK(r_sample); 1012 CCID3_RTT_SANITY_CHECK(r_sample);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 90c74b4adb73..fa2c982d4309 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -72,7 +72,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
72 tmp = ip_route_connect(&rt, nexthop, inet->saddr, 72 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
73 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 73 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
74 IPPROTO_DCCP, 74 IPPROTO_DCCP,
75 inet->sport, usin->sin_port, sk); 75 inet->sport, usin->sin_port, sk, 1);
76 if (tmp < 0) 76 if (tmp < 0)
77 return tmp; 77 return tmp;
78 78
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6b91a9dd0411..79140b3e592e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1041,7 +1041,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1041 if (final_p) 1041 if (final_p)
1042 ipv6_addr_copy(&fl.fl6_dst, final_p); 1042 ipv6_addr_copy(&fl.fl6_dst, final_p);
1043 1043
1044 err = xfrm_lookup(&dst, &fl, sk, 0); 1044 err = xfrm_lookup(&dst, &fl, sk, 1);
1045 if (err < 0) 1045 if (err < 0)
1046 goto failure; 1046 goto failure;
1047 1047
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 63b3fa20e14b..48438565d70f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1024,7 +1024,6 @@ static int __init dccp_init(void)
1024 do { 1024 do {
1025 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE / 1025 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
1026 sizeof(struct inet_ehash_bucket); 1026 sizeof(struct inet_ehash_bucket);
1027 dccp_hashinfo.ehash_size >>= 1;
1028 while (dccp_hashinfo.ehash_size & 1027 while (dccp_hashinfo.ehash_size &
1029 (dccp_hashinfo.ehash_size - 1)) 1028 (dccp_hashinfo.ehash_size - 1))
1030 dccp_hashinfo.ehash_size--; 1029 dccp_hashinfo.ehash_size--;
@@ -1037,9 +1036,10 @@ static int __init dccp_init(void)
1037 goto out_free_bind_bucket_cachep; 1036 goto out_free_bind_bucket_cachep;
1038 } 1037 }
1039 1038
1040 for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) { 1039 for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
1041 rwlock_init(&dccp_hashinfo.ehash[i].lock); 1040 rwlock_init(&dccp_hashinfo.ehash[i].lock);
1042 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain); 1041 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
1042 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
1043 } 1043 }
1044 1044
1045 bhash_order = ehash_order; 1045 bhash_order = ehash_order;
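The dccp_init() hunk relies on TIME-WAIT sockets no longer living in a second half of the established hash table: that is why the "ehash_size >>= 1" halving goes away, the init loop runs to ehash_size instead of ehash_size << 1, and every bucket now also initialises a twchain. The inet_diag.c, inet_hashtables.c and inet_timewait_sock.c hunks further down switch to the same head->twchain. A sketch of the assumed bucket layout (the real definition lives in include/net/inet_hashtables.h):

struct inet_ehash_bucket {
	rwlock_t          lock;
	struct hlist_head chain;	/* established sockets */
	struct hlist_head twchain;	/* TIME-WAIT sockets for the same hash */
};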
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ed083ab455b7..90b3dfd72b49 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -749,7 +749,7 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
749 749
750 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); 750 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
751 if (nlh == NULL) 751 if (nlh == NULL)
752 return -ENOBUFS; 752 return -EMSGSIZE;
753 753
754 ifm = nlmsg_data(nlh); 754 ifm = nlmsg_data(nlh);
755 ifm->ifa_family = AF_DECnet; 755 ifm->ifa_family = AF_DECnet;
@@ -768,7 +768,8 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
768 return nlmsg_end(skb, nlh); 768 return nlmsg_end(skb, nlh);
769 769
770nla_put_failure: 770nla_put_failure:
771 return nlmsg_cancel(skb, nlh); 771 nlmsg_cancel(skb, nlh);
772 return -EMSGSIZE;
772} 773}
773 774
774static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) 775static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
@@ -781,9 +782,12 @@ static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
781 goto errout; 782 goto errout;
782 783
783 err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); 784 err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
784 /* failure implies BUG in dn_ifaddr_nlmsg_size() */ 785 if (err < 0) {
785 BUG_ON(err < 0); 786 /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */
786 787 WARN_ON(err == -EMSGSIZE);
788 kfree_skb(skb);
789 goto errout;
790 }
787 err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); 791 err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
788errout: 792errout:
789 if (err < 0) 793 if (err < 0)
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 13b2421991ba..c1f0cc1b1c60 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -350,7 +350,7 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
350nlmsg_failure: 350nlmsg_failure:
351rtattr_failure: 351rtattr_failure:
352 skb_trim(skb, b - skb->data); 352 skb_trim(skb, b - skb->data);
353 return -1; 353 return -EMSGSIZE;
354} 354}
355 355
356 356
@@ -368,9 +368,12 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
368 err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id, 368 err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
369 f->fn_type, f->fn_scope, &f->fn_key, z, 369 f->fn_type, f->fn_scope, &f->fn_key, z,
370 DN_FIB_INFO(f), 0); 370 DN_FIB_INFO(f), 0);
371 /* failure implies BUG in dn_fib_nlmsg_size() */ 371 if (err < 0) {
372 BUG_ON(err < 0); 372 /* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */
373 373 WARN_ON(err == -EMSGSIZE);
374 kfree_skb(skb);
375 goto errout;
376 }
374 err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL); 377 err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
375errout: 378errout:
376 if (err < 0) 379 if (err < 0)
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index fa2f7da606a9..fb58e03b3fbd 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -265,6 +265,12 @@ ieee80211softmac_wx_get_rate(struct net_device *net_dev,
265 int err = -EINVAL; 265 int err = -EINVAL;
266 266
267 spin_lock_irqsave(&mac->lock, flags); 267 spin_lock_irqsave(&mac->lock, flags);
268
269 if (unlikely(!mac->running)) {
270 err = -ENODEV;
271 goto out_unlock;
272 }
273
268 switch (mac->txrates.default_rate) { 274 switch (mac->txrates.default_rate) {
269 case IEEE80211_CCK_RATE_1MB: 275 case IEEE80211_CCK_RATE_1MB:
270 data->bitrate.value = 1000000; 276 data->bitrate.value = 1000000;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 864009643675..5750a2b2a0d6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1007,7 +1007,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1007 RT_CONN_FLAGS(sk), 1007 RT_CONN_FLAGS(sk),
1008 sk->sk_bound_dev_if, 1008 sk->sk_bound_dev_if,
1009 sk->sk_protocol, 1009 sk->sk_protocol,
1010 inet->sport, inet->dport, sk); 1010 inet->sport, inet->dport, sk, 0);
1011 if (err) 1011 if (err)
1012 return err; 1012 return err;
1013 1013
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 7b068a891953..0072d79f0c2a 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -49,7 +49,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
49 err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr, 49 err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
50 RT_CONN_FLAGS(sk), oif, 50 RT_CONN_FLAGS(sk), oif,
51 sk->sk_protocol, 51 sk->sk_protocol,
52 inet->sport, usin->sin_port, sk); 52 inet->sport, usin->sin_port, sk, 1);
53 if (err) 53 if (err)
54 return err; 54 return err;
55 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) { 55 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 480ace9819f6..c40203640966 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1140,7 +1140,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1140 1140
1141 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); 1141 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
1142 if (nlh == NULL) 1142 if (nlh == NULL)
1143 return -ENOBUFS; 1143 return -EMSGSIZE;
1144 1144
1145 ifm = nlmsg_data(nlh); 1145 ifm = nlmsg_data(nlh);
1146 ifm->ifa_family = AF_INET; 1146 ifm->ifa_family = AF_INET;
@@ -1167,7 +1167,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1167 return nlmsg_end(skb, nlh); 1167 return nlmsg_end(skb, nlh);
1168 1168
1169nla_put_failure: 1169nla_put_failure:
1170 return nlmsg_cancel(skb, nlh); 1170 nlmsg_cancel(skb, nlh);
1171 return -EMSGSIZE;
1171} 1172}
1172 1173
1173static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 1174static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
@@ -1225,9 +1226,12 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh,
1225 goto errout; 1226 goto errout;
1226 1227
1227 err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0); 1228 err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0);
1228 /* failure implies BUG in inet_nlmsg_size() */ 1229 if (err < 0) {
1229 BUG_ON(err < 0); 1230 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1230 1231 WARN_ON(err == -EMSGSIZE);
1232 kfree_skb(skb);
1233 goto errout;
1234 }
1231 err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); 1235 err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1232errout: 1236errout:
1233 if (err < 0) 1237 if (err < 0)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e63b8a98fb4d..be1028c9933e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -314,9 +314,12 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
314 err = fib_dump_info(skb, info->pid, seq, event, tb_id, 314 err = fib_dump_info(skb, info->pid, seq, event, tb_id,
315 fa->fa_type, fa->fa_scope, key, dst_len, 315 fa->fa_type, fa->fa_scope, key, dst_len,
316 fa->fa_tos, fa->fa_info, 0); 316 fa->fa_tos, fa->fa_info, 0);
317 /* failure implies BUG in fib_nlmsg_size() */ 317 if (err < 0) {
318 BUG_ON(err < 0); 318 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
319 319 WARN_ON(err == -EMSGSIZE);
320 kfree_skb(skb);
321 goto errout;
322 }
320 err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE, 323 err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE,
321 info->nlh, GFP_KERNEL); 324 info->nlh, GFP_KERNEL);
322errout: 325errout:
@@ -960,7 +963,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
960 963
961 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags); 964 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
962 if (nlh == NULL) 965 if (nlh == NULL)
963 return -ENOBUFS; 966 return -EMSGSIZE;
964 967
965 rtm = nlmsg_data(nlh); 968 rtm = nlmsg_data(nlh);
966 rtm->rtm_family = AF_INET; 969 rtm->rtm_family = AF_INET;
@@ -1031,7 +1034,8 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
1031 return nlmsg_end(skb, nlh); 1034 return nlmsg_end(skb, nlh);
1032 1035
1033nla_put_failure: 1036nla_put_failure:
1034 return nlmsg_cancel(skb, nlh); 1037 nlmsg_cancel(skb, nlh);
1038 return -EMSGSIZE;
1035} 1039}
1036 1040
1037/* 1041/*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 0017ccb01d6d..024ae56cab25 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -455,6 +455,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
455 skb = add_grhead(skb, pmc, type, &pgr); 455 skb = add_grhead(skb, pmc, type, &pgr);
456 first = 0; 456 first = 0;
457 } 457 }
458 if (!skb)
459 return NULL;
458 psrc = (__be32 *)skb_put(skb, sizeof(__be32)); 460 psrc = (__be32 *)skb_put(skb, sizeof(__be32));
459 *psrc = psf->sf_inaddr; 461 *psrc = psf->sf_inaddr;
460 scount++; stotal++; 462 scount++; stotal++;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 77761ac4f7bb..8aa7d51e6881 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -153,7 +153,7 @@ static int inet_csk_diag_fill(struct sock *sk,
153rtattr_failure: 153rtattr_failure:
154nlmsg_failure: 154nlmsg_failure:
155 skb_trim(skb, b - skb->data); 155 skb_trim(skb, b - skb->data);
156 return -1; 156 return -EMSGSIZE;
157} 157}
158 158
159static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, 159static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
@@ -209,7 +209,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
209 return skb->len; 209 return skb->len;
210nlmsg_failure: 210nlmsg_failure:
211 skb_trim(skb, previous_tail - skb->data); 211 skb_trim(skb, previous_tail - skb->data);
212 return -1; 212 return -EMSGSIZE;
213} 213}
214 214
215static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 215static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -274,11 +274,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
274 if (!rep) 274 if (!rep)
275 goto out; 275 goto out;
276 276
277 if (sk_diag_fill(sk, rep, req->idiag_ext, 277 err = sk_diag_fill(sk, rep, req->idiag_ext,
278 NETLINK_CB(in_skb).pid, 278 NETLINK_CB(in_skb).pid,
279 nlh->nlmsg_seq, 0, nlh) <= 0) 279 nlh->nlmsg_seq, 0, nlh);
280 BUG(); 280 if (err < 0) {
281 281 WARN_ON(err == -EMSGSIZE);
282 kfree_skb(rep);
283 goto out;
284 }
282 err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid, 285 err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
283 MSG_DONTWAIT); 286 MSG_DONTWAIT);
284 if (err > 0) 287 if (err > 0)
@@ -775,7 +778,7 @@ next_normal:
775 struct inet_timewait_sock *tw; 778 struct inet_timewait_sock *tw;
776 779
777 inet_twsk_for_each(tw, node, 780 inet_twsk_for_each(tw, node,
778 &hashinfo->ehash[i + hashinfo->ehash_size].chain) { 781 &head->twchain) {
779 782
780 if (num < s_num) 783 if (num < s_num)
781 goto next_dying; 784 goto next_dying;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8c79c8a4ea5c..150ace18dc75 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -212,7 +212,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
212 write_lock(&head->lock); 212 write_lock(&head->lock);
213 213
214 /* Check TIME-WAIT sockets first. */ 214 /* Check TIME-WAIT sockets first. */
215 sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) { 215 sk_for_each(sk2, node, &head->twchain) {
216 tw = inet_twsk(sk2); 216 tw = inet_twsk(sk2);
217 217
218 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) { 218 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 9f414e35c488..a73cf93cee36 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -78,8 +78,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
78 if (__sk_del_node_init(sk)) 78 if (__sk_del_node_init(sk))
79 sock_prot_dec_use(sk->sk_prot); 79 sock_prot_dec_use(sk->sk_prot);
80 80
81 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */ 81 /* Step 3: Hash TW into TIMEWAIT chain. */
82 inet_twsk_add_node(tw, &(ehead + hashinfo->ehash_size)->chain); 82 inet_twsk_add_node(tw, &ehead->twchain);
83 atomic_inc(&tw->tw_refcnt); 83 atomic_inc(&tw->tw_refcnt);
84 84
85 write_unlock(&ehead->lock); 85 write_unlock(&ehead->lock);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 476cb6084c75..51c83500790f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1008,7 +1008,8 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1008 goto done; 1008 goto done;
1009 dev = t->dev; 1009 dev = t->dev;
1010 } 1010 }
1011 err = unregister_netdevice(dev); 1011 unregister_netdevice(dev);
1012 err = 0;
1012 break; 1013 break;
1013 1014
1014 default: 1015 default:
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9d719d664e5b..da8bbd20c7ed 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -754,7 +754,8 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
754 goto done; 754 goto done;
755 dev = t->dev; 755 dev = t->dev;
756 } 756 }
757 err = unregister_netdevice(dev); 757 unregister_netdevice(dev);
758 err = 0;
758 break; 759 break;
759 760
760 default: 761 default:
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 47bd3ad18b71..9b08e7ad71bc 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -361,32 +361,6 @@ config IP_NF_TARGET_ULOG
361 361
362 To compile it as a module, choose M here. If unsure, say N. 362 To compile it as a module, choose M here. If unsure, say N.
363 363
364config IP_NF_TARGET_TCPMSS
365 tristate "TCPMSS target support"
366 depends on IP_NF_IPTABLES
367 ---help---
368 This option adds a `TCPMSS' target, which allows you to alter the
369 MSS value of TCP SYN packets, to control the maximum size for that
370 connection (usually limiting it to your outgoing interface's MTU
371 minus 40).
372
373 This is used to overcome criminally braindead ISPs or servers which
374 block ICMP Fragmentation Needed packets. The symptoms of this
375 problem are that everything works fine from your Linux
376 firewall/router, but machines behind it can never exchange large
377 packets:
378 1) Web browsers connect, then hang with no data received.
379 2) Small mail works fine, but large emails hang.
380 3) ssh works fine, but scp hangs after initial handshaking.
381
382 Workaround: activate this option and add a rule to your firewall
383 configuration like:
384
385 iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
386 -j TCPMSS --clamp-mss-to-pmtu
387
388 To compile it as a module, choose M here. If unsure, say N.
389
390# NAT + specific targets: ip_conntrack 364# NAT + specific targets: ip_conntrack
391config IP_NF_NAT 365config IP_NF_NAT
392 tristate "Full NAT" 366 tristate "Full NAT"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 16d177b71bf8..6625ec68180c 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -103,7 +103,6 @@ obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
103obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o 103obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
104obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o 104obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
105obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o 105obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
106obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
107obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o 106obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
108obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o 107obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
109 108
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 06e4e8a6dd9f..c34f48fe5478 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -50,12 +50,9 @@ static DEFINE_RWLOCK(tcp_lock);
50 If it's non-zero, we mark only out of window RST segments as INVALID. */ 50 If it's non-zero, we mark only out of window RST segments as INVALID. */
51int ip_ct_tcp_be_liberal __read_mostly = 0; 51int ip_ct_tcp_be_liberal __read_mostly = 0;
52 52
53/* When connection is picked up from the middle, how many packets are required 53/* If it is set to zero, we disable picking up already established
54 to pass in each direction when we assume we are in sync - if any side uses
55 window scaling, we lost the game.
56 If it is set to zero, we disable picking up already established
57 connections. */ 54 connections. */
58int ip_ct_tcp_loose __read_mostly = 3; 55int ip_ct_tcp_loose __read_mostly = 1;
59 56
60/* Max number of the retransmitted packets without receiving an (acceptable) 57/* Max number of the retransmitted packets without receiving an (acceptable)
61 ACK from the destination. If this number is reached, a shorter timer 58 ACK from the destination. If this number is reached, a shorter timer
@@ -694,11 +691,10 @@ static int tcp_in_window(struct ip_ct_tcp *state,
694 before(sack, receiver->td_end + 1), 691 before(sack, receiver->td_end + 1),
695 after(ack, receiver->td_end - MAXACKWINDOW(sender))); 692 after(ack, receiver->td_end - MAXACKWINDOW(sender)));
696 693
697 if (sender->loose || receiver->loose || 694 if (before(seq, sender->td_maxend + 1) &&
698 (before(seq, sender->td_maxend + 1) && 695 after(end, sender->td_end - receiver->td_maxwin - 1) &&
699 after(end, sender->td_end - receiver->td_maxwin - 1) && 696 before(sack, receiver->td_end + 1) &&
700 before(sack, receiver->td_end + 1) && 697 after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
701 after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
702 /* 698 /*
703 * Take into account window scaling (RFC 1323). 699 * Take into account window scaling (RFC 1323).
704 */ 700 */
@@ -743,15 +739,13 @@ static int tcp_in_window(struct ip_ct_tcp *state,
743 state->retrans = 0; 739 state->retrans = 0;
744 } 740 }
745 } 741 }
746 /*
747 * Close the window of disabled window tracking :-)
748 */
749 if (sender->loose)
750 sender->loose--;
751
752 res = 1; 742 res = 1;
753 } else { 743 } else {
754 if (LOG_INVALID(IPPROTO_TCP)) 744 res = 0;
745 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
746 ip_ct_tcp_be_liberal)
747 res = 1;
748 if (!res && LOG_INVALID(IPPROTO_TCP))
755 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 749 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
756 "ip_ct_tcp: %s ", 750 "ip_ct_tcp: %s ",
757 before(seq, sender->td_maxend + 1) ? 751 before(seq, sender->td_maxend + 1) ?
@@ -762,8 +756,6 @@ static int tcp_in_window(struct ip_ct_tcp *state,
762 : "ACK is over the upper bound (ACKed data not seen yet)" 756 : "ACK is over the upper bound (ACKed data not seen yet)"
763 : "SEQ is under the lower bound (already ACKed data retransmitted)" 757 : "SEQ is under the lower bound (already ACKed data retransmitted)"
764 : "SEQ is over the upper bound (over the window of the receiver)"); 758 : "SEQ is over the upper bound (over the window of the receiver)");
765
766 res = ip_ct_tcp_be_liberal;
767 } 759 }
768 760
769 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " 761 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
@@ -1105,8 +1097,6 @@ static int tcp_new(struct ip_conntrack *conntrack,
1105 1097
1106 tcp_options(skb, iph, th, &conntrack->proto.tcp.seen[0]); 1098 tcp_options(skb, iph, th, &conntrack->proto.tcp.seen[0]);
1107 conntrack->proto.tcp.seen[1].flags = 0; 1099 conntrack->proto.tcp.seen[1].flags = 0;
1108 conntrack->proto.tcp.seen[0].loose =
1109 conntrack->proto.tcp.seen[1].loose = 0;
1110 } else if (ip_ct_tcp_loose == 0) { 1100 } else if (ip_ct_tcp_loose == 0) {
1111 /* Don't try to pick up connections. */ 1101 /* Don't try to pick up connections. */
1112 return 0; 1102 return 0;
@@ -1127,11 +1117,11 @@ static int tcp_new(struct ip_conntrack *conntrack,
1127 conntrack->proto.tcp.seen[0].td_maxwin; 1117 conntrack->proto.tcp.seen[0].td_maxwin;
1128 conntrack->proto.tcp.seen[0].td_scale = 0; 1118 conntrack->proto.tcp.seen[0].td_scale = 0;
1129 1119
1130 /* We assume SACK. Should we assume window scaling too? */ 1120 /* We assume SACK and liberal window checking to handle
1121 * window scaling */
1131 conntrack->proto.tcp.seen[0].flags = 1122 conntrack->proto.tcp.seen[0].flags =
1132 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM; 1123 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
1133 conntrack->proto.tcp.seen[0].loose = 1124 IP_CT_TCP_FLAG_BE_LIBERAL;
1134 conntrack->proto.tcp.seen[1].loose = ip_ct_tcp_loose;
1135 } 1125 }
1136 1126
1137 conntrack->proto.tcp.seen[1].td_end = 0; 1127 conntrack->proto.tcp.seen[1].td_end = 0;
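The ip_conntrack_proto_tcp.c hunks drop the per-direction "loose" counters: a connection picked up mid-stream is instead marked IP_CT_TCP_FLAG_BE_LIBERAL in tcp_new(), and the window check accepts an out-of-window packet only for such connections or when the ip_ct_tcp_be_liberal sysctl is set. The net effect of the rewritten branch, condensed, with in_window standing in for the four before()/after() bounds tests shown above:

	res = in_window;
	if (!res && (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
		     ip_ct_tcp_be_liberal))
		res = 1;	/* accept, liberal tracking requested */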
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 9d1a5175dcd4..5e08c2bf887d 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -246,8 +246,9 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
246 if (maniptype == IP_NAT_MANIP_SRC) { 246 if (maniptype == IP_NAT_MANIP_SRC) {
247 if (find_appropriate_src(orig_tuple, tuple, range)) { 247 if (find_appropriate_src(orig_tuple, tuple, range)) {
248 DEBUGP("get_unique_tuple: Found current src map\n"); 248 DEBUGP("get_unique_tuple: Found current src map\n");
249 if (!ip_nat_used_tuple(tuple, conntrack)) 249 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
250 return; 250 if (!ip_nat_used_tuple(tuple, conntrack))
251 return;
251 } 252 }
252 } 253 }
253 254
@@ -261,6 +262,13 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
261 262
262 proto = ip_nat_proto_find_get(orig_tuple->dst.protonum); 263 proto = ip_nat_proto_find_get(orig_tuple->dst.protonum);
263 264
265 /* Change protocol info to have some randomization */
266 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
267 proto->unique_tuple(tuple, range, maniptype, conntrack);
268 ip_nat_proto_put(proto);
269 return;
270 }
271
264 /* Only bother mapping if it's not already in range and unique */ 272 /* Only bother mapping if it's not already in range and unique */
265 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) 273 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
266 || proto->in_range(tuple, maniptype, &range->min, &range->max)) 274 || proto->in_range(tuple, maniptype, &range->min, &range->max))
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index ee80feb4b2a9..2e5c4bc52a60 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -183,7 +183,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
183 datalen = (*pskb)->len - iph->ihl*4; 183 datalen = (*pskb)->len - iph->ihl*4;
184 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 184 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
185 tcph->check = 0; 185 tcph->check = 0;
186 tcph->check = tcp_v4_check(tcph, datalen, 186 tcph->check = tcp_v4_check(datalen,
187 iph->saddr, iph->daddr, 187 iph->saddr, iph->daddr,
188 csum_partial((char *)tcph, 188 csum_partial((char *)tcph,
189 datalen, 0)); 189 datalen, 0));
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
index b586d18b3fb3..14ff24f53a7a 100644
--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/netfilter.h> 12#include <linux/netfilter.h>
12#include <linux/ip.h> 13#include <linux/ip.h>
13#include <linux/tcp.h> 14#include <linux/tcp.h>
@@ -75,6 +76,10 @@ tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
75 range_size = ntohs(range->max.tcp.port) - min + 1; 76 range_size = ntohs(range->max.tcp.port) - min + 1;
76 } 77 }
77 78
79 /* Start from random port to avoid prediction */
80 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
81 port = net_random();
82
78 for (i = 0; i < range_size; i++, port++) { 83 for (i = 0; i < range_size; i++, port++) {
79 *portptr = htons(min + port % range_size); 84 *portptr = htons(min + port % range_size);
80 if (!ip_nat_used_tuple(tuple, conntrack)) { 85 if (!ip_nat_used_tuple(tuple, conntrack)) {
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
index 5ced0877b32f..dfd521672891 100644
--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/netfilter.h> 12#include <linux/netfilter.h>
12#include <linux/ip.h> 13#include <linux/ip.h>
13#include <linux/udp.h> 14#include <linux/udp.h>
@@ -74,6 +75,10 @@ udp_unique_tuple(struct ip_conntrack_tuple *tuple,
74 range_size = ntohs(range->max.udp.port) - min + 1; 75 range_size = ntohs(range->max.udp.port) - min + 1;
75 } 76 }
76 77
78 /* Start from random port to avoid prediction */
79 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
80 port = net_random();
81
77 for (i = 0; i < range_size; i++, port++) { 82 for (i = 0; i < range_size; i++, port++) {
78 *portptr = htons(min + port % range_size); 83 *portptr = htons(min + port % range_size);
79 if (!ip_nat_used_tuple(tuple, conntrack)) 84 if (!ip_nat_used_tuple(tuple, conntrack))
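ip_nat_proto_tcp.c and ip_nat_proto_udp.c get the same two-line addition: when IP_NAT_RANGE_PROTO_RANDOM is set, the port scan starts at a pseudo-random offset (net_random() is the kernel's pseudo-random u32 helper) while the scan over the allowed range is otherwise unchanged. A condensed model of the resulting selection; tuple_in_use() is a hypothetical stand-in for ip_nat_used_tuple(), and the non-random starting offset is simplified to zero:

static u16 pick_nat_port(unsigned int min, unsigned int range_size,
			 int randomize, int (*tuple_in_use)(u16 port))
{
	u32 port = randomize ? net_random() : 0;	/* random start offset */
	unsigned int i;

	for (i = 0; i < range_size; i++, port++) {
		u16 cand = min + port % range_size;
		if (!tuple_in_use(cand))
			return cand;	/* first free port in the range */
	}
	return 0;	/* range exhausted */
}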
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index a176aa3031e0..e1c8a05f3dc6 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -86,7 +86,7 @@ static struct
86 } 86 }
87}; 87};
88 88
89static struct ipt_table nat_table = { 89static struct xt_table nat_table = {
90 .name = "nat", 90 .name = "nat",
91 .valid_hooks = NAT_VALID_HOOKS, 91 .valid_hooks = NAT_VALID_HOOKS,
92 .lock = RW_LOCK_UNLOCKED, 92 .lock = RW_LOCK_UNLOCKED,
@@ -99,7 +99,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
99 const struct net_device *in, 99 const struct net_device *in,
100 const struct net_device *out, 100 const struct net_device *out,
101 unsigned int hooknum, 101 unsigned int hooknum,
102 const struct ipt_target *target, 102 const struct xt_target *target,
103 const void *targinfo) 103 const void *targinfo)
104{ 104{
105 struct ip_conntrack *ct; 105 struct ip_conntrack *ct;
@@ -141,7 +141,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
141 const struct net_device *in, 141 const struct net_device *in,
142 const struct net_device *out, 142 const struct net_device *out,
143 unsigned int hooknum, 143 unsigned int hooknum,
144 const struct ipt_target *target, 144 const struct xt_target *target,
145 const void *targinfo) 145 const void *targinfo)
146{ 146{
147 struct ip_conntrack *ct; 147 struct ip_conntrack *ct;
@@ -166,7 +166,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
166 166
167static int ipt_snat_checkentry(const char *tablename, 167static int ipt_snat_checkentry(const char *tablename,
168 const void *entry, 168 const void *entry,
169 const struct ipt_target *target, 169 const struct xt_target *target,
170 void *targinfo, 170 void *targinfo,
171 unsigned int hook_mask) 171 unsigned int hook_mask)
172{ 172{
@@ -182,7 +182,7 @@ static int ipt_snat_checkentry(const char *tablename,
182 182
183static int ipt_dnat_checkentry(const char *tablename, 183static int ipt_dnat_checkentry(const char *tablename,
184 const void *entry, 184 const void *entry,
185 const struct ipt_target *target, 185 const struct xt_target *target,
186 void *targinfo, 186 void *targinfo,
187 unsigned int hook_mask) 187 unsigned int hook_mask)
188{ 188{
@@ -193,6 +193,10 @@ static int ipt_dnat_checkentry(const char *tablename,
193 printk("DNAT: multiple ranges no longer supported\n"); 193 printk("DNAT: multiple ranges no longer supported\n");
194 return 0; 194 return 0;
195 } 195 }
196 if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
197 printk("DNAT: port randomization not supported\n");
198 return 0;
199 }
196 return 1; 200 return 1;
197} 201}
198 202
@@ -257,8 +261,9 @@ int ip_nat_rule_find(struct sk_buff **pskb,
257 return ret; 261 return ret;
258} 262}
259 263
260static struct ipt_target ipt_snat_reg = { 264static struct xt_target ipt_snat_reg = {
261 .name = "SNAT", 265 .name = "SNAT",
266 .family = AF_INET,
262 .target = ipt_snat_target, 267 .target = ipt_snat_target,
263 .targetsize = sizeof(struct ip_nat_multi_range_compat), 268 .targetsize = sizeof(struct ip_nat_multi_range_compat),
264 .table = "nat", 269 .table = "nat",
@@ -266,8 +271,9 @@ static struct ipt_target ipt_snat_reg = {
266 .checkentry = ipt_snat_checkentry, 271 .checkentry = ipt_snat_checkentry,
267}; 272};
268 273
269static struct ipt_target ipt_dnat_reg = { 274static struct xt_target ipt_dnat_reg = {
270 .name = "DNAT", 275 .name = "DNAT",
276 .family = AF_INET,
271 .target = ipt_dnat_target, 277 .target = ipt_dnat_target,
272 .targetsize = sizeof(struct ip_nat_multi_range_compat), 278 .targetsize = sizeof(struct ip_nat_multi_range_compat),
273 .table = "nat", 279 .table = "nat",
@@ -282,27 +288,27 @@ int __init ip_nat_rule_init(void)
282 ret = ipt_register_table(&nat_table, &nat_initial_table.repl); 288 ret = ipt_register_table(&nat_table, &nat_initial_table.repl);
283 if (ret != 0) 289 if (ret != 0)
284 return ret; 290 return ret;
285 ret = ipt_register_target(&ipt_snat_reg); 291 ret = xt_register_target(&ipt_snat_reg);
286 if (ret != 0) 292 if (ret != 0)
287 goto unregister_table; 293 goto unregister_table;
288 294
289 ret = ipt_register_target(&ipt_dnat_reg); 295 ret = xt_register_target(&ipt_dnat_reg);
290 if (ret != 0) 296 if (ret != 0)
291 goto unregister_snat; 297 goto unregister_snat;
292 298
293 return ret; 299 return ret;
294 300
295 unregister_snat: 301 unregister_snat:
296 ipt_unregister_target(&ipt_snat_reg); 302 xt_unregister_target(&ipt_snat_reg);
297 unregister_table: 303 unregister_table:
298 ipt_unregister_table(&nat_table); 304 xt_unregister_table(&nat_table);
299 305
300 return ret; 306 return ret;
301} 307}
302 308
303void ip_nat_rule_cleanup(void) 309void ip_nat_rule_cleanup(void)
304{ 310{
305 ipt_unregister_target(&ipt_dnat_reg); 311 xt_unregister_target(&ipt_dnat_reg);
306 ipt_unregister_target(&ipt_snat_reg); 312 xt_unregister_target(&ipt_snat_reg);
307 ipt_unregister_table(&nat_table); 313 ipt_unregister_table(&nat_table);
308} 314}
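The ip_nat_rule.c changes above, and the ip_tables.c and ipt_*.c target modules that follow, all apply the same mechanical conversion: the registration struct becomes the address-family-agnostic struct xt_target, gains an explicit .family = AF_INET, and is registered through xt_register_target()/xt_unregister_target() instead of the ipt_* wrappers. Minimal shape of a converted target; "EXAMPLE", example_target() and struct example_info are placeholders, not code from the patch:

static struct xt_target example_reg = {
	.name       = "EXAMPLE",
	.family     = AF_INET,			/* new: address family is explicit */
	.target     = example_target,
	.targetsize = sizeof(struct example_info),
	.table      = "nat",
};

static int __init example_init(void)
{
	return xt_register_target(&example_reg);	/* was ipt_register_target() */
}

static void __exit example_fini(void)
{
	xt_unregister_target(&example_reg);		/* was ipt_unregister_target() */
}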
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fc1f153c86ba..5a7b3a341389 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -216,7 +216,7 @@ ipt_do_table(struct sk_buff **pskb,
216 unsigned int hook, 216 unsigned int hook,
217 const struct net_device *in, 217 const struct net_device *in,
218 const struct net_device *out, 218 const struct net_device *out,
219 struct ipt_table *table) 219 struct xt_table *table)
220{ 220{
221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
222 u_int16_t offset; 222 u_int16_t offset;
@@ -507,7 +507,7 @@ check_entry(struct ipt_entry *e, const char *name)
507static inline int check_match(struct ipt_entry_match *m, const char *name, 507static inline int check_match(struct ipt_entry_match *m, const char *name,
508 const struct ipt_ip *ip, unsigned int hookmask) 508 const struct ipt_ip *ip, unsigned int hookmask)
509{ 509{
510 struct ipt_match *match; 510 struct xt_match *match;
511 int ret; 511 int ret;
512 512
513 match = m->u.kernel.match; 513 match = m->u.kernel.match;
@@ -531,7 +531,7 @@ find_check_match(struct ipt_entry_match *m,
531 unsigned int hookmask, 531 unsigned int hookmask,
532 unsigned int *i) 532 unsigned int *i)
533{ 533{
534 struct ipt_match *match; 534 struct xt_match *match;
535 int ret; 535 int ret;
536 536
537 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, 537 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
@@ -557,7 +557,7 @@ err:
557static inline int check_target(struct ipt_entry *e, const char *name) 557static inline int check_target(struct ipt_entry *e, const char *name)
558{ 558{
559 struct ipt_entry_target *t; 559 struct ipt_entry_target *t;
560 struct ipt_target *target; 560 struct xt_target *target;
561 int ret; 561 int ret;
562 562
563 t = ipt_get_target(e); 563 t = ipt_get_target(e);
@@ -580,7 +580,7 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
580 unsigned int *i) 580 unsigned int *i)
581{ 581{
582 struct ipt_entry_target *t; 582 struct ipt_entry_target *t;
583 struct ipt_target *target; 583 struct xt_target *target;
584 int ret; 584 int ret;
585 unsigned int j; 585 unsigned int j;
586 586
@@ -818,7 +818,7 @@ get_counters(const struct xt_table_info *t,
818 } 818 }
819} 819}
820 820
821static inline struct xt_counters * alloc_counters(struct ipt_table *table) 821static inline struct xt_counters * alloc_counters(struct xt_table *table)
822{ 822{
823 unsigned int countersize; 823 unsigned int countersize;
824 struct xt_counters *counters; 824 struct xt_counters *counters;
@@ -843,7 +843,7 @@ static inline struct xt_counters * alloc_counters(struct ipt_table *table)
843 843
844static int 844static int
845copy_entries_to_user(unsigned int total_size, 845copy_entries_to_user(unsigned int total_size,
846 struct ipt_table *table, 846 struct xt_table *table,
847 void __user *userptr) 847 void __user *userptr)
848{ 848{
849 unsigned int off, num; 849 unsigned int off, num;
@@ -1046,7 +1046,7 @@ static int compat_table_info(struct xt_table_info *info,
1046static int get_info(void __user *user, int *len, int compat) 1046static int get_info(void __user *user, int *len, int compat)
1047{ 1047{
1048 char name[IPT_TABLE_MAXNAMELEN]; 1048 char name[IPT_TABLE_MAXNAMELEN];
1049 struct ipt_table *t; 1049 struct xt_table *t;
1050 int ret; 1050 int ret;
1051 1051
1052 if (*len != sizeof(struct ipt_getinfo)) { 1052 if (*len != sizeof(struct ipt_getinfo)) {
@@ -1107,7 +1107,7 @@ get_entries(struct ipt_get_entries __user *uptr, int *len)
1107{ 1107{
1108 int ret; 1108 int ret;
1109 struct ipt_get_entries get; 1109 struct ipt_get_entries get;
1110 struct ipt_table *t; 1110 struct xt_table *t;
1111 1111
1112 if (*len < sizeof(get)) { 1112 if (*len < sizeof(get)) {
1113 duprintf("get_entries: %u < %d\n", *len, 1113 duprintf("get_entries: %u < %d\n", *len,
@@ -1151,7 +1151,7 @@ __do_replace(const char *name, unsigned int valid_hooks,
1151 void __user *counters_ptr) 1151 void __user *counters_ptr)
1152{ 1152{
1153 int ret; 1153 int ret;
1154 struct ipt_table *t; 1154 struct xt_table *t;
1155 struct xt_table_info *oldinfo; 1155 struct xt_table_info *oldinfo;
1156 struct xt_counters *counters; 1156 struct xt_counters *counters;
1157 void *loc_cpu_old_entry; 1157 void *loc_cpu_old_entry;
@@ -1302,7 +1302,7 @@ do_add_counters(void __user *user, unsigned int len, int compat)
1302 char *name; 1302 char *name;
1303 int size; 1303 int size;
1304 void *ptmp; 1304 void *ptmp;
1305 struct ipt_table *t; 1305 struct xt_table *t;
1306 struct xt_table_info *private; 1306 struct xt_table_info *private;
1307 int ret = 0; 1307 int ret = 0;
1308 void *loc_cpu_entry; 1308 void *loc_cpu_entry;
@@ -1437,7 +1437,7 @@ compat_check_calc_match(struct ipt_entry_match *m,
1437 unsigned int hookmask, 1437 unsigned int hookmask,
1438 int *size, int *i) 1438 int *size, int *i)
1439{ 1439{
1440 struct ipt_match *match; 1440 struct xt_match *match;
1441 1441
1442 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name, 1442 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1443 m->u.user.revision), 1443 m->u.user.revision),
@@ -1466,7 +1466,7 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e,
1466 const char *name) 1466 const char *name)
1467{ 1467{
1468 struct ipt_entry_target *t; 1468 struct ipt_entry_target *t;
1469 struct ipt_target *target; 1469 struct xt_target *target;
1470 unsigned int entry_offset; 1470 unsigned int entry_offset;
1471 int ret, off, h, j; 1471 int ret, off, h, j;
1472 1472
@@ -1550,7 +1550,7 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1550 struct xt_table_info *newinfo, unsigned char *base) 1550 struct xt_table_info *newinfo, unsigned char *base)
1551{ 1551{
1552 struct ipt_entry_target *t; 1552 struct ipt_entry_target *t;
1553 struct ipt_target *target; 1553 struct xt_target *target;
1554 struct ipt_entry *de; 1554 struct ipt_entry *de;
1555 unsigned int origsize; 1555 unsigned int origsize;
1556 int ret, h; 1556 int ret, h;
@@ -1795,7 +1795,7 @@ struct compat_ipt_get_entries
1795}; 1795};
1796 1796
1797static int compat_copy_entries_to_user(unsigned int total_size, 1797static int compat_copy_entries_to_user(unsigned int total_size,
1798 struct ipt_table *table, void __user *userptr) 1798 struct xt_table *table, void __user *userptr)
1799{ 1799{
1800 unsigned int off, num; 1800 unsigned int off, num;
1801 struct compat_ipt_entry e; 1801 struct compat_ipt_entry e;
@@ -1869,7 +1869,7 @@ compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1869{ 1869{
1870 int ret; 1870 int ret;
1871 struct compat_ipt_get_entries get; 1871 struct compat_ipt_get_entries get;
1872 struct ipt_table *t; 1872 struct xt_table *t;
1873 1873
1874 1874
1875 if (*len < sizeof(get)) { 1875 if (*len < sizeof(get)) {
@@ -2052,7 +2052,7 @@ int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2052 return 0; 2052 return 0;
2053} 2053}
2054 2054
2055void ipt_unregister_table(struct ipt_table *table) 2055void ipt_unregister_table(struct xt_table *table)
2056{ 2056{
2057 struct xt_table_info *private; 2057 struct xt_table_info *private;
2058 void *loc_cpu_entry; 2058 void *loc_cpu_entry;
@@ -2124,7 +2124,7 @@ icmp_checkentry(const char *tablename,
2124} 2124}
2125 2125
2126/* The built-in targets: standard (NULL) and error. */ 2126/* The built-in targets: standard (NULL) and error. */
2127static struct ipt_target ipt_standard_target = { 2127static struct xt_target ipt_standard_target = {
2128 .name = IPT_STANDARD_TARGET, 2128 .name = IPT_STANDARD_TARGET,
2129 .targetsize = sizeof(int), 2129 .targetsize = sizeof(int),
2130 .family = AF_INET, 2130 .family = AF_INET,
@@ -2135,7 +2135,7 @@ static struct ipt_target ipt_standard_target = {
2135#endif 2135#endif
2136}; 2136};
2137 2137
2138static struct ipt_target ipt_error_target = { 2138static struct xt_target ipt_error_target = {
2139 .name = IPT_ERROR_TARGET, 2139 .name = IPT_ERROR_TARGET,
2140 .target = ipt_error, 2140 .target = ipt_error,
2141 .targetsize = IPT_FUNCTION_MAXNAMELEN, 2141 .targetsize = IPT_FUNCTION_MAXNAMELEN,
@@ -2158,7 +2158,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
2158#endif 2158#endif
2159}; 2159};
2160 2160
2161static struct ipt_match icmp_matchstruct = { 2161static struct xt_match icmp_matchstruct = {
2162 .name = "icmp", 2162 .name = "icmp",
2163 .match = icmp_match, 2163 .match = icmp_match,
2164 .matchsize = sizeof(struct ipt_icmp), 2164 .matchsize = sizeof(struct ipt_icmp),
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index b1c11160b9de..343c2abdc1a0 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -26,6 +26,7 @@
26 26
27#include <linux/netfilter_arp.h> 27#include <linux/netfilter_arp.h>
28 28
29#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_ipv4/ip_tables.h> 30#include <linux/netfilter_ipv4/ip_tables.h>
30#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h> 31#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
31#include <net/netfilter/nf_conntrack_compat.h> 32#include <net/netfilter/nf_conntrack_compat.h>
@@ -247,6 +248,7 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
247 switch (iph->protocol) { 248 switch (iph->protocol) {
248 case IPPROTO_TCP: 249 case IPPROTO_TCP:
249 case IPPROTO_UDP: 250 case IPPROTO_UDP:
251 case IPPROTO_UDPLITE:
250 case IPPROTO_SCTP: 252 case IPPROTO_SCTP:
251 case IPPROTO_DCCP: 253 case IPPROTO_DCCP:
252 case IPPROTO_ICMP: 254 case IPPROTO_ICMP:
@@ -329,7 +331,7 @@ target(struct sk_buff **pskb,
329 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP 331 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
330 && (ctinfo == IP_CT_RELATED 332 && (ctinfo == IP_CT_RELATED
331 || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) 333 || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
332 return IPT_CONTINUE; 334 return XT_CONTINUE;
333 335
334 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, 336 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
335 * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here 337 * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
@@ -367,7 +369,7 @@ target(struct sk_buff **pskb,
367 * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */ 369 * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */
368 (*pskb)->pkt_type = PACKET_HOST; 370 (*pskb)->pkt_type = PACKET_HOST;
369 371
370 return IPT_CONTINUE; 372 return XT_CONTINUE;
371} 373}
372 374
373static int 375static int
@@ -470,8 +472,9 @@ static void destroy(const struct xt_target *target, void *targinfo)
470 nf_ct_l3proto_module_put(target->family); 472 nf_ct_l3proto_module_put(target->family);
471} 473}
472 474
473static struct ipt_target clusterip_tgt = { 475static struct xt_target clusterip_tgt = {
474 .name = "CLUSTERIP", 476 .name = "CLUSTERIP",
477 .family = AF_INET,
475 .target = target, 478 .target = target,
476 .targetsize = sizeof(struct ipt_clusterip_tgt_info), 479 .targetsize = sizeof(struct ipt_clusterip_tgt_info),
477 .checkentry = checkentry, 480 .checkentry = checkentry,
@@ -727,7 +730,7 @@ static int __init ipt_clusterip_init(void)
727{ 730{
728 int ret; 731 int ret;
729 732
730 ret = ipt_register_target(&clusterip_tgt); 733 ret = xt_register_target(&clusterip_tgt);
731 if (ret < 0) 734 if (ret < 0)
732 return ret; 735 return ret;
733 736
@@ -753,7 +756,7 @@ cleanup_hook:
753 nf_unregister_hook(&cip_arp_ops); 756 nf_unregister_hook(&cip_arp_ops);
754#endif /* CONFIG_PROC_FS */ 757#endif /* CONFIG_PROC_FS */
755cleanup_target: 758cleanup_target:
756 ipt_unregister_target(&clusterip_tgt); 759 xt_unregister_target(&clusterip_tgt);
757 return ret; 760 return ret;
758} 761}
759 762
@@ -765,7 +768,7 @@ static void __exit ipt_clusterip_fini(void)
765 remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent); 768 remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
766#endif 769#endif
767 nf_unregister_hook(&cip_arp_ops); 770 nf_unregister_hook(&cip_arp_ops);
768 ipt_unregister_target(&clusterip_tgt); 771 xt_unregister_target(&clusterip_tgt);
769} 772}
770 773
771module_init(ipt_clusterip_init); 774module_init(ipt_clusterip_init);
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index b55d670a24df..b5ca5938d1fe 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -9,12 +9,14 @@
9 * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp 9 * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp
10*/ 10*/
11 11
12#include <linux/in.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14#include <linux/ip.h> 15#include <linux/ip.h>
15#include <linux/tcp.h> 16#include <linux/tcp.h>
16#include <net/checksum.h> 17#include <net/checksum.h>
17 18
19#include <linux/netfilter/x_tables.h>
18#include <linux/netfilter_ipv4/ip_tables.h> 20#include <linux/netfilter_ipv4/ip_tables.h>
19#include <linux/netfilter_ipv4/ipt_ECN.h> 21#include <linux/netfilter_ipv4/ipt_ECN.h>
20 22
@@ -95,7 +97,7 @@ target(struct sk_buff **pskb,
95 if (!set_ect_tcp(pskb, einfo)) 97 if (!set_ect_tcp(pskb, einfo))
96 return NF_DROP; 98 return NF_DROP;
97 99
98 return IPT_CONTINUE; 100 return XT_CONTINUE;
99} 101}
100 102
101static int 103static int
@@ -119,7 +121,7 @@ checkentry(const char *tablename,
119 return 0; 121 return 0;
120 } 122 }
121 if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) 123 if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR))
122 && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & IPT_INV_PROTO))) { 124 && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
123 printk(KERN_WARNING "ECN: cannot use TCP operations on a " 125 printk(KERN_WARNING "ECN: cannot use TCP operations on a "
124 "non-tcp rule\n"); 126 "non-tcp rule\n");
125 return 0; 127 return 0;
@@ -127,8 +129,9 @@ checkentry(const char *tablename,
127 return 1; 129 return 1;
128} 130}
129 131
130static struct ipt_target ipt_ecn_reg = { 132static struct xt_target ipt_ecn_reg = {
131 .name = "ECN", 133 .name = "ECN",
134 .family = AF_INET,
132 .target = target, 135 .target = target,
133 .targetsize = sizeof(struct ipt_ECN_info), 136 .targetsize = sizeof(struct ipt_ECN_info),
134 .table = "mangle", 137 .table = "mangle",
@@ -138,12 +141,12 @@ static struct ipt_target ipt_ecn_reg = {
138 141
139static int __init ipt_ecn_init(void) 142static int __init ipt_ecn_init(void)
140{ 143{
141 return ipt_register_target(&ipt_ecn_reg); 144 return xt_register_target(&ipt_ecn_reg);
142} 145}
143 146
144static void __exit ipt_ecn_fini(void) 147static void __exit ipt_ecn_fini(void)
145{ 148{
146 ipt_unregister_target(&ipt_ecn_reg); 149 xt_unregister_target(&ipt_ecn_reg);
147} 150}
148 151
149module_init(ipt_ecn_init); 152module_init(ipt_ecn_init);
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index c96de16fefae..f68370ffb43f 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -20,7 +20,7 @@
20#include <net/route.h> 20#include <net/route.h>
21 21
22#include <linux/netfilter.h> 22#include <linux/netfilter.h>
23#include <linux/netfilter_ipv4/ip_tables.h> 23#include <linux/netfilter/x_tables.h>
24#include <linux/netfilter_ipv4/ipt_LOG.h> 24#include <linux/netfilter_ipv4/ipt_LOG.h>
25 25
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
@@ -432,7 +432,7 @@ ipt_log_target(struct sk_buff **pskb,
432 432
433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, 433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
434 loginfo->prefix); 434 loginfo->prefix);
435 return IPT_CONTINUE; 435 return XT_CONTINUE;
436} 436}
437 437
438static int ipt_log_checkentry(const char *tablename, 438static int ipt_log_checkentry(const char *tablename,
@@ -455,8 +455,9 @@ static int ipt_log_checkentry(const char *tablename,
455 return 1; 455 return 1;
456} 456}
457 457
458static struct ipt_target ipt_log_reg = { 458static struct xt_target ipt_log_reg = {
459 .name = "LOG", 459 .name = "LOG",
460 .family = AF_INET,
460 .target = ipt_log_target, 461 .target = ipt_log_target,
461 .targetsize = sizeof(struct ipt_log_info), 462 .targetsize = sizeof(struct ipt_log_info),
462 .checkentry = ipt_log_checkentry, 463 .checkentry = ipt_log_checkentry,
@@ -471,8 +472,11 @@ static struct nf_logger ipt_log_logger ={
471 472
472static int __init ipt_log_init(void) 473static int __init ipt_log_init(void)
473{ 474{
474 if (ipt_register_target(&ipt_log_reg)) 475 int ret;
475 return -EINVAL; 476
477 ret = xt_register_target(&ipt_log_reg);
478 if (ret < 0)
479 return ret;
476 if (nf_log_register(PF_INET, &ipt_log_logger) < 0) { 480 if (nf_log_register(PF_INET, &ipt_log_logger) < 0) {
477 printk(KERN_WARNING "ipt_LOG: not logging via system console " 481 printk(KERN_WARNING "ipt_LOG: not logging via system console "
478 "since somebody else already registered for PF_INET\n"); 482 "since somebody else already registered for PF_INET\n");
@@ -486,7 +490,7 @@ static int __init ipt_log_init(void)
486static void __exit ipt_log_fini(void) 490static void __exit ipt_log_fini(void)
487{ 491{
488 nf_log_unregister_logger(&ipt_log_logger); 492 nf_log_unregister_logger(&ipt_log_logger);
489 ipt_unregister_target(&ipt_log_reg); 493 xt_unregister_target(&ipt_log_reg);
490} 494}
491 495
492module_init(ipt_log_init); 496module_init(ipt_log_init);
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d669685afd04..91c42efcd533 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -25,7 +25,7 @@
25#else 25#else
26#include <linux/netfilter_ipv4/ip_nat_rule.h> 26#include <linux/netfilter_ipv4/ip_nat_rule.h>
27#endif 27#endif
28#include <linux/netfilter_ipv4/ip_tables.h> 28#include <linux/netfilter/x_tables.h>
29 29
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -190,8 +190,9 @@ static struct notifier_block masq_inet_notifier = {
190 .notifier_call = masq_inet_event, 190 .notifier_call = masq_inet_event,
191}; 191};
192 192
193static struct ipt_target masquerade = { 193static struct xt_target masquerade = {
194 .name = "MASQUERADE", 194 .name = "MASQUERADE",
195 .family = AF_INET,
195 .target = masquerade_target, 196 .target = masquerade_target,
196 .targetsize = sizeof(struct ip_nat_multi_range_compat), 197 .targetsize = sizeof(struct ip_nat_multi_range_compat),
197 .table = "nat", 198 .table = "nat",
@@ -204,7 +205,7 @@ static int __init ipt_masquerade_init(void)
204{ 205{
205 int ret; 206 int ret;
206 207
207 ret = ipt_register_target(&masquerade); 208 ret = xt_register_target(&masquerade);
208 209
209 if (ret == 0) { 210 if (ret == 0) {
210 /* Register for device down reports */ 211 /* Register for device down reports */
@@ -218,7 +219,7 @@ static int __init ipt_masquerade_init(void)
218 219
219static void __exit ipt_masquerade_fini(void) 220static void __exit ipt_masquerade_fini(void)
220{ 221{
221 ipt_unregister_target(&masquerade); 222 xt_unregister_target(&masquerade);
222 unregister_netdevice_notifier(&masq_dev_notifier); 223 unregister_netdevice_notifier(&masq_dev_notifier);
223 unregister_inetaddr_notifier(&masq_inet_notifier); 224 unregister_inetaddr_notifier(&masq_inet_notifier);
224} 225}
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 9390e90f2b25..b4acc241d898 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -15,6 +15,7 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <linux/netfilter_ipv4.h> 17#include <linux/netfilter_ipv4.h>
18#include <linux/netfilter/x_tables.h>
18#ifdef CONFIG_NF_NAT_NEEDED 19#ifdef CONFIG_NF_NAT_NEEDED
19#include <net/netfilter/nf_nat_rule.h> 20#include <net/netfilter/nf_nat_rule.h>
20#else 21#else
@@ -88,8 +89,9 @@ target(struct sk_buff **pskb,
88 return ip_nat_setup_info(ct, &newrange, hooknum); 89 return ip_nat_setup_info(ct, &newrange, hooknum);
89} 90}
90 91
91static struct ipt_target target_module = { 92static struct xt_target target_module = {
92 .name = MODULENAME, 93 .name = MODULENAME,
94 .family = AF_INET,
93 .target = target, 95 .target = target,
94 .targetsize = sizeof(struct ip_nat_multi_range_compat), 96 .targetsize = sizeof(struct ip_nat_multi_range_compat),
95 .table = "nat", 97 .table = "nat",
@@ -101,12 +103,12 @@ static struct ipt_target target_module = {
101 103
102static int __init ipt_netmap_init(void) 104static int __init ipt_netmap_init(void)
103{ 105{
104 return ipt_register_target(&target_module); 106 return xt_register_target(&target_module);
105} 107}
106 108
107static void __exit ipt_netmap_fini(void) 109static void __exit ipt_netmap_fini(void)
108{ 110{
109 ipt_unregister_target(&target_module); 111 xt_unregister_target(&target_module);
110} 112}
111 113
112module_init(ipt_netmap_init); 114module_init(ipt_netmap_init);
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 462eceb3a1b1..54cd021aa5a8 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -18,6 +18,7 @@
18#include <net/protocol.h> 18#include <net/protocol.h>
19#include <net/checksum.h> 19#include <net/checksum.h>
20#include <linux/netfilter_ipv4.h> 20#include <linux/netfilter_ipv4.h>
21#include <linux/netfilter/x_tables.h>
21#ifdef CONFIG_NF_NAT_NEEDED 22#ifdef CONFIG_NF_NAT_NEEDED
22#include <net/netfilter/nf_nat_rule.h> 23#include <net/netfilter/nf_nat_rule.h>
23#else 24#else
@@ -104,8 +105,9 @@ redirect_target(struct sk_buff **pskb,
104 return ip_nat_setup_info(ct, &newrange, hooknum); 105 return ip_nat_setup_info(ct, &newrange, hooknum);
105} 106}
106 107
107static struct ipt_target redirect_reg = { 108static struct xt_target redirect_reg = {
108 .name = "REDIRECT", 109 .name = "REDIRECT",
110 .family = AF_INET,
109 .target = redirect_target, 111 .target = redirect_target,
110 .targetsize = sizeof(struct ip_nat_multi_range_compat), 112 .targetsize = sizeof(struct ip_nat_multi_range_compat),
111 .table = "nat", 113 .table = "nat",
@@ -116,12 +118,12 @@ static struct ipt_target redirect_reg = {
116 118
117static int __init ipt_redirect_init(void) 119static int __init ipt_redirect_init(void)
118{ 120{
119 return ipt_register_target(&redirect_reg); 121 return xt_register_target(&redirect_reg);
120} 122}
121 123
122static void __exit ipt_redirect_fini(void) 124static void __exit ipt_redirect_fini(void)
123{ 125{
124 ipt_unregister_target(&redirect_reg); 126 xt_unregister_target(&redirect_reg);
125} 127}
126 128
127module_init(ipt_redirect_init); 129module_init(ipt_redirect_init);
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index f0319e5ee437..e4a1ddb386a7 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -22,6 +22,7 @@
22#include <net/tcp.h> 22#include <net/tcp.h>
23#include <net/route.h> 23#include <net/route.h>
24#include <net/dst.h> 24#include <net/dst.h>
25#include <linux/netfilter/x_tables.h>
25#include <linux/netfilter_ipv4/ip_tables.h> 26#include <linux/netfilter_ipv4/ip_tables.h>
26#include <linux/netfilter_ipv4/ipt_REJECT.h> 27#include <linux/netfilter_ipv4/ipt_REJECT.h>
27#ifdef CONFIG_BRIDGE_NETFILTER 28#ifdef CONFIG_BRIDGE_NETFILTER
@@ -116,7 +117,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
116 117
117 /* Adjust TCP checksum */ 118 /* Adjust TCP checksum */
118 tcph->check = 0; 119 tcph->check = 0;
119 tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr), 120 tcph->check = tcp_v4_check(sizeof(struct tcphdr),
120 nskb->nh.iph->saddr, 121 nskb->nh.iph->saddr,
121 nskb->nh.iph->daddr, 122 nskb->nh.iph->daddr,
122 csum_partial((char *)tcph, 123 csum_partial((char *)tcph,
@@ -230,7 +231,7 @@ static int check(const char *tablename,
230 } else if (rejinfo->with == IPT_TCP_RESET) { 231 } else if (rejinfo->with == IPT_TCP_RESET) {
231 /* Must specify that it's a TCP packet */ 232 /* Must specify that it's a TCP packet */
232 if (e->ip.proto != IPPROTO_TCP 233 if (e->ip.proto != IPPROTO_TCP
233 || (e->ip.invflags & IPT_INV_PROTO)) { 234 || (e->ip.invflags & XT_INV_PROTO)) {
234 DEBUGP("REJECT: TCP_RESET invalid for non-tcp\n"); 235 DEBUGP("REJECT: TCP_RESET invalid for non-tcp\n");
235 return 0; 236 return 0;
236 } 237 }
@@ -238,8 +239,9 @@ static int check(const char *tablename,
238 return 1; 239 return 1;
239} 240}
240 241
241static struct ipt_target ipt_reject_reg = { 242static struct xt_target ipt_reject_reg = {
242 .name = "REJECT", 243 .name = "REJECT",
244 .family = AF_INET,
243 .target = reject, 245 .target = reject,
244 .targetsize = sizeof(struct ipt_reject_info), 246 .targetsize = sizeof(struct ipt_reject_info),
245 .table = "filter", 247 .table = "filter",
@@ -251,12 +253,12 @@ static struct ipt_target ipt_reject_reg = {
251 253
252static int __init ipt_reject_init(void) 254static int __init ipt_reject_init(void)
253{ 255{
254 return ipt_register_target(&ipt_reject_reg); 256 return xt_register_target(&ipt_reject_reg);
255} 257}
256 258
257static void __exit ipt_reject_fini(void) 259static void __exit ipt_reject_fini(void)
258{ 260{
259 ipt_unregister_target(&ipt_reject_reg); 261 xt_unregister_target(&ipt_reject_reg);
260} 262}
261 263
262module_init(ipt_reject_init); 264module_init(ipt_reject_init);
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index 3dcf29411337..a1cdd1262de2 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -34,6 +34,7 @@
34#include <net/protocol.h> 34#include <net/protocol.h>
35#include <net/checksum.h> 35#include <net/checksum.h>
36#include <linux/netfilter_ipv4.h> 36#include <linux/netfilter_ipv4.h>
37#include <linux/netfilter/x_tables.h>
37#ifdef CONFIG_NF_NAT_NEEDED 38#ifdef CONFIG_NF_NAT_NEEDED
38#include <net/netfilter/nf_nat_rule.h> 39#include <net/netfilter/nf_nat_rule.h>
39#else 40#else
@@ -186,8 +187,9 @@ same_target(struct sk_buff **pskb,
186 return ip_nat_setup_info(ct, &newrange, hooknum); 187 return ip_nat_setup_info(ct, &newrange, hooknum);
187} 188}
188 189
189static struct ipt_target same_reg = { 190static struct xt_target same_reg = {
190 .name = "SAME", 191 .name = "SAME",
192 .family = AF_INET,
191 .target = same_target, 193 .target = same_target,
192 .targetsize = sizeof(struct ipt_same_info), 194 .targetsize = sizeof(struct ipt_same_info),
193 .table = "nat", 195 .table = "nat",
@@ -199,12 +201,12 @@ static struct ipt_target same_reg = {
199 201
200static int __init ipt_same_init(void) 202static int __init ipt_same_init(void)
201{ 203{
202 return ipt_register_target(&same_reg); 204 return xt_register_target(&same_reg);
203} 205}
204 206
205static void __exit ipt_same_fini(void) 207static void __exit ipt_same_fini(void)
206{ 208{
207 ipt_unregister_target(&same_reg); 209 xt_unregister_target(&same_reg);
208} 210}
209 211
210module_init(ipt_same_init); 212module_init(ipt_same_init);
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
deleted file mode 100644
index 93eb5c3c1884..000000000000
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * This is a module which is used for setting the MSS option in TCP packets.
3 *
4 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13
14#include <linux/ip.h>
15#include <net/tcp.h>
16
17#include <linux/netfilter_ipv4/ip_tables.h>
18#include <linux/netfilter_ipv4/ipt_TCPMSS.h>
19
20MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
22MODULE_DESCRIPTION("iptables TCP MSS modification module");
23
24static inline unsigned int
25optlen(const u_int8_t *opt, unsigned int offset)
26{
27 /* Beware zero-length options: make finite progress */
28 if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
29 return 1;
30 else
31 return opt[offset+1];
32}
33
34static unsigned int
35ipt_tcpmss_target(struct sk_buff **pskb,
36 const struct net_device *in,
37 const struct net_device *out,
38 unsigned int hooknum,
39 const struct xt_target *target,
40 const void *targinfo)
41{
42 const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
43 struct tcphdr *tcph;
44 struct iphdr *iph;
45 u_int16_t tcplen, newmss;
46 __be16 newtotlen, oldval;
47 unsigned int i;
48 u_int8_t *opt;
49
50 if (!skb_make_writable(pskb, (*pskb)->len))
51 return NF_DROP;
52
53 iph = (*pskb)->nh.iph;
54 tcplen = (*pskb)->len - iph->ihl*4;
55 tcph = (void *)iph + iph->ihl*4;
56
57 /* Since it passed flags test in tcp match, we know it is is
58 not a fragment, and has data >= tcp header length. SYN
59 packets should not contain data: if they did, then we risk
60 running over MTU, sending Frag Needed and breaking things
61 badly. --RR */
62 if (tcplen != tcph->doff*4) {
63 if (net_ratelimit())
64 printk(KERN_ERR
65 "ipt_tcpmss_target: bad length (%d bytes)\n",
66 (*pskb)->len);
67 return NF_DROP;
68 }
69
70 if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) {
71 if (dst_mtu((*pskb)->dst) <= sizeof(struct iphdr) +
72 sizeof(struct tcphdr)) {
73 if (net_ratelimit())
74 printk(KERN_ERR "ipt_tcpmss_target: "
75 "unknown or invalid path-MTU (%d)\n",
76 dst_mtu((*pskb)->dst));
77 return NF_DROP; /* or IPT_CONTINUE ?? */
78 }
79
80 newmss = dst_mtu((*pskb)->dst) - sizeof(struct iphdr) -
81 sizeof(struct tcphdr);
82 } else
83 newmss = tcpmssinfo->mss;
84
85 opt = (u_int8_t *)tcph;
86 for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
87 if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
88 opt[i+1] == TCPOLEN_MSS) {
89 u_int16_t oldmss;
90
91 oldmss = (opt[i+2] << 8) | opt[i+3];
92
93 if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU &&
94 oldmss <= newmss)
95 return IPT_CONTINUE;
96
97 opt[i+2] = (newmss & 0xff00) >> 8;
98 opt[i+3] = (newmss & 0x00ff);
99
100 nf_proto_csum_replace2(&tcph->check, *pskb,
101 htons(oldmss), htons(newmss), 0);
102 return IPT_CONTINUE;
103 }
104 }
105
106 /*
107 * MSS Option not found ?! add it..
108 */
109 if (skb_tailroom((*pskb)) < TCPOLEN_MSS) {
110 struct sk_buff *newskb;
111
112 newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
113 TCPOLEN_MSS, GFP_ATOMIC);
114 if (!newskb)
115 return NF_DROP;
116 kfree_skb(*pskb);
117 *pskb = newskb;
118 iph = (*pskb)->nh.iph;
119 tcph = (void *)iph + iph->ihl*4;
120 }
121
122 skb_put((*pskb), TCPOLEN_MSS);
123
124 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
125 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
126
127 nf_proto_csum_replace2(&tcph->check, *pskb,
128 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
129 opt[0] = TCPOPT_MSS;
130 opt[1] = TCPOLEN_MSS;
131 opt[2] = (newmss & 0xff00) >> 8;
132 opt[3] = (newmss & 0x00ff);
133
134 nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
135
136 oldval = ((__be16 *)tcph)[6];
137 tcph->doff += TCPOLEN_MSS/4;
138 nf_proto_csum_replace2(&tcph->check, *pskb,
139 oldval, ((__be16 *)tcph)[6], 0);
140
141 newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS);
142 nf_csum_replace2(&iph->check, iph->tot_len, newtotlen);
143 iph->tot_len = newtotlen;
144 return IPT_CONTINUE;
145}
146
147#define TH_SYN 0x02
148
149static inline int find_syn_match(const struct ipt_entry_match *m)
150{
151 const struct ipt_tcp *tcpinfo = (const struct ipt_tcp *)m->data;
152
153 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
154 tcpinfo->flg_cmp & TH_SYN &&
155 !(tcpinfo->invflags & IPT_TCP_INV_FLAGS))
156 return 1;
157
158 return 0;
159}
160
161/* Must specify -p tcp --syn/--tcp-flags SYN */
162static int
163ipt_tcpmss_checkentry(const char *tablename,
164 const void *e_void,
165 const struct xt_target *target,
166 void *targinfo,
167 unsigned int hook_mask)
168{
169 const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
170 const struct ipt_entry *e = e_void;
171
172 if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU &&
173 (hook_mask & ~((1 << NF_IP_FORWARD) |
174 (1 << NF_IP_LOCAL_OUT) |
175 (1 << NF_IP_POST_ROUTING))) != 0) {
176 printk("TCPMSS: path-MTU clamping only supported in "
177 "FORWARD, OUTPUT and POSTROUTING hooks\n");
178 return 0;
179 }
180
181 if (IPT_MATCH_ITERATE(e, find_syn_match))
182 return 1;
183 printk("TCPMSS: Only works on TCP SYN packets\n");
184 return 0;
185}
186
187static struct ipt_target ipt_tcpmss_reg = {
188 .name = "TCPMSS",
189 .target = ipt_tcpmss_target,
190 .targetsize = sizeof(struct ipt_tcpmss_info),
191 .proto = IPPROTO_TCP,
192 .checkentry = ipt_tcpmss_checkentry,
193 .me = THIS_MODULE,
194};
195
196static int __init ipt_tcpmss_init(void)
197{
198 return ipt_register_target(&ipt_tcpmss_reg);
199}
200
201static void __exit ipt_tcpmss_fini(void)
202{
203 ipt_unregister_target(&ipt_tcpmss_reg);
204}
205
206module_init(ipt_tcpmss_init);
207module_exit(ipt_tcpmss_fini);
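
ipt_TCPMSS.c is deleted outright here; the deletion alone does not show the replacement, but in the context of this series the target is presumably re-added as a shared, protocol-independent xtables module rather than dropped. The trickiest part of the removed code is the TCP option walk, where optlen() guards against NOP/EOL and bogus zero-length options so the loop always makes progress. A compilable sketch of that walk over a hand-built option block (the sample bytes are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    #define TCPOPT_NOP   1
    #define TCPOPT_MSS   2
    #define TCPOLEN_MSS  4

    static unsigned int optlen(const uint8_t *opt, unsigned int offset)
    {
            /* Beware zero-length options: make finite progress */
            if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
                    return 1;
            return opt[offset + 1];
    }

    int main(void)
    {
            /* NOP, NOP, MSS(1460), SACK-permitted: a typical SYN option block */
            uint8_t opt[] = { 1, 1, 2, 4, 0x05, 0xb4, 4, 2 };
            unsigned int optsize = sizeof(opt);

            for (unsigned int i = 0; i < optsize; i += optlen(opt, i)) {
                    if (opt[i] == TCPOPT_MSS && optsize - i >= TCPOLEN_MSS &&
                        opt[i + 1] == TCPOLEN_MSS) {
                            unsigned int mss = (opt[i + 2] << 8) | opt[i + 3];
                            printf("MSS option found: %u\n", mss);
                    }
            }
            return 0;
    }
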
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 18e74ac4d425..29b05a6bd108 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -13,7 +13,7 @@
13#include <linux/ip.h> 13#include <linux/ip.h>
14#include <net/checksum.h> 14#include <net/checksum.h>
15 15
16#include <linux/netfilter_ipv4/ip_tables.h> 16#include <linux/netfilter/x_tables.h>
17#include <linux/netfilter_ipv4/ipt_TOS.h> 17#include <linux/netfilter_ipv4/ipt_TOS.h>
18 18
19MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
@@ -40,7 +40,7 @@ target(struct sk_buff **pskb,
40 iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; 40 iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
41 nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); 41 nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
42 } 42 }
43 return IPT_CONTINUE; 43 return XT_CONTINUE;
44} 44}
45 45
46static int 46static int
@@ -63,8 +63,9 @@ checkentry(const char *tablename,
63 return 1; 63 return 1;
64} 64}
65 65
66static struct ipt_target ipt_tos_reg = { 66static struct xt_target ipt_tos_reg = {
67 .name = "TOS", 67 .name = "TOS",
68 .family = AF_INET,
68 .target = target, 69 .target = target,
69 .targetsize = sizeof(struct ipt_tos_target_info), 70 .targetsize = sizeof(struct ipt_tos_target_info),
70 .table = "mangle", 71 .table = "mangle",
@@ -74,12 +75,12 @@ static struct ipt_target ipt_tos_reg = {
74 75
75static int __init ipt_tos_init(void) 76static int __init ipt_tos_init(void)
76{ 77{
77 return ipt_register_target(&ipt_tos_reg); 78 return xt_register_target(&ipt_tos_reg);
78} 79}
79 80
80static void __exit ipt_tos_fini(void) 81static void __exit ipt_tos_fini(void)
81{ 82{
82 ipt_unregister_target(&ipt_tos_reg); 83 xt_unregister_target(&ipt_tos_reg);
83} 84}
84 85
85module_init(ipt_tos_init); 86module_init(ipt_tos_init);
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index fffe5ca82e91..d2b6fa3f9dcd 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -12,7 +12,7 @@
12#include <linux/ip.h> 12#include <linux/ip.h>
13#include <net/checksum.h> 13#include <net/checksum.h>
14 14
15#include <linux/netfilter_ipv4/ip_tables.h> 15#include <linux/netfilter/x_tables.h>
16#include <linux/netfilter_ipv4/ipt_TTL.h> 16#include <linux/netfilter_ipv4/ipt_TTL.h>
17 17
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -59,7 +59,7 @@ ipt_ttl_target(struct sk_buff **pskb,
59 iph->ttl = new_ttl; 59 iph->ttl = new_ttl;
60 } 60 }
61 61
62 return IPT_CONTINUE; 62 return XT_CONTINUE;
63} 63}
64 64
65static int ipt_ttl_checkentry(const char *tablename, 65static int ipt_ttl_checkentry(const char *tablename,
@@ -80,8 +80,9 @@ static int ipt_ttl_checkentry(const char *tablename,
80 return 1; 80 return 1;
81} 81}
82 82
83static struct ipt_target ipt_TTL = { 83static struct xt_target ipt_TTL = {
84 .name = "TTL", 84 .name = "TTL",
85 .family = AF_INET,
85 .target = ipt_ttl_target, 86 .target = ipt_ttl_target,
86 .targetsize = sizeof(struct ipt_TTL_info), 87 .targetsize = sizeof(struct ipt_TTL_info),
87 .table = "mangle", 88 .table = "mangle",
@@ -91,12 +92,12 @@ static struct ipt_target ipt_TTL = {
91 92
92static int __init ipt_ttl_init(void) 93static int __init ipt_ttl_init(void)
93{ 94{
94 return ipt_register_target(&ipt_TTL); 95 return xt_register_target(&ipt_TTL);
95} 96}
96 97
97static void __exit ipt_ttl_fini(void) 98static void __exit ipt_ttl_fini(void)
98{ 99{
99 ipt_unregister_target(&ipt_TTL); 100 xt_unregister_target(&ipt_TTL);
100} 101}
101 102
102module_init(ipt_ttl_init); 103module_init(ipt_ttl_init);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index dbd34783a64d..7af57a3a1f36 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -57,7 +57,7 @@
57#include <linux/mm.h> 57#include <linux/mm.h>
58#include <linux/moduleparam.h> 58#include <linux/moduleparam.h>
59#include <linux/netfilter.h> 59#include <linux/netfilter.h>
60#include <linux/netfilter_ipv4/ip_tables.h> 60#include <linux/netfilter/x_tables.h>
61#include <linux/netfilter_ipv4/ipt_ULOG.h> 61#include <linux/netfilter_ipv4/ipt_ULOG.h>
62#include <net/sock.h> 62#include <net/sock.h>
63#include <linux/bitops.h> 63#include <linux/bitops.h>
@@ -132,7 +132,6 @@ static void ulog_send(unsigned int nlgroupnum)
132 ub->qlen = 0; 132 ub->qlen = 0;
133 ub->skb = NULL; 133 ub->skb = NULL;
134 ub->lastnlh = NULL; 134 ub->lastnlh = NULL;
135
136} 135}
137 136
138 137
@@ -314,7 +313,7 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
314 313
315 ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL); 314 ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL);
316 315
317 return IPT_CONTINUE; 316 return XT_CONTINUE;
318} 317}
319 318
320static void ipt_logfn(unsigned int pf, 319static void ipt_logfn(unsigned int pf,
@@ -363,8 +362,9 @@ static int ipt_ulog_checkentry(const char *tablename,
363 return 1; 362 return 1;
364} 363}
365 364
366static struct ipt_target ipt_ulog_reg = { 365static struct xt_target ipt_ulog_reg = {
367 .name = "ULOG", 366 .name = "ULOG",
367 .family = AF_INET,
368 .target = ipt_ulog_target, 368 .target = ipt_ulog_target,
369 .targetsize = sizeof(struct ipt_ulog_info), 369 .targetsize = sizeof(struct ipt_ulog_info),
370 .checkentry = ipt_ulog_checkentry, 370 .checkentry = ipt_ulog_checkentry,
@@ -379,7 +379,7 @@ static struct nf_logger ipt_ulog_logger = {
379 379
380static int __init ipt_ulog_init(void) 380static int __init ipt_ulog_init(void)
381{ 381{
382 int i; 382 int ret, i;
383 383
384 DEBUGP("ipt_ULOG: init module\n"); 384 DEBUGP("ipt_ULOG: init module\n");
385 385
@@ -400,9 +400,10 @@ static int __init ipt_ulog_init(void)
400 if (!nflognl) 400 if (!nflognl)
401 return -ENOMEM; 401 return -ENOMEM;
402 402
403 if (ipt_register_target(&ipt_ulog_reg) != 0) { 403 ret = xt_register_target(&ipt_ulog_reg);
404 if (ret < 0) {
404 sock_release(nflognl->sk_socket); 405 sock_release(nflognl->sk_socket);
405 return -EINVAL; 406 return ret;
406 } 407 }
407 if (nflog) 408 if (nflog)
408 nf_log_register(PF_INET, &ipt_ulog_logger); 409 nf_log_register(PF_INET, &ipt_ulog_logger);
@@ -419,7 +420,7 @@ static void __exit ipt_ulog_fini(void)
419 420
420 if (nflog) 421 if (nflog)
421 nf_log_unregister_logger(&ipt_ulog_logger); 422 nf_log_unregister_logger(&ipt_ulog_logger);
422 ipt_unregister_target(&ipt_ulog_reg); 423 xt_unregister_target(&ipt_ulog_reg);
423 sock_release(nflognl->sk_socket); 424 sock_release(nflognl->sk_socket);
424 425
425 /* remove pending timers and free allocated skb's */ 426 /* remove pending timers and free allocated skb's */
@@ -435,7 +436,6 @@ static void __exit ipt_ulog_fini(void)
435 ub->skb = NULL; 436 ub->skb = NULL;
436 } 437 }
437 } 438 }
438
439} 439}
440 440
441module_init(ipt_ulog_init); 441module_init(ipt_ulog_init);
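
Beyond the xt_target conversion, ipt_ULOG's init path now keeps the real error code from xt_register_target() and preserves the rollback ordering: the netlink socket is created first and released again if target registration fails. A small sketch of that acquire-then-undo shape; acquire_socket() and register_target() are placeholders, not kernel API.

    #include <stdio.h>
    #include <errno.h>

    static int have_socket;

    static int acquire_socket(void)  { have_socket = 1; return 0; }
    static void release_socket(void) { have_socket = 0; }
    static int register_target(void) { return -EINVAL; /* simulate failure */ }

    static int module_init_sketch(void)
    {
            int ret;

            if (acquire_socket() < 0)
                    return -ENOMEM;

            ret = register_target();
            if (ret < 0) {
                    release_socket();   /* undo the earlier step */
                    return ret;         /* propagate the real error */
            }
            return 0;
    }

    int main(void)
    {
            printf("init: %d, socket held: %d\n", module_init_sketch(), have_socket);
            return 0;
    }
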
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 7b60eb74788b..648f555c4d16 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -16,7 +16,7 @@
16#include <net/route.h> 16#include <net/route.h>
17 17
18#include <linux/netfilter_ipv4/ipt_addrtype.h> 18#include <linux/netfilter_ipv4/ipt_addrtype.h>
19#include <linux/netfilter_ipv4/ip_tables.h> 19#include <linux/netfilter/x_tables.h>
20 20
21MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -44,8 +44,9 @@ static int match(const struct sk_buff *skb,
44 return ret; 44 return ret;
45} 45}
46 46
47static struct ipt_match addrtype_match = { 47static struct xt_match addrtype_match = {
48 .name = "addrtype", 48 .name = "addrtype",
49 .family = AF_INET,
49 .match = match, 50 .match = match,
50 .matchsize = sizeof(struct ipt_addrtype_info), 51 .matchsize = sizeof(struct ipt_addrtype_info),
51 .me = THIS_MODULE 52 .me = THIS_MODULE
@@ -53,12 +54,12 @@ static struct ipt_match addrtype_match = {
53 54
54static int __init ipt_addrtype_init(void) 55static int __init ipt_addrtype_init(void)
55{ 56{
56 return ipt_register_match(&addrtype_match); 57 return xt_register_match(&addrtype_match);
57} 58}
58 59
59static void __exit ipt_addrtype_fini(void) 60static void __exit ipt_addrtype_fini(void)
60{ 61{
61 ipt_unregister_match(&addrtype_match); 62 xt_unregister_match(&addrtype_match);
62} 63}
63 64
64module_init(ipt_addrtype_init); 65module_init(ipt_addrtype_init);
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 1798f86bc534..42f41224a43a 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -6,12 +6,13 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include <linux/in.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/skbuff.h> 11#include <linux/skbuff.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12 13
13#include <linux/netfilter_ipv4/ipt_ah.h> 14#include <linux/netfilter_ipv4/ipt_ah.h>
14#include <linux/netfilter_ipv4/ip_tables.h> 15#include <linux/netfilter/x_tables.h>
15 16
16MODULE_LICENSE("GPL"); 17MODULE_LICENSE("GPL");
17MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>"); 18MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
@@ -86,8 +87,9 @@ checkentry(const char *tablename,
86 return 1; 87 return 1;
87} 88}
88 89
89static struct ipt_match ah_match = { 90static struct xt_match ah_match = {
90 .name = "ah", 91 .name = "ah",
92 .family = AF_INET,
91 .match = match, 93 .match = match,
92 .matchsize = sizeof(struct ipt_ah), 94 .matchsize = sizeof(struct ipt_ah),
93 .proto = IPPROTO_AH, 95 .proto = IPPROTO_AH,
@@ -97,12 +99,12 @@ static struct ipt_match ah_match = {
97 99
98static int __init ipt_ah_init(void) 100static int __init ipt_ah_init(void)
99{ 101{
100 return ipt_register_match(&ah_match); 102 return xt_register_match(&ah_match);
101} 103}
102 104
103static void __exit ipt_ah_fini(void) 105static void __exit ipt_ah_fini(void)
104{ 106{
105 ipt_unregister_match(&ah_match); 107 xt_unregister_match(&ah_match);
106} 108}
107 109
108module_init(ipt_ah_init); 110module_init(ipt_ah_init);
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index dafbdec0efc0..37508b2cfea6 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -9,10 +9,13 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/in.h>
13#include <linux/ip.h>
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/skbuff.h> 15#include <linux/skbuff.h>
14#include <linux/tcp.h> 16#include <linux/tcp.h>
15 17
18#include <linux/netfilter/x_tables.h>
16#include <linux/netfilter_ipv4/ip_tables.h> 19#include <linux/netfilter_ipv4/ip_tables.h>
17#include <linux/netfilter_ipv4/ipt_ecn.h> 20#include <linux/netfilter_ipv4/ipt_ecn.h>
18 21
@@ -109,8 +112,9 @@ static int checkentry(const char *tablename, const void *ip_void,
109 return 1; 112 return 1;
110} 113}
111 114
112static struct ipt_match ecn_match = { 115static struct xt_match ecn_match = {
113 .name = "ecn", 116 .name = "ecn",
117 .family = AF_INET,
114 .match = match, 118 .match = match,
115 .matchsize = sizeof(struct ipt_ecn_info), 119 .matchsize = sizeof(struct ipt_ecn_info),
116 .checkentry = checkentry, 120 .checkentry = checkentry,
@@ -119,12 +123,12 @@ static struct ipt_match ecn_match = {
119 123
120static int __init ipt_ecn_init(void) 124static int __init ipt_ecn_init(void)
121{ 125{
122 return ipt_register_match(&ecn_match); 126 return xt_register_match(&ecn_match);
123} 127}
124 128
125static void __exit ipt_ecn_fini(void) 129static void __exit ipt_ecn_fini(void)
126{ 130{
127 ipt_unregister_match(&ecn_match); 131 xt_unregister_match(&ecn_match);
128} 132}
129 133
130module_init(ipt_ecn_init); 134module_init(ipt_ecn_init);
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c
index 5202edd8d333..05de593be94c 100644
--- a/net/ipv4/netfilter/ipt_iprange.c
+++ b/net/ipv4/netfilter/ipt_iprange.c
@@ -10,7 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/ip.h> 12#include <linux/ip.h>
13#include <linux/netfilter_ipv4/ip_tables.h> 13#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter_ipv4/ipt_iprange.h> 14#include <linux/netfilter_ipv4/ipt_iprange.h>
15 15
16MODULE_LICENSE("GPL"); 16MODULE_LICENSE("GPL");
@@ -63,22 +63,22 @@ match(const struct sk_buff *skb,
63 return 1; 63 return 1;
64} 64}
65 65
66static struct ipt_match iprange_match = { 66static struct xt_match iprange_match = {
67 .name = "iprange", 67 .name = "iprange",
68 .family = AF_INET,
68 .match = match, 69 .match = match,
69 .matchsize = sizeof(struct ipt_iprange_info), 70 .matchsize = sizeof(struct ipt_iprange_info),
70 .destroy = NULL,
71 .me = THIS_MODULE 71 .me = THIS_MODULE
72}; 72};
73 73
74static int __init ipt_iprange_init(void) 74static int __init ipt_iprange_init(void)
75{ 75{
76 return ipt_register_match(&iprange_match); 76 return xt_register_match(&iprange_match);
77} 77}
78 78
79static void __exit ipt_iprange_fini(void) 79static void __exit ipt_iprange_fini(void)
80{ 80{
81 ipt_unregister_match(&iprange_match); 81 xt_unregister_match(&iprange_match);
82} 82}
83 83
84module_init(ipt_iprange_init); 84module_init(ipt_iprange_init);
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 78c336f12a9e..9f496ac834b5 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -15,7 +15,7 @@
15#include <net/sock.h> 15#include <net/sock.h>
16 16
17#include <linux/netfilter_ipv4/ipt_owner.h> 17#include <linux/netfilter_ipv4/ipt_owner.h>
18#include <linux/netfilter_ipv4/ip_tables.h> 18#include <linux/netfilter/x_tables.h>
19 19
20MODULE_LICENSE("GPL"); 20MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); 21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
@@ -68,8 +68,9 @@ checkentry(const char *tablename,
68 return 1; 68 return 1;
69} 69}
70 70
71static struct ipt_match owner_match = { 71static struct xt_match owner_match = {
72 .name = "owner", 72 .name = "owner",
73 .family = AF_INET,
73 .match = match, 74 .match = match,
74 .matchsize = sizeof(struct ipt_owner_info), 75 .matchsize = sizeof(struct ipt_owner_info),
75 .hooks = (1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING), 76 .hooks = (1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING),
@@ -79,12 +80,12 @@ static struct ipt_match owner_match = {
79 80
80static int __init ipt_owner_init(void) 81static int __init ipt_owner_init(void)
81{ 82{
82 return ipt_register_match(&owner_match); 83 return xt_register_match(&owner_match);
83} 84}
84 85
85static void __exit ipt_owner_fini(void) 86static void __exit ipt_owner_fini(void)
86{ 87{
87 ipt_unregister_match(&owner_match); 88 xt_unregister_match(&owner_match);
88} 89}
89 90
90module_init(ipt_owner_init); 91module_init(ipt_owner_init);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 4db0e73c56f1..6b97b6796173 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -12,6 +12,7 @@
12 * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org 12 * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/ip.h>
15#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
16#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
17#include <linux/seq_file.h> 18#include <linux/seq_file.h>
@@ -24,7 +25,7 @@
24#include <linux/skbuff.h> 25#include <linux/skbuff.h>
25#include <linux/inet.h> 26#include <linux/inet.h>
26 27
27#include <linux/netfilter_ipv4/ip_tables.h> 28#include <linux/netfilter/x_tables.h>
28#include <linux/netfilter_ipv4/ipt_recent.h> 29#include <linux/netfilter_ipv4/ipt_recent.h>
29 30
30MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 31MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -462,8 +463,9 @@ static struct file_operations recent_fops = {
462}; 463};
463#endif /* CONFIG_PROC_FS */ 464#endif /* CONFIG_PROC_FS */
464 465
465static struct ipt_match recent_match = { 466static struct xt_match recent_match = {
466 .name = "recent", 467 .name = "recent",
468 .family = AF_INET,
467 .match = ipt_recent_match, 469 .match = ipt_recent_match,
468 .matchsize = sizeof(struct ipt_recent_info), 470 .matchsize = sizeof(struct ipt_recent_info),
469 .checkentry = ipt_recent_checkentry, 471 .checkentry = ipt_recent_checkentry,
@@ -479,13 +481,13 @@ static int __init ipt_recent_init(void)
479 return -EINVAL; 481 return -EINVAL;
480 ip_list_hash_size = 1 << fls(ip_list_tot); 482 ip_list_hash_size = 1 << fls(ip_list_tot);
481 483
482 err = ipt_register_match(&recent_match); 484 err = xt_register_match(&recent_match);
483#ifdef CONFIG_PROC_FS 485#ifdef CONFIG_PROC_FS
484 if (err) 486 if (err)
485 return err; 487 return err;
486 proc_dir = proc_mkdir("ipt_recent", proc_net); 488 proc_dir = proc_mkdir("ipt_recent", proc_net);
487 if (proc_dir == NULL) { 489 if (proc_dir == NULL) {
488 ipt_unregister_match(&recent_match); 490 xt_unregister_match(&recent_match);
489 err = -ENOMEM; 491 err = -ENOMEM;
490 } 492 }
491#endif 493#endif
@@ -495,7 +497,7 @@ static int __init ipt_recent_init(void)
495static void __exit ipt_recent_exit(void) 497static void __exit ipt_recent_exit(void)
496{ 498{
497 BUG_ON(!list_empty(&tables)); 499 BUG_ON(!list_empty(&tables));
498 ipt_unregister_match(&recent_match); 500 xt_unregister_match(&recent_match);
499#ifdef CONFIG_PROC_FS 501#ifdef CONFIG_PROC_FS
500 remove_proc_entry("ipt_recent", proc_net); 502 remove_proc_entry("ipt_recent", proc_net);
501#endif 503#endif
diff --git a/net/ipv4/netfilter/ipt_tos.c b/net/ipv4/netfilter/ipt_tos.c
index 5549c39c7851..5d33b51d49d8 100644
--- a/net/ipv4/netfilter/ipt_tos.c
+++ b/net/ipv4/netfilter/ipt_tos.c
@@ -8,11 +8,12 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/ip.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/skbuff.h> 13#include <linux/skbuff.h>
13 14
14#include <linux/netfilter_ipv4/ipt_tos.h> 15#include <linux/netfilter_ipv4/ipt_tos.h>
15#include <linux/netfilter_ipv4/ip_tables.h> 16#include <linux/netfilter/x_tables.h>
16 17
17MODULE_LICENSE("GPL"); 18MODULE_LICENSE("GPL");
18MODULE_DESCRIPTION("iptables TOS match module"); 19MODULE_DESCRIPTION("iptables TOS match module");
@@ -32,8 +33,9 @@ match(const struct sk_buff *skb,
32 return (skb->nh.iph->tos == info->tos) ^ info->invert; 33 return (skb->nh.iph->tos == info->tos) ^ info->invert;
33} 34}
34 35
35static struct ipt_match tos_match = { 36static struct xt_match tos_match = {
36 .name = "tos", 37 .name = "tos",
38 .family = AF_INET,
37 .match = match, 39 .match = match,
38 .matchsize = sizeof(struct ipt_tos_info), 40 .matchsize = sizeof(struct ipt_tos_info),
39 .me = THIS_MODULE, 41 .me = THIS_MODULE,
@@ -41,12 +43,12 @@ static struct ipt_match tos_match = {
41 43
42static int __init ipt_multiport_init(void) 44static int __init ipt_multiport_init(void)
43{ 45{
44 return ipt_register_match(&tos_match); 46 return xt_register_match(&tos_match);
45} 47}
46 48
47static void __exit ipt_multiport_fini(void) 49static void __exit ipt_multiport_fini(void)
48{ 50{
49 ipt_unregister_match(&tos_match); 51 xt_unregister_match(&tos_match);
50} 52}
51 53
52module_init(ipt_multiport_init); 54module_init(ipt_multiport_init);
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
index a5243bdb87d7..d5cd984e5ed2 100644
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ b/net/ipv4/netfilter/ipt_ttl.c
@@ -9,11 +9,12 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/ip.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14 15
15#include <linux/netfilter_ipv4/ipt_ttl.h> 16#include <linux/netfilter_ipv4/ipt_ttl.h>
16#include <linux/netfilter_ipv4/ip_tables.h> 17#include <linux/netfilter/x_tables.h>
17 18
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 19MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
19MODULE_DESCRIPTION("IP tables TTL matching module"); 20MODULE_DESCRIPTION("IP tables TTL matching module");
@@ -48,8 +49,9 @@ static int match(const struct sk_buff *skb,
48 return 0; 49 return 0;
49} 50}
50 51
51static struct ipt_match ttl_match = { 52static struct xt_match ttl_match = {
52 .name = "ttl", 53 .name = "ttl",
54 .family = AF_INET,
53 .match = match, 55 .match = match,
54 .matchsize = sizeof(struct ipt_ttl_info), 56 .matchsize = sizeof(struct ipt_ttl_info),
55 .me = THIS_MODULE, 57 .me = THIS_MODULE,
@@ -57,13 +59,12 @@ static struct ipt_match ttl_match = {
57 59
58static int __init ipt_ttl_init(void) 60static int __init ipt_ttl_init(void)
59{ 61{
60 return ipt_register_match(&ttl_match); 62 return xt_register_match(&ttl_match);
61} 63}
62 64
63static void __exit ipt_ttl_fini(void) 65static void __exit ipt_ttl_fini(void)
64{ 66{
65 ipt_unregister_match(&ttl_match); 67 xt_unregister_match(&ttl_match);
66
67} 68}
68 69
69module_init(ipt_ttl_init); 70module_init(ipt_ttl_init);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index e2e7dd8d7903..51053cb42f43 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -74,7 +74,7 @@ static struct
74 } 74 }
75}; 75};
76 76
77static struct ipt_table packet_filter = { 77static struct xt_table packet_filter = {
78 .name = "filter", 78 .name = "filter",
79 .valid_hooks = FILTER_VALID_HOOKS, 79 .valid_hooks = FILTER_VALID_HOOKS,
80 .lock = RW_LOCK_UNLOCKED, 80 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index af2939889444..a532e4d84332 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -103,7 +103,7 @@ static struct
103 } 103 }
104}; 104};
105 105
106static struct ipt_table packet_mangler = { 106static struct xt_table packet_mangler = {
107 .name = "mangle", 107 .name = "mangle",
108 .valid_hooks = MANGLE_VALID_HOOKS, 108 .valid_hooks = MANGLE_VALID_HOOKS,
109 .lock = RW_LOCK_UNLOCKED, 109 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index bcbeb4aeacd9..5277550fa6b5 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -79,7 +79,7 @@ static struct
79 } 79 }
80}; 80};
81 81
82static struct ipt_table packet_raw = { 82static struct xt_table packet_raw = {
83 .name = "raw", 83 .name = "raw",
84 .valid_hooks = RAW_VALID_HOOKS, 84 .valid_hooks = RAW_VALID_HOOKS,
85 .lock = RW_LOCK_UNLOCKED, 85 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 86a92272b053..998b2557692c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -254,8 +254,9 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
254 if (maniptype == IP_NAT_MANIP_SRC) { 254 if (maniptype == IP_NAT_MANIP_SRC) {
255 if (find_appropriate_src(orig_tuple, tuple, range)) { 255 if (find_appropriate_src(orig_tuple, tuple, range)) {
256 DEBUGP("get_unique_tuple: Found current src map\n"); 256 DEBUGP("get_unique_tuple: Found current src map\n");
257 if (!nf_nat_used_tuple(tuple, ct)) 257 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
258 return; 258 if (!nf_nat_used_tuple(tuple, ct))
259 return;
259 } 260 }
260 } 261 }
261 262
@@ -269,6 +270,13 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
269 270
270 proto = nf_nat_proto_find_get(orig_tuple->dst.protonum); 271 proto = nf_nat_proto_find_get(orig_tuple->dst.protonum);
271 272
273 /* Change protocol info to have some randomization */
274 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
275 proto->unique_tuple(tuple, range, maniptype, ct);
276 nf_nat_proto_put(proto);
277 return;
278 }
279
272 /* Only bother mapping if it's not already in range and unique */ 280 /* Only bother mapping if it's not already in range and unique */
273 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || 281 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
274 proto->in_range(tuple, maniptype, &range->min, &range->max)) && 282 proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
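
The nf_nat_core change wires in IP_NAT_RANGE_PROTO_RANDOM: when the flag is set, an existing source mapping is not blindly reused, and the L4 protocol's unique_tuple() is asked for a randomized port before the normal in-range/unique shortcut runs. A condensed sketch of that decision order, with the conntrack internals reduced to booleans and the flag value chosen only for illustration:

    #include <stdio.h>
    #include <stdbool.h>

    #define IP_NAT_RANGE_PROTO_RANDOM 0x4   /* value illustrative */

    static void pick_tuple(unsigned int flags, bool found_src_map, bool tuple_unused)
    {
            if (found_src_map) {
                    /* Reusing an old mapping would defeat randomization. */
                    if (!(flags & IP_NAT_RANGE_PROTO_RANDOM) && tuple_unused) {
                            printf("reuse existing source mapping\n");
                            return;
                    }
            }

            if (flags & IP_NAT_RANGE_PROTO_RANDOM) {
                    printf("ask protocol for a randomized port\n");
                    return;
            }

            printf("fall through to normal in-range/unique selection\n");
    }

    int main(void)
    {
            pick_tuple(0, true, true);
            pick_tuple(IP_NAT_RANGE_PROTO_RANDOM, true, true);
            return 0;
    }
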
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 98fbfc84d183..dc6738bdfab7 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -176,7 +176,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
176 datalen = (*pskb)->len - iph->ihl*4; 176 datalen = (*pskb)->len - iph->ihl*4;
177 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 177 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
178 tcph->check = 0; 178 tcph->check = 0;
179 tcph->check = tcp_v4_check(tcph, datalen, 179 tcph->check = tcp_v4_check(datalen,
180 iph->saddr, iph->daddr, 180 iph->saddr, iph->daddr,
181 csum_partial((char *)tcph, 181 csum_partial((char *)tcph,
182 datalen, 0)); 182 datalen, 0));
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 7e26a7e9bee1..439164c7a626 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12#include <linux/tcp.h> 13#include <linux/tcp.h>
13 14
@@ -75,6 +76,9 @@ tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
75 range_size = ntohs(range->max.tcp.port) - min + 1; 76 range_size = ntohs(range->max.tcp.port) - min + 1;
76 } 77 }
77 78
79 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
80 port = net_random();
81
78 for (i = 0; i < range_size; i++, port++) { 82 for (i = 0; i < range_size; i++, port++) {
79 *portptr = htons(min + port % range_size); 83 *portptr = htons(min + port % range_size);
80 if (!nf_nat_used_tuple(tuple, ct)) 84 if (!nf_nat_used_tuple(tuple, ct))
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index ab0ce4c8699f..8cae6e063bb6 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/random.h>
11#include <linux/ip.h> 12#include <linux/ip.h>
12#include <linux/udp.h> 13#include <linux/udp.h>
13 14
@@ -73,6 +74,9 @@ udp_unique_tuple(struct nf_conntrack_tuple *tuple,
73 range_size = ntohs(range->max.udp.port) - min + 1; 74 range_size = ntohs(range->max.udp.port) - min + 1;
74 } 75 }
75 76
77 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
78 port = net_random();
79
76 for (i = 0; i < range_size; i++, port++) { 80 for (i = 0; i < range_size; i++, port++) {
77 *portptr = htons(min + port % range_size); 81 *portptr = htons(min + port % range_size);
78 if (!nf_nat_used_tuple(tuple, ct)) 82 if (!nf_nat_used_tuple(tuple, ct))
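
In both nf_nat_proto_tcp and nf_nat_proto_udp the functional change is only the starting point of the port search: with IP_NAT_RANGE_PROTO_RANDOM set, the probe begins at a random offset (net_random() in the kernel) and then walks the range linearly, wrapping inside [min, min + range_size) exactly as before. A userspace sketch of that selection loop, with is_port_free() standing in for the nf_nat_used_tuple() check:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static int is_port_free(unsigned int port)
    {
            return port != 32768 && port != 32769;  /* pretend two ports are taken */
    }

    static int pick_port(unsigned int min, unsigned int range_size, int randomize)
    {
            unsigned int port = 0;

            if (randomize)
                    port = (unsigned int)rand();    /* net_random() in the kernel */

            for (unsigned int i = 0; i < range_size; i++, port++) {
                    unsigned int candidate = min + port % range_size;
                    if (is_port_free(candidate))
                            return (int)candidate;
            }
            return -1;      /* range exhausted */
    }

    int main(void)
    {
            srand((unsigned int)time(NULL));
            printf("chosen port: %d\n", pick_port(32768, 61000 - 32768 + 1, 1));
            return 0;
    }
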
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index b868ee0195d4..7f95b4e2eb31 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -119,7 +119,7 @@ static struct
119 } 119 }
120}; 120};
121 121
122static struct ipt_table nat_table = { 122static struct xt_table nat_table = {
123 .name = "nat", 123 .name = "nat",
124 .valid_hooks = NAT_VALID_HOOKS, 124 .valid_hooks = NAT_VALID_HOOKS,
125 .lock = RW_LOCK_UNLOCKED, 125 .lock = RW_LOCK_UNLOCKED,
@@ -226,6 +226,10 @@ static int ipt_dnat_checkentry(const char *tablename,
226 printk("DNAT: multiple ranges no longer supported\n"); 226 printk("DNAT: multiple ranges no longer supported\n");
227 return 0; 227 return 0;
228 } 228 }
229 if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
230 printk("DNAT: port randomization not supported\n");
231 return 0;
232 }
229 return 1; 233 return 1;
230} 234}
231 235
@@ -290,7 +294,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
290 return ret; 294 return ret;
291} 295}
292 296
293static struct ipt_target ipt_snat_reg = { 297static struct xt_target ipt_snat_reg = {
294 .name = "SNAT", 298 .name = "SNAT",
295 .target = ipt_snat_target, 299 .target = ipt_snat_target,
296 .targetsize = sizeof(struct nf_nat_multi_range_compat), 300 .targetsize = sizeof(struct nf_nat_multi_range_compat),
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 00d6dea9f7f3..5a964a167c13 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -32,12 +32,6 @@
32#define DEBUGP(format, args...) 32#define DEBUGP(format, args...)
33#endif 33#endif
34 34
35#define HOOKNAME(hooknum) ((hooknum) == NF_IP_POST_ROUTING ? "POST_ROUTING" \
36 : ((hooknum) == NF_IP_PRE_ROUTING ? "PRE_ROUTING" \
37 : ((hooknum) == NF_IP_LOCAL_OUT ? "LOCAL_OUT" \
38 : ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN" \
39 : "*ERROR*")))
40
41#ifdef CONFIG_XFRM 35#ifdef CONFIG_XFRM
42static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) 36static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
43{ 37{
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a6c63bbd9ddb..fed6a1e7af9e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -489,7 +489,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
489 } 489 }
490 490
491 security_sk_classify_flow(sk, &fl); 491 security_sk_classify_flow(sk, &fl);
492 err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT)); 492 err = ip_route_output_flow(&rt, &fl, sk, 1);
493 } 493 }
494 if (err) 494 if (err)
495 goto done; 495 goto done;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2daa0dc19d33..baee304a3cb7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2635,7 +2635,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2635 2635
2636 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2636 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2637 if (nlh == NULL) 2637 if (nlh == NULL)
2638 return -ENOBUFS; 2638 return -EMSGSIZE;
2639 2639
2640 r = nlmsg_data(nlh); 2640 r = nlmsg_data(nlh);
2641 r->rtm_family = AF_INET; 2641 r->rtm_family = AF_INET;
@@ -2718,7 +2718,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2718 return nlmsg_end(skb, nlh); 2718 return nlmsg_end(skb, nlh);
2719 2719
2720nla_put_failure: 2720nla_put_failure:
2721 return nlmsg_cancel(skb, nlh); 2721 nlmsg_cancel(skb, nlh);
2722 return -EMSGSIZE;
2722} 2723}
2723 2724
2724int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) 2725int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
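
rt_fill_info() now reports -EMSGSIZE rather than -ENOBUFS when the reply does not fit, and on the nla_put_failure path it cancels the partially built message and then returns -EMSGSIZE explicitly instead of reusing nlmsg_cancel()'s return value. A sketch of that fill-function shape; the nlmsg_*/nla_* bodies below are simplified stand-ins, not the kernel netlink API.

    #include <stdio.h>
    #include <stddef.h>
    #include <errno.h>

    struct nlmsghdr { int len; };

    static struct nlmsghdr hdr = { 16 };

    /* stand-ins: pretend the header fits but a later attribute does not */
    static struct nlmsghdr *nlmsg_put(void)        { return &hdr; }
    static int nla_put_u32(unsigned int value)     { (void)value; return -1; }
    static void nlmsg_cancel(struct nlmsghdr *nlh) { nlh->len = 0; }
    static int nlmsg_end(struct nlmsghdr *nlh)     { return nlh->len; }

    static int fill_info(void)
    {
            struct nlmsghdr *nlh = nlmsg_put();

            if (nlh == NULL)
                    return -EMSGSIZE;       /* was -ENOBUFS before this change */

            if (nla_put_u32(42) < 0)
                    goto nla_put_failure;

            return nlmsg_end(nlh);

    nla_put_failure:
            nlmsg_cancel(nlh);              /* drop the half-built message ... */
            return -EMSGSIZE;               /* ... and report that it did not fit */
    }

    int main(void)
    {
            printf("fill_info() = %d\n", fill_info());
            return 0;
    }
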
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b67e0dd743be..5bd43d7294fd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2415,10 +2415,11 @@ void __init tcp_init(void)
2415 &tcp_hashinfo.ehash_size, 2415 &tcp_hashinfo.ehash_size,
2416 NULL, 2416 NULL,
2417 0); 2417 0);
2418 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1; 2418 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2419 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) { 2419 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
2420 rwlock_init(&tcp_hashinfo.ehash[i].lock); 2420 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2421 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); 2421 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2422 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
2422 } 2423 }
2423 2424
2424 tcp_hashinfo.bhash = 2425 tcp_hashinfo.bhash =
@@ -2475,7 +2476,7 @@ void __init tcp_init(void)
2475 2476
2476 printk(KERN_INFO "TCP: Hash tables configured " 2477 printk(KERN_INFO "TCP: Hash tables configured "
2477 "(established %d bind %d)\n", 2478 "(established %d bind %d)\n",
2478 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size); 2479 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
2479 2480
2480 tcp_register_congestion_control(&tcp_reno); 2481 tcp_register_congestion_control(&tcp_reno);
2481} 2482}
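
The tcp_init() change pairs with the twchain initialisation added above and the twchain lookups in tcp_ipv4.c below: each established-hash bucket now carries a separate list for TIME-WAIT sockets, so the table no longer has to be sized at twice ehash_size and the "<< 1" arithmetic disappears. A tiny sketch of the per-bucket layout this implies; the field names follow the diff, but the struct shape itself is an assumption and the per-bucket lock is elided.

    #include <stdio.h>
    #include <stddef.h>

    struct list_head {
            struct list_head *next;
    };

    /* assumed bucket layout: live and TIME-WAIT sockets share a bucket
     * but hang off separate lists */
    struct ehash_bucket {
            struct list_head chain;         /* established sockets          */
            struct list_head twchain;       /* TIME-WAIT sockets (new list) */
    };

    int main(void)
    {
            struct ehash_bucket table[8];
            unsigned int i;

            /* one pass over ehash_size buckets; no "<< 1" doubling any more */
            for (i = 0; i < 8; i++) {
                    table[i].chain.next = NULL;
                    table[i].twchain.next = NULL;
            }

            printf("initialised %u buckets with two chains each\n", i);
            return 0;
    }
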
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c26076fb890e..c6109895bb5e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -936,28 +936,58 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
936 struct tcp_sock *tp = tcp_sk(sk); 936 struct tcp_sock *tp = tcp_sk(sk);
937 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked; 937 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
938 struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2); 938 struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
939 struct sk_buff *cached_skb;
939 int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3; 940 int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
940 int reord = tp->packets_out; 941 int reord = tp->packets_out;
941 int prior_fackets; 942 int prior_fackets;
942 u32 lost_retrans = 0; 943 u32 lost_retrans = 0;
943 int flag = 0; 944 int flag = 0;
944 int dup_sack = 0; 945 int dup_sack = 0;
946 int cached_fack_count;
945 int i; 947 int i;
948 int first_sack_index;
946 949
947 if (!tp->sacked_out) 950 if (!tp->sacked_out)
948 tp->fackets_out = 0; 951 tp->fackets_out = 0;
949 prior_fackets = tp->fackets_out; 952 prior_fackets = tp->fackets_out;
950 953
954 /* Check for D-SACK. */
955 if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
956 dup_sack = 1;
957 tp->rx_opt.sack_ok |= 4;
958 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
959 } else if (num_sacks > 1 &&
960 !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
961 !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
962 dup_sack = 1;
963 tp->rx_opt.sack_ok |= 4;
964 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
965 }
966
967 /* D-SACK for already forgotten data...
968 * Do dumb counting. */
969 if (dup_sack &&
970 !after(ntohl(sp[0].end_seq), prior_snd_una) &&
971 after(ntohl(sp[0].end_seq), tp->undo_marker))
972 tp->undo_retrans--;
973
974 /* Eliminate too old ACKs, but take into
975 * account more or less fresh ones, they can
976 * contain valid SACK info.
977 */
978 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
979 return 0;
980
951 /* SACK fastpath: 981 /* SACK fastpath:
952 * if the only SACK change is the increase of the end_seq of 982 * if the only SACK change is the increase of the end_seq of
953 * the first block then only apply that SACK block 983 * the first block then only apply that SACK block
954 * and use retrans queue hinting otherwise slowpath */ 984 * and use retrans queue hinting otherwise slowpath */
955 flag = 1; 985 flag = 1;
956 for (i = 0; i< num_sacks; i++) { 986 for (i = 0; i < num_sacks; i++) {
957 __u32 start_seq = ntohl(sp[i].start_seq); 987 __be32 start_seq = sp[i].start_seq;
958 __u32 end_seq = ntohl(sp[i].end_seq); 988 __be32 end_seq = sp[i].end_seq;
959 989
960 if (i == 0){ 990 if (i == 0) {
961 if (tp->recv_sack_cache[i].start_seq != start_seq) 991 if (tp->recv_sack_cache[i].start_seq != start_seq)
962 flag = 0; 992 flag = 0;
963 } else { 993 } else {
@@ -967,39 +997,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
967 } 997 }
968 tp->recv_sack_cache[i].start_seq = start_seq; 998 tp->recv_sack_cache[i].start_seq = start_seq;
969 tp->recv_sack_cache[i].end_seq = end_seq; 999 tp->recv_sack_cache[i].end_seq = end_seq;
970 1000 }
971 /* Check for D-SACK. */ 1001 /* Clear the rest of the cache sack blocks so they won't match mistakenly. */
972 if (i == 0) { 1002 for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
973 u32 ack = TCP_SKB_CB(ack_skb)->ack_seq; 1003 tp->recv_sack_cache[i].start_seq = 0;
974 1004 tp->recv_sack_cache[i].end_seq = 0;
975 if (before(start_seq, ack)) {
976 dup_sack = 1;
977 tp->rx_opt.sack_ok |= 4;
978 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
979 } else if (num_sacks > 1 &&
980 !after(end_seq, ntohl(sp[1].end_seq)) &&
981 !before(start_seq, ntohl(sp[1].start_seq))) {
982 dup_sack = 1;
983 tp->rx_opt.sack_ok |= 4;
984 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
985 }
986
987 /* D-SACK for already forgotten data...
988 * Do dumb counting. */
989 if (dup_sack &&
990 !after(end_seq, prior_snd_una) &&
991 after(end_seq, tp->undo_marker))
992 tp->undo_retrans--;
993
994 /* Eliminate too old ACKs, but take into
995 * account more or less fresh ones, they can
996 * contain valid SACK info.
997 */
998 if (before(ack, prior_snd_una - tp->max_window))
999 return 0;
1000 }
1001 } 1005 }
1002 1006
1007 first_sack_index = 0;
1003 if (flag) 1008 if (flag)
1004 num_sacks = 1; 1009 num_sacks = 1;
1005 else { 1010 else {
@@ -1016,6 +1021,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1016 tmp = sp[j]; 1021 tmp = sp[j];
1017 sp[j] = sp[j+1]; 1022 sp[j] = sp[j+1];
1018 sp[j+1] = tmp; 1023 sp[j+1] = tmp;
1024
1025 /* Track where the first SACK block goes to */
1026 if (j == first_sack_index)
1027 first_sack_index = j+1;
1019 } 1028 }
1020 1029
1021 } 1030 }
@@ -1025,20 +1034,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1025 /* clear flag as used for different purpose in following code */ 1034 /* clear flag as used for different purpose in following code */
1026 flag = 0; 1035 flag = 0;
1027 1036
1037 /* Use SACK fastpath hint if valid */
1038 cached_skb = tp->fastpath_skb_hint;
1039 cached_fack_count = tp->fastpath_cnt_hint;
1040 if (!cached_skb) {
1041 cached_skb = sk->sk_write_queue.next;
1042 cached_fack_count = 0;
1043 }
1044
1028 for (i=0; i<num_sacks; i++, sp++) { 1045 for (i=0; i<num_sacks; i++, sp++) {
1029 struct sk_buff *skb; 1046 struct sk_buff *skb;
1030 __u32 start_seq = ntohl(sp->start_seq); 1047 __u32 start_seq = ntohl(sp->start_seq);
1031 __u32 end_seq = ntohl(sp->end_seq); 1048 __u32 end_seq = ntohl(sp->end_seq);
1032 int fack_count; 1049 int fack_count;
1033 1050
1034 /* Use SACK fastpath hint if valid */ 1051 skb = cached_skb;
1035 if (tp->fastpath_skb_hint) { 1052 fack_count = cached_fack_count;
1036 skb = tp->fastpath_skb_hint;
1037 fack_count = tp->fastpath_cnt_hint;
1038 } else {
1039 skb = sk->sk_write_queue.next;
1040 fack_count = 0;
1041 }
1042 1053
1043 /* Event "B" in the comment above. */ 1054 /* Event "B" in the comment above. */
1044 if (after(end_seq, tp->high_seq)) 1055 if (after(end_seq, tp->high_seq))
@@ -1048,8 +1059,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1048 int in_sack, pcount; 1059 int in_sack, pcount;
1049 u8 sacked; 1060 u8 sacked;
1050 1061
1051 tp->fastpath_skb_hint = skb; 1062 cached_skb = skb;
1052 tp->fastpath_cnt_hint = fack_count; 1063 cached_fack_count = fack_count;
1064 if (i == first_sack_index) {
1065 tp->fastpath_skb_hint = skb;
1066 tp->fastpath_cnt_hint = fack_count;
1067 }
1053 1068
1054 /* The retransmission queue is always in order, so 1069 /* The retransmission queue is always in order, so
1055 * we can short-circuit the walk early. 1070 * we can short-circuit the walk early.
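
The tcp_sacktag_write_queue() rework does three related things: the D-SACK detection and the too-old-ACK bailout are hoisted out of the per-block cache-comparison loop and run once up front, the cache now stores the raw network-byte-order sequence numbers and clears its unused tail so stale entries cannot fake a fastpath hit, and first_sack_index follows the first block through the sort so the fastpath hint is recorded only for it. A sketch of the hoisted D-SACK test on two SACK blocks; before()/after() use the kernel's wraparound-safe comparison.

    #include <stdio.h>
    #include <stdint.h>

    static int before(uint32_t seq1, uint32_t seq2) { return (int32_t)(seq1 - seq2) < 0; }
    static int after(uint32_t seq1, uint32_t seq2)  { return before(seq2, seq1); }

    struct sack_block { uint32_t start_seq, end_seq; };

    static int is_dsack(const struct sack_block *sp, int num_sacks, uint32_t ack_seq)
    {
            if (before(sp[0].start_seq, ack_seq))
                    return 1;       /* block covers already-ACKed data */
            if (num_sacks > 1 &&
                !after(sp[0].end_seq, sp[1].end_seq) &&
                !before(sp[0].start_seq, sp[1].start_seq))
                    return 1;       /* first block fully inside the second */
            return 0;
    }

    int main(void)
    {
            struct sack_block sp[2] = { { 900, 1000 }, { 800, 1200 } };

            printf("D-SACK: %d\n", is_dsack(sp, 2, 1500));
            return 0;
    }
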
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 12de90a5047c..f51d6404c61c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -191,7 +191,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
191 tmp = ip_route_connect(&rt, nexthop, inet->saddr, 191 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
192 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 192 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
193 IPPROTO_TCP, 193 IPPROTO_TCP,
194 inet->sport, usin->sin_port, sk); 194 inet->sport, usin->sin_port, sk, 1);
195 if (tmp < 0) 195 if (tmp < 0)
196 return tmp; 196 return tmp;
197 197
@@ -502,11 +502,11 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
502 struct tcphdr *th = skb->h.th; 502 struct tcphdr *th = skb->h.th;
503 503
504 if (skb->ip_summed == CHECKSUM_PARTIAL) { 504 if (skb->ip_summed == CHECKSUM_PARTIAL) {
505 th->check = ~tcp_v4_check(th, len, 505 th->check = ~tcp_v4_check(len, inet->saddr,
506 inet->saddr, inet->daddr, 0); 506 inet->daddr, 0);
507 skb->csum_offset = offsetof(struct tcphdr, check); 507 skb->csum_offset = offsetof(struct tcphdr, check);
508 } else { 508 } else {
509 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, 509 th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
510 csum_partial((char *)th, 510 csum_partial((char *)th,
511 th->doff << 2, 511 th->doff << 2,
512 skb->csum)); 512 skb->csum));
@@ -525,7 +525,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
525 th = skb->h.th; 525 th = skb->h.th;
526 526
527 th->check = 0; 527 th->check = 0;
528 th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); 528 th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
529 skb->csum_offset = offsetof(struct tcphdr, check); 529 skb->csum_offset = offsetof(struct tcphdr, check);
530 skb->ip_summed = CHECKSUM_PARTIAL; 530 skb->ip_summed = CHECKSUM_PARTIAL;
531 return 0; 531 return 0;
@@ -747,7 +747,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
747 if (skb) { 747 if (skb) {
748 struct tcphdr *th = skb->h.th; 748 struct tcphdr *th = skb->h.th;
749 749
750 th->check = tcp_v4_check(th, skb->len, 750 th->check = tcp_v4_check(skb->len,
751 ireq->loc_addr, 751 ireq->loc_addr,
752 ireq->rmt_addr, 752 ireq->rmt_addr,
753 csum_partial((char *)th, skb->len, 753 csum_partial((char *)th, skb->len,
@@ -1514,7 +1514,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1514static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) 1514static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1515{ 1515{
1516 if (skb->ip_summed == CHECKSUM_COMPLETE) { 1516 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1517 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr, 1517 if (!tcp_v4_check(skb->len, skb->nh.iph->saddr,
1518 skb->nh.iph->daddr, skb->csum)) { 1518 skb->nh.iph->daddr, skb->csum)) {
1519 skb->ip_summed = CHECKSUM_UNNECESSARY; 1519 skb->ip_summed = CHECKSUM_UNNECESSARY;
1520 return 0; 1520 return 0;
@@ -2051,7 +2051,7 @@ static void *established_get_first(struct seq_file *seq)
2051 } 2051 }
2052 st->state = TCP_SEQ_STATE_TIME_WAIT; 2052 st->state = TCP_SEQ_STATE_TIME_WAIT;
2053 inet_twsk_for_each(tw, node, 2053 inet_twsk_for_each(tw, node,
2054 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) { 2054 &tcp_hashinfo.ehash[st->bucket].twchain) {
2055 if (tw->tw_family != st->family) { 2055 if (tw->tw_family != st->family) {
2056 continue; 2056 continue;
2057 } 2057 }
@@ -2107,7 +2107,7 @@ get_tw:
2107 } 2107 }
2108 2108
2109 st->state = TCP_SEQ_STATE_TIME_WAIT; 2109 st->state = TCP_SEQ_STATE_TIME_WAIT;
2110 tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain); 2110 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2111 goto get_tw; 2111 goto get_tw;
2112found: 2112found:
2113 cur = sk; 2113 cur = sk;
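
The two hunks above reflect the ehash layout change: time-wait sockets no longer hang off a mirrored slot at ehash[bucket + ehash_size] but off a dedicated twchain head inside the same bucket. Below is a minimal userspace sketch of the before/after bucket layout; old_bucket and new_bucket are invented names, not the kernel's inet_ehash_bucket.

/* illustrative sketch -- not part of the patch */
#include <stdio.h>
#include <stdlib.h>

/* Old layout: one list head per slot; the time-wait list for bucket i
 * lives in a mirror slot at index (i + size). */
struct old_bucket { void *chain; };

/* New layout: each bucket carries the established chain and the
 * time-wait chain ("twchain") side by side. */
struct new_bucket { void *chain; void *twchain; };

int main(void)
{
	unsigned int size = 4, i = 1;
	struct old_bucket *old_tab = calloc(2 * size, sizeof(*old_tab));
	struct new_bucket *new_tab = calloc(size, sizeof(*new_tab));

	if (!old_tab || !new_tab)
		return 1;

	/* Old scheme: &old_tab[i].chain vs. &old_tab[i + size].chain. */
	printf("old: established at %p, time-wait at %p\n",
	       (void *)&old_tab[i].chain, (void *)&old_tab[i + size].chain);

	/* New scheme: both chains are found through the same bucket. */
	printf("new: established at %p, time-wait at %p\n",
	       (void *)&new_tab[i].chain, (void *)&new_tab[i].twchain);

	free(old_tab);
	free(new_tab);
	return 0;
}

Either layout keeps one established chain and one time-wait chain per hash value; the change only moves where the time-wait head is stored.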
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 975f4472af29..58b7111523f4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -965,7 +965,8 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
965 u32 in_flight, cwnd; 965 u32 in_flight, cwnd;
966 966
967 /* Don't be strict about the congestion window for the final FIN. */ 967 /* Don't be strict about the congestion window for the final FIN. */
968 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 968 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
969 tcp_skb_pcount(skb) == 1)
969 return 1; 970 return 1;
970 971
971 in_flight = tcp_packets_in_flight(tp); 972 in_flight = tcp_packets_in_flight(tp);
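
The tcp_cwnd_test() hunk narrows the FIN exemption: an skb bypasses the congestion-window check only if it carries a FIN and covers a single segment. A hedged, standalone restatement of that predicate; cwnd_test and FLAG_FIN are simplified stand-ins for the kernel helpers.

/* illustrative sketch -- not part of the patch */
#include <stdio.h>

#define FLAG_FIN 0x01

/* Returns how many segments the window still allows, or 1
 * unconditionally for a lone FIN segment (the new, stricter rule). */
static unsigned int cwnd_test(unsigned int flags, unsigned int pcount,
			      unsigned int in_flight, unsigned int cwnd)
{
	/* Old rule: any skb carrying a FIN bypassed the test.
	 * New rule: only a single-segment FIN does. */
	if ((flags & FLAG_FIN) && pcount == 1)
		return 1;

	if (in_flight >= cwnd)
		return 0;

	return cwnd - in_flight;
}

int main(void)
{
	/* A multi-segment skb ending in FIN no longer skips the check. */
	printf("%u\n", cwnd_test(FLAG_FIN, 3, 10, 10));  /* 0 */
	printf("%u\n", cwnd_test(FLAG_FIN, 1, 10, 10));  /* 1 */
	return 0;
}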
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cfff930f2baf..8b54c68a0d12 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -629,7 +629,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
629 { .sport = inet->sport, 629 { .sport = inet->sport,
630 .dport = dport } } }; 630 .dport = dport } } };
631 security_sk_classify_flow(sk, &fl); 631 security_sk_classify_flow(sk, &fl);
632 err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT)); 632 err = ip_route_output_flow(&rt, &fl, sk, 1);
633 if (err) 633 if (err)
634 goto out; 634 goto out;
635 635
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index e23c21d31a53..e54c5494c88f 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -23,6 +23,12 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
23 IP_ECN_set_ce(inner_iph); 23 IP_ECN_set_ce(inner_iph);
24} 24}
25 25
26static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
27{
28 if (INET_ECN_is_ce(iph->tos))
29 IP6_ECN_set_ce(skb->nh.ipv6h);
30}
31
26/* Add encapsulation header. 32/* Add encapsulation header.
27 * 33 *
28 * The top IP header will be constructed per RFC 2401. The following fields 34 * The top IP header will be constructed per RFC 2401. The following fields
@@ -36,6 +42,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
36static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) 42static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
37{ 43{
38 struct dst_entry *dst = skb->dst; 44 struct dst_entry *dst = skb->dst;
45 struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
39 struct iphdr *iph, *top_iph; 46 struct iphdr *iph, *top_iph;
40 int flags; 47 int flags;
41 48
@@ -48,15 +55,27 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
48 top_iph->ihl = 5; 55 top_iph->ihl = 5;
49 top_iph->version = 4; 56 top_iph->version = 4;
50 57
58 flags = x->props.flags;
59
51 /* DS disclosed */ 60 /* DS disclosed */
52 top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos); 61 if (xdst->route->ops->family == AF_INET) {
62 top_iph->protocol = IPPROTO_IPIP;
63 top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos);
64 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
65 0 : (iph->frag_off & htons(IP_DF));
66 }
67#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
68 else {
69 struct ipv6hdr *ipv6h = (struct ipv6hdr*)iph;
70 top_iph->protocol = IPPROTO_IPV6;
71 top_iph->tos = INET_ECN_encapsulate(iph->tos, ipv6_get_dsfield(ipv6h));
72 top_iph->frag_off = 0;
73 }
74#endif
53 75
54 flags = x->props.flags;
55 if (flags & XFRM_STATE_NOECN) 76 if (flags & XFRM_STATE_NOECN)
56 IP_ECN_clear(top_iph); 77 IP_ECN_clear(top_iph);
57 78
58 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
59 0 : (iph->frag_off & htons(IP_DF));
60 if (!top_iph->frag_off) 79 if (!top_iph->frag_off)
61 __ip_select_ident(top_iph, dst->child, 0); 80 __ip_select_ident(top_iph, dst->child, 0);
62 81
@@ -64,7 +83,6 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
64 83
65 top_iph->saddr = x->props.saddr.a4; 84 top_iph->saddr = x->props.saddr.a4;
66 top_iph->daddr = x->id.daddr.a4; 85 top_iph->daddr = x->id.daddr.a4;
67 top_iph->protocol = IPPROTO_IPIP;
68 86
69 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 87 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
70 return 0; 88 return 0;
@@ -75,8 +93,16 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
75 struct iphdr *iph = skb->nh.iph; 93 struct iphdr *iph = skb->nh.iph;
76 int err = -EINVAL; 94 int err = -EINVAL;
77 95
78 if (iph->protocol != IPPROTO_IPIP) 96 switch(iph->protocol){
79 goto out; 97 case IPPROTO_IPIP:
98#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
99 case IPPROTO_IPV6:
100 break;
101#endif
102 default:
103 goto out;
104 }
105
80 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 106 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
81 goto out; 107 goto out;
82 108
@@ -84,10 +110,19 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
84 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 110 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
85 goto out; 111 goto out;
86 112
87 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 113 if (iph->protocol == IPPROTO_IPIP) {
88 ipv4_copy_dscp(iph, skb->h.ipiph); 114 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
89 if (!(x->props.flags & XFRM_STATE_NOECN)) 115 ipv4_copy_dscp(iph, skb->h.ipiph);
90 ipip_ecn_decapsulate(skb); 116 if (!(x->props.flags & XFRM_STATE_NOECN))
117 ipip_ecn_decapsulate(skb);
118 }
119#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
120 else {
121 if (!(x->props.flags & XFRM_STATE_NOECN))
122 ipip6_ecn_decapsulate(iph, skb);
123 skb->protocol = htons(ETH_P_IPV6);
124 }
125#endif
91 skb->mac.raw = memmove(skb->data - skb->mac_len, 126 skb->mac.raw = memmove(skb->data - skb->mac_len,
92 skb->mac.raw, skb->mac_len); 127 skb->mac.raw, skb->mac_len);
93 skb->nh.raw = skb->data; 128 skb->nh.raw = skb->data;
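
The xfrm4 tunnel hunks add interfamily (IPv6-in-IPv4) support: on output the outer protocol, TOS and DF handling follow the inner packet's family, and on input IPPROTO_IPV6 payloads are accepted and ECN-decapsulated. A small sketch of the output-side selection only; outer_hdr and build_outer are invented types, the real code works on struct iphdr/ipv6hdr, and ECN encapsulation is elided.

/* illustrative sketch -- not part of the patch */
#include <stdio.h>

enum family { FAM_INET = 2, FAM_INET6 = 10 };

#define PROTO_IPIP 4    /* IPv4-in-IPv4 */
#define PROTO_IPV6 41   /* IPv6-in-IPv4 */

struct outer_hdr {
	unsigned char  protocol;
	unsigned char  tos;
	unsigned short frag_off;   /* DF bit or 0 */
};

/* Pick the outer-header fields from the inner family, mirroring the
 * branch added to xfrm4_tunnel_output(); no_pmtudisc stands in for
 * the XFRM_STATE_NOPMTUDISC flag. */
static void build_outer(struct outer_hdr *top, enum family inner,
			unsigned char inner_tos, unsigned short inner_df,
			int no_pmtudisc)
{
	if (inner == FAM_INET) {
		top->protocol = PROTO_IPIP;
		top->tos = inner_tos;                  /* DS field disclosed */
		top->frag_off = no_pmtudisc ? 0 : inner_df;
	} else {
		top->protocol = PROTO_IPV6;
		top->tos = inner_tos;                  /* from the inner IPv6 DS field */
		top->frag_off = 0;                     /* DF is never copied for v6-in-v4 */
	}
}

int main(void)
{
	struct outer_hdr top;

	build_outer(&top, FAM_INET6, 0x2e, 0x4000, 0);
	printf("proto=%u tos=0x%02x frag=0x%04x\n",
	       top.protocol, top.tos, top.frag_off);
	return 0;
}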
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index fb9f69c616f5..699f27ce62ad 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -72,13 +72,11 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
72 struct dst_entry *dst, *dst_prev; 72 struct dst_entry *dst, *dst_prev;
73 struct rtable *rt0 = (struct rtable*)(*dst_p); 73 struct rtable *rt0 = (struct rtable*)(*dst_p);
74 struct rtable *rt = rt0; 74 struct rtable *rt = rt0;
75 __be32 remote = fl->fl4_dst;
76 __be32 local = fl->fl4_src;
77 struct flowi fl_tunnel = { 75 struct flowi fl_tunnel = {
78 .nl_u = { 76 .nl_u = {
79 .ip4_u = { 77 .ip4_u = {
80 .saddr = local, 78 .saddr = fl->fl4_src,
81 .daddr = remote, 79 .daddr = fl->fl4_dst,
82 .tos = fl->fl4_tos 80 .tos = fl->fl4_tos
83 } 81 }
84 } 82 }
@@ -94,7 +92,6 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
94 for (i = 0; i < nx; i++) { 92 for (i = 0; i < nx; i++) {
95 struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops); 93 struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops);
96 struct xfrm_dst *xdst; 94 struct xfrm_dst *xdst;
97 int tunnel = 0;
98 95
99 if (unlikely(dst1 == NULL)) { 96 if (unlikely(dst1 == NULL)) {
100 err = -ENOBUFS; 97 err = -ENOBUFS;
@@ -116,19 +113,28 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
116 113
117 dst1->next = dst_prev; 114 dst1->next = dst_prev;
118 dst_prev = dst1; 115 dst_prev = dst1;
119 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 116
120 remote = xfrm[i]->id.daddr.a4;
121 local = xfrm[i]->props.saddr.a4;
122 tunnel = 1;
123 }
124 header_len += xfrm[i]->props.header_len; 117 header_len += xfrm[i]->props.header_len;
125 trailer_len += xfrm[i]->props.trailer_len; 118 trailer_len += xfrm[i]->props.trailer_len;
126 119
127 if (tunnel) { 120 if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
128 fl_tunnel.fl4_src = local; 121 unsigned short encap_family = xfrm[i]->props.family;
129 fl_tunnel.fl4_dst = remote; 122 switch(encap_family) {
123 case AF_INET:
124 fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
125 fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
126 break;
127#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
128 case AF_INET6:
129 ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
130 ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
131 break;
132#endif
133 default:
134 BUG_ON(1);
135 }
130 err = xfrm_dst_lookup((struct xfrm_dst **)&rt, 136 err = xfrm_dst_lookup((struct xfrm_dst **)&rt,
131 &fl_tunnel, AF_INET); 137 &fl_tunnel, encap_family);
132 if (err) 138 if (err)
133 goto error; 139 goto error;
134 } else 140 } else
@@ -145,6 +151,7 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
145 i = 0; 151 i = 0;
146 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) { 152 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
147 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev; 153 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
154 struct xfrm_state_afinfo *afinfo;
148 x->u.rt.fl = *fl; 155 x->u.rt.fl = *fl;
149 156
150 dst_prev->xfrm = xfrm[i++]; 157 dst_prev->xfrm = xfrm[i++];
@@ -162,8 +169,18 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 162 /* Copy neighbour for reachability confirmation */ 169 /* Copy neighbour for reachability confirmation */
163 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour); 170 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
164 dst_prev->input = rt->u.dst.input; 171 dst_prev->input = rt->u.dst.input;
165 dst_prev->output = xfrm4_output; 172 /* XXX: When IPv6 module can be unloaded, we should manage reference
166 if (rt->peer) 173 * to xfrm6_output in afinfo->output. Miyazawa
174 * */
175 afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
176 if (!afinfo) {
177 dst = *dst_p;
178 err = -EAFNOSUPPORT;
179 goto error;
180 }
181 dst_prev->output = afinfo->output;
182 xfrm_state_put_afinfo(afinfo);
183 if (dst_prev->xfrm->props.family == AF_INET && rt->peer)
167 atomic_inc(&rt->peer->refcnt); 184 atomic_inc(&rt->peer->refcnt);
168 x->u.rt.peer = rt->peer; 185 x->u.rt.peer = rt->peer;
169 /* Sheit... I remember I did this right. Apparently, 186 /* Sheit... I remember I did this right. Apparently,
@@ -274,7 +291,7 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
274 291
275 if (likely(xdst->u.rt.idev)) 292 if (likely(xdst->u.rt.idev))
276 in_dev_put(xdst->u.rt.idev); 293 in_dev_put(xdst->u.rt.idev);
277 if (likely(xdst->u.rt.peer)) 294 if (dst->xfrm->props.family == AF_INET && likely(xdst->u.rt.peer))
278 inet_putpeer(xdst->u.rt.peer); 295 inet_putpeer(xdst->u.rt.peer);
279 xfrm_dst_destroy(xdst); 296 xfrm_dst_destroy(xdst);
280} 297}
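
In __xfrm4_bundle_create() each tunnel-mode state now supplies the endpoints for the nested route lookup from its own encapsulation family instead of reusing the original IPv4 flow, and the per-dst output hook is resolved through the state AF info. A reduced sketch of the per-family dispatch only; tunnel_flow and state are invented structures standing in for struct flowi and struct xfrm_state.

/* illustrative sketch -- not part of the patch */
#include <stdio.h>
#include <string.h>

enum family { FAM_INET = 2, FAM_INET6 = 10 };

struct tunnel_flow {
	enum family   family;
	unsigned int  v4_src, v4_dst;          /* used when family == FAM_INET */
	unsigned char v6_src[16], v6_dst[16];  /* used when family == FAM_INET6 */
};

struct state {
	enum family   family;
	unsigned int  saddr4, daddr4;
	unsigned char saddr6[16], daddr6[16];
};

/* Fill the lookup key from the state's own family, as the new switch
 * in the bundle-creation loop does. */
static int fill_tunnel_flow(struct tunnel_flow *fl, const struct state *x)
{
	fl->family = x->family;
	switch (x->family) {
	case FAM_INET:
		fl->v4_src = x->saddr4;
		fl->v4_dst = x->daddr4;
		return 0;
	case FAM_INET6:
		memcpy(fl->v6_src, x->saddr6, 16);
		memcpy(fl->v6_dst, x->daddr6, 16);
		return 0;
	default:
		return -1;   /* the kernel BUGs here; a sketch just fails */
	}
}

int main(void)
{
	struct state x = { .family = FAM_INET,
			   .saddr4 = 0x0a000001, .daddr4 = 0x0a000002 };
	struct tunnel_flow fl;

	if (fill_tunnel_flow(&fl, &x) == 0)
		printf("lookup dst=%08x src=%08x\n", fl.v4_dst, fl.v4_src);
	return 0;
}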
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 3cc3df0c6ece..93e2c061cdda 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -51,6 +51,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
51 .family = AF_INET, 51 .family = AF_INET,
52 .init_flags = xfrm4_init_flags, 52 .init_flags = xfrm4_init_flags,
53 .init_tempsel = __xfrm4_init_tempsel, 53 .init_tempsel = __xfrm4_init_tempsel,
54 .output = xfrm4_output,
54}; 55};
55 56
56void __init xfrm4_state_init(void) 57void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e3854696988d..fe5e1d833871 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3117,7 +3117,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3117 3117
3118 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3118 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
3119 if (nlh == NULL) 3119 if (nlh == NULL)
3120 return -ENOBUFS; 3120 return -EMSGSIZE;
3121 3121
3122 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), 3122 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
3123 ifa->idev->dev->ifindex); 3123 ifa->idev->dev->ifindex);
@@ -3137,8 +3137,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3137 } 3137 }
3138 3138
3139 if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 || 3139 if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 ||
3140 put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) 3140 put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) {
3141 return nlmsg_cancel(skb, nlh); 3141 nlmsg_cancel(skb, nlh);
3142 return -EMSGSIZE;
3143 }
3142 3144
3143 return nlmsg_end(skb, nlh); 3145 return nlmsg_end(skb, nlh);
3144} 3146}
@@ -3155,13 +3157,15 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
3155 3157
3156 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3158 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
3157 if (nlh == NULL) 3159 if (nlh == NULL)
3158 return -ENOBUFS; 3160 return -EMSGSIZE;
3159 3161
3160 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); 3162 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
3161 if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 || 3163 if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 ||
3162 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp, 3164 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
3163 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) 3165 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
3164 return nlmsg_cancel(skb, nlh); 3166 nlmsg_cancel(skb, nlh);
3167 return -EMSGSIZE;
3168 }
3165 3169
3166 return nlmsg_end(skb, nlh); 3170 return nlmsg_end(skb, nlh);
3167} 3171}
@@ -3178,13 +3182,15 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
3178 3182
3179 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3183 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
3180 if (nlh == NULL) 3184 if (nlh == NULL)
3181 return -ENOBUFS; 3185 return -EMSGSIZE;
3182 3186
3183 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); 3187 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
3184 if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 || 3188 if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 ||
3185 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp, 3189 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
3186 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) 3190 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
3187 return nlmsg_cancel(skb, nlh); 3191 nlmsg_cancel(skb, nlh);
3192 return -EMSGSIZE;
3193 }
3188 3194
3189 return nlmsg_end(skb, nlh); 3195 return nlmsg_end(skb, nlh);
3190} 3196}
@@ -3334,9 +3340,12 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3334 3340
3335 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, 3341 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
3336 nlh->nlmsg_seq, RTM_NEWADDR, 0); 3342 nlh->nlmsg_seq, RTM_NEWADDR, 0);
3337 /* failure implies BUG in inet6_ifaddr_msgsize() */ 3343 if (err < 0) {
3338 BUG_ON(err < 0); 3344 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
3339 3345 WARN_ON(err == -EMSGSIZE);
3346 kfree_skb(skb);
3347 goto errout_ifa;
3348 }
3340 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid); 3349 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
3341errout_ifa: 3350errout_ifa:
3342 in6_ifa_put(ifa); 3351 in6_ifa_put(ifa);
@@ -3354,9 +3363,12 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3354 goto errout; 3363 goto errout;
3355 3364
3356 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0); 3365 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
3357 /* failure implies BUG in inet6_ifaddr_msgsize() */ 3366 if (err < 0) {
3358 BUG_ON(err < 0); 3367 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
3359 3368 WARN_ON(err == -EMSGSIZE);
3369 kfree_skb(skb);
3370 goto errout;
3371 }
3360 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3372 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3361errout: 3373errout:
3362 if (err < 0) 3374 if (err < 0)
@@ -3426,7 +3438,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3426 3438
3427 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); 3439 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
3428 if (nlh == NULL) 3440 if (nlh == NULL)
3429 return -ENOBUFS; 3441 return -EMSGSIZE;
3430 3442
3431 hdr = nlmsg_data(nlh); 3443 hdr = nlmsg_data(nlh);
3432 hdr->ifi_family = AF_INET6; 3444 hdr->ifi_family = AF_INET6;
@@ -3469,7 +3481,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3469 return nlmsg_end(skb, nlh); 3481 return nlmsg_end(skb, nlh);
3470 3482
3471nla_put_failure: 3483nla_put_failure:
3472 return nlmsg_cancel(skb, nlh); 3484 nlmsg_cancel(skb, nlh);
3485 return -EMSGSIZE;
3473} 3486}
3474 3487
3475static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 3488static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -3507,9 +3520,12 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
3507 goto errout; 3520 goto errout;
3508 3521
3509 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0); 3522 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
3510 /* failure implies BUG in inet6_if_nlmsg_size() */ 3523 if (err < 0) {
3511 BUG_ON(err < 0); 3524 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
3512 3525 WARN_ON(err == -EMSGSIZE);
3526 kfree_skb(skb);
3527 goto errout;
3528 }
3513 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3529 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3514errout: 3530errout:
3515 if (err < 0) 3531 if (err < 0)
@@ -3533,7 +3549,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
3533 3549
3534 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags); 3550 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags);
3535 if (nlh == NULL) 3551 if (nlh == NULL)
3536 return -ENOBUFS; 3552 return -EMSGSIZE;
3537 3553
3538 pmsg = nlmsg_data(nlh); 3554 pmsg = nlmsg_data(nlh);
3539 pmsg->prefix_family = AF_INET6; 3555 pmsg->prefix_family = AF_INET6;
@@ -3558,7 +3574,8 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
3558 return nlmsg_end(skb, nlh); 3574 return nlmsg_end(skb, nlh);
3559 3575
3560nla_put_failure: 3576nla_put_failure:
3561 return nlmsg_cancel(skb, nlh); 3577 nlmsg_cancel(skb, nlh);
3578 return -EMSGSIZE;
3562} 3579}
3563 3580
3564static void inet6_prefix_notify(int event, struct inet6_dev *idev, 3581static void inet6_prefix_notify(int event, struct inet6_dev *idev,
@@ -3572,9 +3589,12 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3572 goto errout; 3589 goto errout;
3573 3590
3574 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0); 3591 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
3575 /* failure implies BUG in inet6_prefix_nlmsg_size() */ 3592 if (err < 0) {
3576 BUG_ON(err < 0); 3593 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
3577 3594 WARN_ON(err == -EMSGSIZE);
3595 kfree_skb(skb);
3596 goto errout;
3597 }
3578 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); 3598 err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
3579errout: 3599errout:
3580 if (err < 0) 3600 if (err < 0)
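
The addrconf hunks make every fill helper report -EMSGSIZE when the message does not fit (after cancelling the partial message), and the callers trade BUG_ON for a warning plus kfree_skb. A userspace sketch of that contract; msgbuf and fill_msg are invented stand-ins for the skb and the nlmsg helpers.

/* illustrative sketch -- not part of the patch */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct msgbuf {
	char   data[32];
	size_t len;
};

/* Fill helper: on overflow it undoes its work and reports -EMSGSIZE,
 * so the caller can tell a sizing bug from other failures. */
static int fill_msg(struct msgbuf *buf, const char *payload)
{
	size_t need = strlen(payload);

	if (buf->len + need > sizeof(buf->data)) {
		buf->len = 0;            /* analogous to nlmsg_cancel() */
		return -EMSGSIZE;
	}
	memcpy(buf->data + buf->len, payload, need);
	buf->len += need;
	return 0;
}

int main(void)
{
	struct msgbuf buf = { .len = 0 };
	int err = fill_msg(&buf, "a payload that is far too large for this buffer");

	if (err < 0) {
		/* The kernel now warns instead of BUG()ing, then frees the skb. */
		if (err == -EMSGSIZE)
			fprintf(stderr, "warning: message size was estimated too small\n");
		return 1;
	}
	return 0;
}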
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 5c94fea90e97..ecde30140f4a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -178,7 +178,7 @@ ipv4_connected:
178 if (final_p) 178 if (final_p)
179 ipv6_addr_copy(&fl.fl6_dst, final_p); 179 ipv6_addr_copy(&fl.fl6_dst, final_p);
180 180
181 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 181 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
182 goto out; 182 goto out;
183 183
184 /* source address lookup done in ip6_dst_lookup */ 184 /* source address lookup done in ip6_dst_lookup */
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b7e5bae0e347..e61116949bee 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -79,7 +79,7 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
79 goto hit; /* You sunk my battleship! */ 79 goto hit; /* You sunk my battleship! */
80 } 80 }
81 /* Must check for a TIME_WAIT'er before going to listener hash. */ 81 /* Must check for a TIME_WAIT'er before going to listener hash. */
82 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { 82 sk_for_each(sk, node, &head->twchain) {
83 const struct inet_timewait_sock *tw = inet_twsk(sk); 83 const struct inet_timewait_sock *tw = inet_twsk(sk);
84 84
85 if(*((__portpair *)&(tw->tw_dport)) == ports && 85 if(*((__portpair *)&(tw->tw_dport)) == ports &&
@@ -183,7 +183,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
183 write_lock(&head->lock); 183 write_lock(&head->lock);
184 184
185 /* Check TIME-WAIT sockets first. */ 185 /* Check TIME-WAIT sockets first. */
186 sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) { 186 sk_for_each(sk2, node, &head->twchain) {
187 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2); 187 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
188 188
189 tw = inet_twsk(sk2); 189 tw = inet_twsk(sk2);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8d918348f5bb..2b9e3bb7da65 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -999,7 +999,8 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
999 break; 999 break;
1000 dev = t->dev; 1000 dev = t->dev;
1001 } 1001 }
1002 err = unregister_netdevice(dev); 1002 err = 0;
1003 unregister_netdevice(dev);
1003 break; 1004 break;
1004 default: 1005 default:
1005 err = -EINVAL; 1006 err = -EINVAL;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 882cde4b4047..e3ec21695832 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1582,6 +1582,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1582 skb = add_grhead(skb, pmc, type, &pgr); 1582 skb = add_grhead(skb, pmc, type, &pgr);
1583 first = 0; 1583 first = 0;
1584 } 1584 }
1585 if (!skb)
1586 return NULL;
1585 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc)); 1587 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
1586 *psrc = psf->sf_addr; 1588 *psrc = psf->sf_addr;
1587 scount++; stotal++; 1589 scount++; stotal++;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index be7dd7db65d7..681bb077eacc 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -89,7 +89,6 @@ static int mip6_mh_len(int type)
89int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) 89int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
90{ 90{
91 struct ip6_mh *mh; 91 struct ip6_mh *mh;
92 int mhlen;
93 92
94 if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) || 93 if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) ||
95 !pskb_may_pull(skb, (skb->h.raw - skb->data) + ((skb->h.raw[1] + 1) << 3))) 94 !pskb_may_pull(skb, (skb->h.raw - skb->data) + ((skb->h.raw[1] + 1) << 3)))
@@ -103,31 +102,6 @@ int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
103 mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw); 102 mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw);
104 return -1; 103 return -1;
105 } 104 }
106 mhlen = (mh->ip6mh_hdrlen + 1) << 3;
107
108 if (skb->ip_summed == CHECKSUM_COMPLETE) {
109 skb->ip_summed = CHECKSUM_UNNECESSARY;
110 if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
111 &skb->nh.ipv6h->daddr,
112 mhlen, IPPROTO_MH,
113 skb->csum)) {
114 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH hw checksum failed\n");
115 skb->ip_summed = CHECKSUM_NONE;
116 }
117 }
118 if (skb->ip_summed == CHECKSUM_NONE) {
119 if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
120 &skb->nh.ipv6h->daddr,
121 mhlen, IPPROTO_MH,
122 skb_checksum(skb, 0, mhlen, 0))) {
123 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH checksum failed "
124 "[" NIP6_FMT " > " NIP6_FMT "]\n",
125 NIP6(skb->nh.ipv6h->saddr),
126 NIP6(skb->nh.ipv6h->daddr));
127 return -1;
128 }
129 skb->ip_summed = CHECKSUM_UNNECESSARY;
130 }
131 105
132 if (mh->ip6mh_proto != IPPROTO_NONE) { 106 if (mh->ip6mh_proto != IPPROTO_NONE) {
133 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", 107 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index adcd6131df2a..cd549aea84f0 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -114,6 +114,14 @@ config IP6_NF_MATCH_AH
114 114
115 To compile it as a module, choose M here. If unsure, say N. 115 To compile it as a module, choose M here. If unsure, say N.
116 116
117config IP6_NF_MATCH_MH
118 tristate "MH match support"
119 depends on IP6_NF_IPTABLES
120 help
121 This module allows one to match MH packets.
122
123 To compile it as a module, choose M here. If unsure, say N.
124
117config IP6_NF_MATCH_EUI64 125config IP6_NF_MATCH_EUI64
118 tristate "EUI64 address check" 126 tristate "EUI64 address check"
119 depends on IP6_NF_IPTABLES 127 depends on IP6_NF_IPTABLES
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index ac1dfebde175..4513eab77397 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
19obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 19obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
20obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o 20obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
21obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o 21obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
22obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
22 23
23# objects for l3 independent conntrack 24# objects for l3 independent conntrack
24nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o 25nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 99502c5da4c4..7083e1cfb2f5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -530,7 +530,7 @@ check_match(struct ip6t_entry_match *m,
530 unsigned int hookmask, 530 unsigned int hookmask,
531 unsigned int *i) 531 unsigned int *i)
532{ 532{
533 struct ip6t_match *match; 533 struct xt_match *match;
534 int ret; 534 int ret;
535 535
536 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, 536 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
@@ -564,14 +564,14 @@ err:
564 return ret; 564 return ret;
565} 565}
566 566
567static struct ip6t_target ip6t_standard_target; 567static struct xt_target ip6t_standard_target;
568 568
569static inline int 569static inline int
570check_entry(struct ip6t_entry *e, const char *name, unsigned int size, 570check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
571 unsigned int *i) 571 unsigned int *i)
572{ 572{
573 struct ip6t_entry_target *t; 573 struct ip6t_entry_target *t;
574 struct ip6t_target *target; 574 struct xt_target *target;
575 int ret; 575 int ret;
576 unsigned int j; 576 unsigned int j;
577 577
@@ -1348,13 +1348,13 @@ icmp6_checkentry(const char *tablename,
1348} 1348}
1349 1349
1350/* The built-in targets: standard (NULL) and error. */ 1350/* The built-in targets: standard (NULL) and error. */
1351static struct ip6t_target ip6t_standard_target = { 1351static struct xt_target ip6t_standard_target = {
1352 .name = IP6T_STANDARD_TARGET, 1352 .name = IP6T_STANDARD_TARGET,
1353 .targetsize = sizeof(int), 1353 .targetsize = sizeof(int),
1354 .family = AF_INET6, 1354 .family = AF_INET6,
1355}; 1355};
1356 1356
1357static struct ip6t_target ip6t_error_target = { 1357static struct xt_target ip6t_error_target = {
1358 .name = IP6T_ERROR_TARGET, 1358 .name = IP6T_ERROR_TARGET,
1359 .target = ip6t_error, 1359 .target = ip6t_error,
1360 .targetsize = IP6T_FUNCTION_MAXNAMELEN, 1360 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
@@ -1371,7 +1371,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
1371 .get = do_ip6t_get_ctl, 1371 .get = do_ip6t_get_ctl,
1372}; 1372};
1373 1373
1374static struct ip6t_match icmp6_matchstruct = { 1374static struct xt_match icmp6_matchstruct = {
1375 .name = "icmp6", 1375 .name = "icmp6",
1376 .match = &icmp6_match, 1376 .match = &icmp6_match,
1377 .matchsize = sizeof(struct ip6t_icmp), 1377 .matchsize = sizeof(struct ip6t_icmp),
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
index 435750f664dd..04e500172fb4 100644
--- a/net/ipv6/netfilter/ip6t_HL.c
+++ b/net/ipv6/netfilter/ip6t_HL.c
@@ -9,12 +9,13 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/ip.h> 11#include <linux/ip.h>
12#include <linux/ipv6.h>
12 13
13#include <linux/netfilter_ipv6/ip6_tables.h> 14#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter_ipv6/ip6t_HL.h> 15#include <linux/netfilter_ipv6/ip6t_HL.h>
15 16
16MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 17MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
17MODULE_DESCRIPTION("IP tables Hop Limit modification module"); 18MODULE_DESCRIPTION("IP6 tables Hop Limit modification module");
18MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
19 20
20static unsigned int ip6t_hl_target(struct sk_buff **pskb, 21static unsigned int ip6t_hl_target(struct sk_buff **pskb,
@@ -52,10 +53,9 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb,
52 break; 53 break;
53 } 54 }
54 55
55 if (new_hl != ip6h->hop_limit) 56 ip6h->hop_limit = new_hl;
56 ip6h->hop_limit = new_hl;
57 57
58 return IP6T_CONTINUE; 58 return XT_CONTINUE;
59} 59}
60 60
61static int ip6t_hl_checkentry(const char *tablename, 61static int ip6t_hl_checkentry(const char *tablename,
@@ -79,8 +79,9 @@ static int ip6t_hl_checkentry(const char *tablename,
79 return 1; 79 return 1;
80} 80}
81 81
82static struct ip6t_target ip6t_HL = { 82static struct xt_target ip6t_HL = {
83 .name = "HL", 83 .name = "HL",
84 .family = AF_INET6,
84 .target = ip6t_hl_target, 85 .target = ip6t_hl_target,
85 .targetsize = sizeof(struct ip6t_HL_info), 86 .targetsize = sizeof(struct ip6t_HL_info),
86 .table = "mangle", 87 .table = "mangle",
@@ -90,12 +91,12 @@ static struct ip6t_target ip6t_HL = {
90 91
91static int __init ip6t_hl_init(void) 92static int __init ip6t_hl_init(void)
92{ 93{
93 return ip6t_register_target(&ip6t_HL); 94 return xt_register_target(&ip6t_HL);
94} 95}
95 96
96static void __exit ip6t_hl_fini(void) 97static void __exit ip6t_hl_fini(void)
97{ 98{
98 ip6t_unregister_target(&ip6t_HL); 99 xt_unregister_target(&ip6t_HL);
99} 100}
100 101
101module_init(ip6t_hl_init); 102module_init(ip6t_hl_init);
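
This target, like the matches converted below, now registers through the shared x_tables core, which is why the struct gains .family = AF_INET6 and the ip6t_register_*/ip6t_unregister_* wrappers disappear. A toy, purely illustrative registry keyed by (name, family) shows what that extra field buys; toy_register_target and toy_find_target are not the x_tables API.

/* illustrative sketch -- not part of the patch */
#include <stdio.h>
#include <string.h>

enum family { FAM_INET = 2, FAM_INET6 = 10 };

struct toy_target {
	const char *name;
	enum family family;
};

#define MAX_TARGETS 8
static struct toy_target *registry[MAX_TARGETS];

/* One registration function serves every family; lookups filter on
 * both name and family, so an IPv6 "HL" cannot collide with an IPv4
 * target of the same name. */
static int toy_register_target(struct toy_target *t)
{
	for (int i = 0; i < MAX_TARGETS; i++) {
		if (!registry[i]) {
			registry[i] = t;
			return 0;
		}
	}
	return -1;
}

static struct toy_target *toy_find_target(const char *name, enum family fam)
{
	for (int i = 0; i < MAX_TARGETS; i++)
		if (registry[i] && registry[i]->family == fam &&
		    strcmp(registry[i]->name, name) == 0)
			return registry[i];
	return NULL;
}

int main(void)
{
	struct toy_target hl6 = { .name = "HL", .family = FAM_INET6 };

	toy_register_target(&hl6);
	printf("%s\n", toy_find_target("HL", FAM_INET6) ? "found" : "missing");
	return 0;
}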
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 33b1faa90d74..5587a77b884c 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -21,6 +21,7 @@
21#include <net/tcp.h> 21#include <net/tcp.h>
22#include <net/ipv6.h> 22#include <net/ipv6.h>
23#include <linux/netfilter.h> 23#include <linux/netfilter.h>
24#include <linux/netfilter/x_tables.h>
24#include <linux/netfilter_ipv6/ip6_tables.h> 25#include <linux/netfilter_ipv6/ip6_tables.h>
25 26
26MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>"); 27MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
@@ -442,7 +443,7 @@ ip6t_log_target(struct sk_buff **pskb,
442 443
443 ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, 444 ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
444 loginfo->prefix); 445 loginfo->prefix);
445 return IP6T_CONTINUE; 446 return XT_CONTINUE;
446} 447}
447 448
448 449
@@ -466,8 +467,9 @@ static int ip6t_log_checkentry(const char *tablename,
466 return 1; 467 return 1;
467} 468}
468 469
469static struct ip6t_target ip6t_log_reg = { 470static struct xt_target ip6t_log_reg = {
470 .name = "LOG", 471 .name = "LOG",
472 .family = AF_INET6,
471 .target = ip6t_log_target, 473 .target = ip6t_log_target,
472 .targetsize = sizeof(struct ip6t_log_info), 474 .targetsize = sizeof(struct ip6t_log_info),
473 .checkentry = ip6t_log_checkentry, 475 .checkentry = ip6t_log_checkentry,
@@ -482,8 +484,11 @@ static struct nf_logger ip6t_logger = {
482 484
483static int __init ip6t_log_init(void) 485static int __init ip6t_log_init(void)
484{ 486{
485 if (ip6t_register_target(&ip6t_log_reg)) 487 int ret;
486 return -EINVAL; 488
489 ret = xt_register_target(&ip6t_log_reg);
490 if (ret < 0)
491 return ret;
487 if (nf_log_register(PF_INET6, &ip6t_logger) < 0) { 492 if (nf_log_register(PF_INET6, &ip6t_logger) < 0) {
488 printk(KERN_WARNING "ip6t_LOG: not logging via system console " 493 printk(KERN_WARNING "ip6t_LOG: not logging via system console "
489 "since somebody else already registered for PF_INET6\n"); 494 "since somebody else already registered for PF_INET6\n");
@@ -497,7 +502,7 @@ static int __init ip6t_log_init(void)
497static void __exit ip6t_log_fini(void) 502static void __exit ip6t_log_fini(void)
498{ 503{
499 nf_log_unregister_logger(&ip6t_logger); 504 nf_log_unregister_logger(&ip6t_logger);
500 ip6t_unregister_target(&ip6t_log_reg); 505 xt_unregister_target(&ip6t_log_reg);
501} 506}
502 507
503module_init(ip6t_log_init); 508module_init(ip6t_log_init);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 311eae82feb3..278349c18793 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -26,6 +26,7 @@
26#include <net/ip6_fib.h> 26#include <net/ip6_fib.h>
27#include <net/ip6_route.h> 27#include <net/ip6_route.h>
28#include <net/flow.h> 28#include <net/flow.h>
29#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_ipv6/ip6_tables.h> 30#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <linux/netfilter_ipv6/ip6t_REJECT.h> 31#include <linux/netfilter_ipv6/ip6t_REJECT.h>
31 32
@@ -234,7 +235,7 @@ static int check(const char *tablename,
234 } else if (rejinfo->with == IP6T_TCP_RESET) { 235 } else if (rejinfo->with == IP6T_TCP_RESET) {
235 /* Must specify that it's a TCP packet */ 236 /* Must specify that it's a TCP packet */
236 if (e->ipv6.proto != IPPROTO_TCP 237 if (e->ipv6.proto != IPPROTO_TCP
237 || (e->ipv6.invflags & IP6T_INV_PROTO)) { 238 || (e->ipv6.invflags & XT_INV_PROTO)) {
238 DEBUGP("ip6t_REJECT: TCP_RESET illegal for non-tcp\n"); 239 DEBUGP("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
239 return 0; 240 return 0;
240 } 241 }
@@ -242,8 +243,9 @@ static int check(const char *tablename,
242 return 1; 243 return 1;
243} 244}
244 245
245static struct ip6t_target ip6t_reject_reg = { 246static struct xt_target ip6t_reject_reg = {
246 .name = "REJECT", 247 .name = "REJECT",
248 .family = AF_INET6,
247 .target = reject6_target, 249 .target = reject6_target,
248 .targetsize = sizeof(struct ip6t_reject_info), 250 .targetsize = sizeof(struct ip6t_reject_info),
249 .table = "filter", 251 .table = "filter",
@@ -255,12 +257,12 @@ static struct ip6t_target ip6t_reject_reg = {
255 257
256static int __init ip6t_reject_init(void) 258static int __init ip6t_reject_init(void)
257{ 259{
258 return ip6t_register_target(&ip6t_reject_reg); 260 return xt_register_target(&ip6t_reject_reg);
259} 261}
260 262
261static void __exit ip6t_reject_fini(void) 263static void __exit ip6t_reject_fini(void)
262{ 264{
263 ip6t_unregister_target(&ip6t_reject_reg); 265 xt_unregister_target(&ip6t_reject_reg);
264} 266}
265 267
266module_init(ip6t_reject_init); 268module_init(ip6t_reject_init);
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 46486645eb75..456c76adcbf6 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -15,6 +15,7 @@
15#include <net/checksum.h> 15#include <net/checksum.h>
16#include <net/ipv6.h> 16#include <net/ipv6.h>
17 17
18#include <linux/netfilter/x_tables.h>
18#include <linux/netfilter_ipv6/ip6_tables.h> 19#include <linux/netfilter_ipv6/ip6_tables.h>
19#include <linux/netfilter_ipv6/ip6t_ah.h> 20#include <linux/netfilter_ipv6/ip6t_ah.h>
20 21
@@ -118,8 +119,9 @@ checkentry(const char *tablename,
118 return 1; 119 return 1;
119} 120}
120 121
121static struct ip6t_match ah_match = { 122static struct xt_match ah_match = {
122 .name = "ah", 123 .name = "ah",
124 .family = AF_INET6,
123 .match = match, 125 .match = match,
124 .matchsize = sizeof(struct ip6t_ah), 126 .matchsize = sizeof(struct ip6t_ah),
125 .checkentry = checkentry, 127 .checkentry = checkentry,
@@ -128,12 +130,12 @@ static struct ip6t_match ah_match = {
128 130
129static int __init ip6t_ah_init(void) 131static int __init ip6t_ah_init(void)
130{ 132{
131 return ip6t_register_match(&ah_match); 133 return xt_register_match(&ah_match);
132} 134}
133 135
134static void __exit ip6t_ah_fini(void) 136static void __exit ip6t_ah_fini(void)
135{ 137{
136 ip6t_unregister_match(&ah_match); 138 xt_unregister_match(&ah_match);
137} 139}
138 140
139module_init(ip6t_ah_init); 141module_init(ip6t_ah_init);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 4f6b84c8f4ab..967bed71d4a8 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -12,6 +12,7 @@
12#include <linux/ipv6.h> 12#include <linux/ipv6.h>
13#include <linux/if_ether.h> 13#include <linux/if_ether.h>
14 14
15#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter_ipv6/ip6_tables.h> 16#include <linux/netfilter_ipv6/ip6_tables.h>
16 17
17MODULE_DESCRIPTION("IPv6 EUI64 address checking match"); 18MODULE_DESCRIPTION("IPv6 EUI64 address checking match");
@@ -61,8 +62,9 @@ match(const struct sk_buff *skb,
61 return 0; 62 return 0;
62} 63}
63 64
64static struct ip6t_match eui64_match = { 65static struct xt_match eui64_match = {
65 .name = "eui64", 66 .name = "eui64",
67 .family = AF_INET6,
66 .match = match, 68 .match = match,
67 .matchsize = sizeof(int), 69 .matchsize = sizeof(int),
68 .hooks = (1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) | 70 .hooks = (1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) |
@@ -72,12 +74,12 @@ static struct ip6t_match eui64_match = {
72 74
73static int __init ip6t_eui64_init(void) 75static int __init ip6t_eui64_init(void)
74{ 76{
75 return ip6t_register_match(&eui64_match); 77 return xt_register_match(&eui64_match);
76} 78}
77 79
78static void __exit ip6t_eui64_fini(void) 80static void __exit ip6t_eui64_fini(void)
79{ 81{
80 ip6t_unregister_match(&eui64_match); 82 xt_unregister_match(&eui64_match);
81} 83}
82 84
83module_init(ip6t_eui64_init); 85module_init(ip6t_eui64_init);
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index cd22eaaccdca..5a5da71321b6 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -14,6 +14,7 @@
14#include <net/checksum.h> 14#include <net/checksum.h>
15#include <net/ipv6.h> 15#include <net/ipv6.h>
16 16
17#include <linux/netfilter/x_tables.h>
17#include <linux/netfilter_ipv6/ip6_tables.h> 18#include <linux/netfilter_ipv6/ip6_tables.h>
18#include <linux/netfilter_ipv6/ip6t_frag.h> 19#include <linux/netfilter_ipv6/ip6t_frag.h>
19 20
@@ -135,8 +136,9 @@ checkentry(const char *tablename,
135 return 1; 136 return 1;
136} 137}
137 138
138static struct ip6t_match frag_match = { 139static struct xt_match frag_match = {
139 .name = "frag", 140 .name = "frag",
141 .family = AF_INET6,
140 .match = match, 142 .match = match,
141 .matchsize = sizeof(struct ip6t_frag), 143 .matchsize = sizeof(struct ip6t_frag),
142 .checkentry = checkentry, 144 .checkentry = checkentry,
@@ -145,12 +147,12 @@ static struct ip6t_match frag_match = {
145 147
146static int __init ip6t_frag_init(void) 148static int __init ip6t_frag_init(void)
147{ 149{
148 return ip6t_register_match(&frag_match); 150 return xt_register_match(&frag_match);
149} 151}
150 152
151static void __exit ip6t_frag_fini(void) 153static void __exit ip6t_frag_fini(void)
152{ 154{
153 ip6t_unregister_match(&frag_match); 155 xt_unregister_match(&frag_match);
154} 156}
155 157
156module_init(ip6t_frag_init); 158module_init(ip6t_frag_init);
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 3f25babe0440..d2373c7cd354 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -16,6 +16,7 @@
16 16
17#include <asm/byteorder.h> 17#include <asm/byteorder.h>
18 18
19#include <linux/netfilter/x_tables.h>
19#include <linux/netfilter_ipv6/ip6_tables.h> 20#include <linux/netfilter_ipv6/ip6_tables.h>
20#include <linux/netfilter_ipv6/ip6t_opts.h> 21#include <linux/netfilter_ipv6/ip6t_opts.h>
21 22
diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c
index 44a729e17c48..601cc1211c62 100644
--- a/net/ipv6/netfilter/ip6t_hl.c
+++ b/net/ipv6/netfilter/ip6t_hl.c
@@ -8,11 +8,12 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/ipv6.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/skbuff.h> 13#include <linux/skbuff.h>
13 14
14#include <linux/netfilter_ipv6/ip6t_hl.h> 15#include <linux/netfilter_ipv6/ip6t_hl.h>
15#include <linux/netfilter_ipv6/ip6_tables.h> 16#include <linux/netfilter/x_tables.h>
16 17
17MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>"); 18MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
18MODULE_DESCRIPTION("IP tables Hop Limit matching module"); 19MODULE_DESCRIPTION("IP tables Hop Limit matching module");
@@ -48,8 +49,9 @@ static int match(const struct sk_buff *skb,
48 return 0; 49 return 0;
49} 50}
50 51
51static struct ip6t_match hl_match = { 52static struct xt_match hl_match = {
52 .name = "hl", 53 .name = "hl",
54 .family = AF_INET6,
53 .match = match, 55 .match = match,
54 .matchsize = sizeof(struct ip6t_hl_info), 56 .matchsize = sizeof(struct ip6t_hl_info),
55 .me = THIS_MODULE, 57 .me = THIS_MODULE,
@@ -57,13 +59,12 @@ static struct ip6t_match hl_match = {
57 59
58static int __init ip6t_hl_init(void) 60static int __init ip6t_hl_init(void)
59{ 61{
60 return ip6t_register_match(&hl_match); 62 return xt_register_match(&hl_match);
61} 63}
62 64
63static void __exit ip6t_hl_fini(void) 65static void __exit ip6t_hl_fini(void)
64{ 66{
65 ip6t_unregister_match(&hl_match); 67 xt_unregister_match(&hl_match);
66
67} 68}
68 69
69module_init(ip6t_hl_init); 70module_init(ip6t_hl_init);
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 3093c398002f..26ac084adefc 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -18,6 +18,7 @@
18#include <net/checksum.h> 18#include <net/checksum.h>
19#include <net/ipv6.h> 19#include <net/ipv6.h>
20 20
21#include <linux/netfilter/x_tables.h>
21#include <linux/netfilter_ipv6/ip6_tables.h> 22#include <linux/netfilter_ipv6/ip6_tables.h>
22#include <linux/netfilter_ipv6/ip6t_ipv6header.h> 23#include <linux/netfilter_ipv6/ip6t_ipv6header.h>
23 24
@@ -140,8 +141,9 @@ ipv6header_checkentry(const char *tablename,
140 return 1; 141 return 1;
141} 142}
142 143
143static struct ip6t_match ip6t_ipv6header_match = { 144static struct xt_match ip6t_ipv6header_match = {
144 .name = "ipv6header", 145 .name = "ipv6header",
146 .family = AF_INET6,
145 .match = &ipv6header_match, 147 .match = &ipv6header_match,
146 .matchsize = sizeof(struct ip6t_ipv6header_info), 148 .matchsize = sizeof(struct ip6t_ipv6header_info),
147 .checkentry = &ipv6header_checkentry, 149 .checkentry = &ipv6header_checkentry,
@@ -151,12 +153,12 @@ static struct ip6t_match ip6t_ipv6header_match = {
151 153
152static int __init ipv6header_init(void) 154static int __init ipv6header_init(void)
153{ 155{
154 return ip6t_register_match(&ip6t_ipv6header_match); 156 return xt_register_match(&ip6t_ipv6header_match);
155} 157}
156 158
157static void __exit ipv6header_exit(void) 159static void __exit ipv6header_exit(void)
158{ 160{
159 ip6t_unregister_match(&ip6t_ipv6header_match); 161 xt_unregister_match(&ip6t_ipv6header_match);
160} 162}
161 163
162module_init(ipv6header_init); 164module_init(ipv6header_init);
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c
new file mode 100644
index 000000000000..2c7efc6a506d
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_mh.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C)2006 USAGI/WIDE Project
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Author:
9 * Masahide NAKAMURA @USAGI <masahide.nakamura.cz@hitachi.com>
10 *
11 * Based on net/netfilter/xt_tcpudp.c
12 *
13 */
14#include <linux/types.h>
15#include <linux/module.h>
16#include <net/ip.h>
17#include <linux/ipv6.h>
18#include <net/ipv6.h>
19#include <net/mip6.h>
20
21#include <linux/netfilter/x_tables.h>
22#include <linux/netfilter_ipv6/ip6t_mh.h>
23
24MODULE_DESCRIPTION("ip6t_tables match for MH");
25MODULE_LICENSE("GPL");
26
27#ifdef DEBUG_IP_FIREWALL_USER
28#define duprintf(format, args...) printk(format , ## args)
29#else
30#define duprintf(format, args...)
31#endif
32
33/* Returns 1 if the type is matched by the range, 0 otherwise */
34static inline int
35type_match(u_int8_t min, u_int8_t max, u_int8_t type, int invert)
36{
37 int ret;
38
39 ret = (type >= min && type <= max) ^ invert;
40 return ret;
41}
42
43static int
44match(const struct sk_buff *skb,
45 const struct net_device *in,
46 const struct net_device *out,
47 const struct xt_match *match,
48 const void *matchinfo,
49 int offset,
50 unsigned int protoff,
51 int *hotdrop)
52{
53 struct ip6_mh _mh, *mh;
54 const struct ip6t_mh *mhinfo = matchinfo;
55
56 /* Must not be a fragment. */
57 if (offset)
58 return 0;
59
60 mh = skb_header_pointer(skb, protoff, sizeof(_mh), &_mh);
61 if (mh == NULL) {
62 /* We've been asked to examine this packet, and we
63 can't. Hence, no choice but to drop. */
64 duprintf("Dropping evil MH tinygram.\n");
65 *hotdrop = 1;
66 return 0;
67 }
68
69 return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type,
70 !!(mhinfo->invflags & IP6T_MH_INV_TYPE));
71}
72
73/* Called when user tries to insert an entry of this type. */
74static int
75mh_checkentry(const char *tablename,
76 const void *entry,
77 const struct xt_match *match,
78 void *matchinfo,
79 unsigned int hook_mask)
80{
81 const struct ip6t_mh *mhinfo = matchinfo;
82
83 /* Must specify no unknown invflags */
84 return !(mhinfo->invflags & ~IP6T_MH_INV_MASK);
85}
86
87static struct xt_match mh_match = {
88 .name = "mh",
89 .family = AF_INET6,
90 .checkentry = mh_checkentry,
91 .match = match,
92 .matchsize = sizeof(struct ip6t_mh),
93 .proto = IPPROTO_MH,
94 .me = THIS_MODULE,
95};
96
97static int __init ip6t_mh_init(void)
98{
99 return xt_register_match(&mh_match);
100}
101
102static void __exit ip6t_mh_fini(void)
103{
104 xt_unregister_match(&mh_match);
105}
106
107module_init(ip6t_mh_init);
108module_exit(ip6t_mh_fini);
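
The new match boils down to a range test on the mobility-header type, optionally inverted. The same predicate compiled standalone; the example values assume MH type 5 (Binding Update).

/* illustrative sketch -- not part of the patch */
#include <stdio.h>

/* Returns 1 if type lies in [min, max], XOR'ed with the invert flag,
 * exactly as the match() callback above evaluates it. */
static int type_match(unsigned char min, unsigned char max,
		      unsigned char type, int invert)
{
	return (type >= min && type <= max) ^ invert;
}

int main(void)
{
	/* Match Binding Updates (type 5) only. */
	printf("%d\n", type_match(5, 5, 5, 0));  /* 1 */
	printf("%d\n", type_match(5, 5, 6, 0));  /* 0 */
	/* Inverted range: everything except Binding Updates. */
	printf("%d\n", type_match(5, 5, 6, 1));  /* 1 */
	return 0;
}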
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c
index 4eb9bbc4ebc3..43738bba00b5 100644
--- a/net/ipv6/netfilter/ip6t_owner.c
+++ b/net/ipv6/netfilter/ip6t_owner.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/netfilter_ipv6/ip6t_owner.h> 17#include <linux/netfilter_ipv6/ip6t_owner.h>
18#include <linux/netfilter_ipv6/ip6_tables.h> 18#include <linux/netfilter_ipv6/ip6_tables.h>
19#include <linux/netfilter/x_tables.h>
19 20
20MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); 21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
21MODULE_DESCRIPTION("IP6 tables owner matching module"); 22MODULE_DESCRIPTION("IP6 tables owner matching module");
@@ -69,8 +70,9 @@ checkentry(const char *tablename,
69 return 1; 70 return 1;
70} 71}
71 72
72static struct ip6t_match owner_match = { 73static struct xt_match owner_match = {
73 .name = "owner", 74 .name = "owner",
75 .family = AF_INET6,
74 .match = match, 76 .match = match,
75 .matchsize = sizeof(struct ip6t_owner_info), 77 .matchsize = sizeof(struct ip6t_owner_info),
76 .hooks = (1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING), 78 .hooks = (1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING),
@@ -80,12 +82,12 @@ static struct ip6t_match owner_match = {
80 82
81static int __init ip6t_owner_init(void) 83static int __init ip6t_owner_init(void)
82{ 84{
83 return ip6t_register_match(&owner_match); 85 return xt_register_match(&owner_match);
84} 86}
85 87
86static void __exit ip6t_owner_fini(void) 88static void __exit ip6t_owner_fini(void)
87{ 89{
88 ip6t_unregister_match(&owner_match); 90 xt_unregister_match(&owner_match);
89} 91}
90 92
91module_init(ip6t_owner_init); 93module_init(ip6t_owner_init);
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 54d7d14134fd..81ab00d8c182 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -16,6 +16,7 @@
16 16
17#include <asm/byteorder.h> 17#include <asm/byteorder.h>
18 18
19#include <linux/netfilter/x_tables.h>
19#include <linux/netfilter_ipv6/ip6_tables.h> 20#include <linux/netfilter_ipv6/ip6_tables.h>
20#include <linux/netfilter_ipv6/ip6t_rt.h> 21#include <linux/netfilter_ipv6/ip6t_rt.h>
21 22
@@ -221,8 +222,9 @@ checkentry(const char *tablename,
221 return 1; 222 return 1;
222} 223}
223 224
224static struct ip6t_match rt_match = { 225static struct xt_match rt_match = {
225 .name = "rt", 226 .name = "rt",
227 .family = AF_INET6,
226 .match = match, 228 .match = match,
227 .matchsize = sizeof(struct ip6t_rt), 229 .matchsize = sizeof(struct ip6t_rt),
228 .checkentry = checkentry, 230 .checkentry = checkentry,
@@ -231,12 +233,12 @@ static struct ip6t_match rt_match = {
231 233
232static int __init ip6t_rt_init(void) 234static int __init ip6t_rt_init(void)
233{ 235{
234 return ip6t_register_match(&rt_match); 236 return xt_register_match(&rt_match);
235} 237}
236 238
237static void __exit ip6t_rt_fini(void) 239static void __exit ip6t_rt_fini(void)
238{ 240{
239 ip6t_unregister_match(&rt_match); 241 xt_unregister_match(&rt_match);
240} 242}
241 243
242module_init(ip6t_rt_init); 244module_init(ip6t_rt_init);
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 2fc07c74decf..112a21d0c6da 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -19,25 +19,6 @@ MODULE_DESCRIPTION("ip6tables filter table");
19 19
20#define FILTER_VALID_HOOKS ((1 << NF_IP6_LOCAL_IN) | (1 << NF_IP6_FORWARD) | (1 << NF_IP6_LOCAL_OUT)) 20#define FILTER_VALID_HOOKS ((1 << NF_IP6_LOCAL_IN) | (1 << NF_IP6_FORWARD) | (1 << NF_IP6_LOCAL_OUT))
21 21
22/* Standard entry. */
23struct ip6t_standard
24{
25 struct ip6t_entry entry;
26 struct ip6t_standard_target target;
27};
28
29struct ip6t_error_target
30{
31 struct ip6t_entry_target target;
32 char errorname[IP6T_FUNCTION_MAXNAMELEN];
33};
34
35struct ip6t_error
36{
37 struct ip6t_entry entry;
38 struct ip6t_error_target target;
39};
40
41static struct 22static struct
42{ 23{
43 struct ip6t_replace repl; 24 struct ip6t_replace repl;
@@ -92,7 +73,7 @@ static struct
92 } 73 }
93}; 74};
94 75
95static struct ip6t_table packet_filter = { 76static struct xt_table packet_filter = {
96 .name = "filter", 77 .name = "filter",
97 .valid_hooks = FILTER_VALID_HOOKS, 78 .valid_hooks = FILTER_VALID_HOOKS,
98 .lock = RW_LOCK_UNLOCKED, 79 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 6250e86a6ddc..5f5aa0e51478 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -29,25 +29,6 @@ MODULE_DESCRIPTION("ip6tables mangle table");
29#define DEBUGP(x, args...) 29#define DEBUGP(x, args...)
30#endif 30#endif
31 31
32/* Standard entry. */
33struct ip6t_standard
34{
35 struct ip6t_entry entry;
36 struct ip6t_standard_target target;
37};
38
39struct ip6t_error_target
40{
41 struct ip6t_entry_target target;
42 char errorname[IP6T_FUNCTION_MAXNAMELEN];
43};
44
45struct ip6t_error
46{
47 struct ip6t_entry entry;
48 struct ip6t_error_target target;
49};
50
51static struct 32static struct
52{ 33{
53 struct ip6t_replace repl; 34 struct ip6t_replace repl;
@@ -122,7 +103,7 @@ static struct
122 } 103 }
123}; 104};
124 105
125static struct ip6t_table packet_mangler = { 106static struct xt_table packet_mangler = {
126 .name = "mangle", 107 .name = "mangle",
127 .valid_hooks = MANGLE_VALID_HOOKS, 108 .valid_hooks = MANGLE_VALID_HOOKS,
128 .lock = RW_LOCK_UNLOCKED, 109 .lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index b4154da575c0..277bf34638b4 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -14,25 +14,6 @@
14#define DEBUGP(x, args...) 14#define DEBUGP(x, args...)
15#endif 15#endif
16 16
17/* Standard entry. */
18struct ip6t_standard
19{
20 struct ip6t_entry entry;
21 struct ip6t_standard_target target;
22};
23
24struct ip6t_error_target
25{
26 struct ip6t_entry_target target;
27 char errorname[IP6T_FUNCTION_MAXNAMELEN];
28};
29
30struct ip6t_error
31{
32 struct ip6t_entry entry;
33 struct ip6t_error_target target;
34};
35
36static struct 17static struct
37{ 18{
38 struct ip6t_replace repl; 19 struct ip6t_replace repl;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4ae1b19ada5d..c2d8059e754e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -815,7 +815,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
815 if (final_p) 815 if (final_p)
816 ipv6_addr_copy(&fl.fl6_dst, final_p); 816 ipv6_addr_copy(&fl.fl6_dst, final_p);
817 817
818 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 818 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
819 goto out; 819 goto out;
820 820
821 if (hlimit < 0) { 821 if (hlimit < 0) {
@@ -1094,10 +1094,19 @@ static void rawv6_close(struct sock *sk, long timeout)
1094 1094
1095static int rawv6_init_sk(struct sock *sk) 1095static int rawv6_init_sk(struct sock *sk)
1096{ 1096{
1097 if (inet_sk(sk)->num == IPPROTO_ICMPV6) { 1097 struct raw6_sock *rp = raw6_sk(sk);
1098 struct raw6_sock *rp = raw6_sk(sk); 1098
1099 switch (inet_sk(sk)->num) {
1100 case IPPROTO_ICMPV6:
1099 rp->checksum = 1; 1101 rp->checksum = 1;
1100 rp->offset = 2; 1102 rp->offset = 2;
1103 break;
1104 case IPPROTO_MH:
1105 rp->checksum = 1;
1106 rp->offset = 4;
1107 break;
1108 default:
1109 break;
1101 } 1110 }
1102 return(0); 1111 return(0);
1103} 1112}
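
rawv6_init_sk() now also enables kernel checksumming for Mobility Header sockets, with the checksum field at byte offset 4 of the MH (ICMPv6 keeps offset 2). A standalone sketch of the per-protocol defaults; PROTO_ICMPV6 and PROTO_MH are the standard IANA protocol numbers (58 and 135), not the kernel macros.

/* illustrative sketch -- not part of the patch */
#include <stdio.h>

#define PROTO_ICMPV6 58
#define PROTO_MH     135

struct raw6_opts {
	int checksum;    /* kernel computes/verifies the checksum */
	int offset;      /* byte offset of the checksum field in the payload */
};

static void raw6_defaults(struct raw6_opts *rp, int protocol)
{
	switch (protocol) {
	case PROTO_ICMPV6:
		rp->checksum = 1;
		rp->offset = 2;      /* icmp6_cksum */
		break;
	case PROTO_MH:
		rp->checksum = 1;
		rp->offset = 4;      /* ip6mh_cksum */
		break;
	default:
		rp->checksum = 0;    /* other raw sockets keep checksumming off */
		rp->offset = 0;
		break;
	}
}

int main(void)
{
	struct raw6_opts rp;

	raw6_defaults(&rp, PROTO_MH);
	printf("checksum=%d offset=%d\n", rp.checksum, rp.offset);
	return 0;
}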
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5f0043c30b70..19c906f6efa1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -311,12 +311,21 @@ static inline void rt6_probe(struct rt6_info *rt)
311static int inline rt6_check_dev(struct rt6_info *rt, int oif) 311static int inline rt6_check_dev(struct rt6_info *rt, int oif)
312{ 312{
313 struct net_device *dev = rt->rt6i_dev; 313 struct net_device *dev = rt->rt6i_dev;
314 if (!oif || dev->ifindex == oif) 314 int ret = 0;
315
316 if (!oif)
315 return 2; 317 return 2;
316 if ((dev->flags & IFF_LOOPBACK) && 318 if (dev->flags & IFF_LOOPBACK) {
317 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) 319 if (!WARN_ON(rt->rt6i_idev == NULL) &&
318 return 1; 320 rt->rt6i_idev->dev->ifindex == oif)
319 return 0; 321 ret = 1;
322 else
323 return 0;
324 }
325 if (dev->ifindex == oif)
326 return 2;
327
328 return ret;
320} 329}
321 330
322static int inline rt6_check_neigh(struct rt6_info *rt) 331static int inline rt6_check_neigh(struct rt6_info *rt)
@@ -2040,7 +2049,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2040 2049
2041 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags); 2050 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2042 if (nlh == NULL) 2051 if (nlh == NULL)
2043 return -ENOBUFS; 2052 return -EMSGSIZE;
2044 2053
2045 rtm = nlmsg_data(nlh); 2054 rtm = nlmsg_data(nlh);
2046 rtm->rtm_family = AF_INET6; 2055 rtm->rtm_family = AF_INET6;
@@ -2111,7 +2120,8 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2111 return nlmsg_end(skb, nlh); 2120 return nlmsg_end(skb, nlh);
2112 2121
2113nla_put_failure: 2122nla_put_failure:
2114 return nlmsg_cancel(skb, nlh); 2123 nlmsg_cancel(skb, nlh);
2124 return -EMSGSIZE;
2115} 2125}
2116 2126
2117int rt6_dump_route(struct rt6_info *rt, void *p_arg) 2127int rt6_dump_route(struct rt6_info *rt, void *p_arg)
@@ -2222,9 +2232,12 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2222 goto errout; 2232 goto errout;
2223 2233
2224 err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0); 2234 err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0);
2225 /* failure implies BUG in rt6_nlmsg_size() */ 2235 if (err < 0) {
2226 BUG_ON(err < 0); 2236 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2227 2237 WARN_ON(err == -EMSGSIZE);
2238 kfree_skb(skb);
2239 goto errout;
2240 }
2228 err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any()); 2241 err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any());
2229errout: 2242errout:
2230 if (err < 0) 2243 if (err < 0)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 77b7b0911438..47cfeadac6dd 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -686,7 +686,8 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
686 goto done; 686 goto done;
687 dev = t->dev; 687 dev = t->dev;
688 } 688 }
689 err = unregister_netdevice(dev); 689 unregister_netdevice(dev);
690 err = 0;
690 break; 691 break;
691 692
692 default: 693 default:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c25e930c2c69..dcb7b00a737d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -265,7 +265,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
265 if (final_p) 265 if (final_p)
266 ipv6_addr_copy(&fl.fl6_dst, final_p); 266 ipv6_addr_copy(&fl.fl6_dst, final_p);
267 267
268 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 268 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
269 goto failure; 269 goto failure;
270 270
271 if (saddr == NULL) { 271 if (saddr == NULL) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f52a5c3cc0a3..15e5195549cb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -736,7 +736,7 @@ do_udp_sendmsg:
736 if (final_p) 736 if (final_p)
737 ipv6_addr_copy(&fl.fl6_dst, final_p); 737 ipv6_addr_copy(&fl.fl6_dst, final_p);
738 738
739 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) 739 if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
740 goto out; 740 goto out;
741 741
742 if (hlimit < 0) { 742 if (hlimit < 0) {
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 5e7d8a7d6414..0bc866c0d83c 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -25,6 +25,12 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
25 IP6_ECN_set_ce(inner_iph); 25 IP6_ECN_set_ce(inner_iph);
26} 26}
27 27
28static inline void ip6ip_ecn_decapsulate(struct sk_buff *skb)
29{
30 if (INET_ECN_is_ce(ipv6_get_dsfield(skb->nh.ipv6h)))
31 IP_ECN_set_ce(skb->h.ipiph);
32}
33
28/* Add encapsulation header. 34/* Add encapsulation header.
29 * 35 *
30 * The top IP header will be constructed per RFC 2401. The following fields 36 * The top IP header will be constructed per RFC 2401. The following fields
@@ -40,6 +46,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
40static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) 46static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
41{ 47{
42 struct dst_entry *dst = skb->dst; 48 struct dst_entry *dst = skb->dst;
49 struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
43 struct ipv6hdr *iph, *top_iph; 50 struct ipv6hdr *iph, *top_iph;
44 int dsfield; 51 int dsfield;
45 52
@@ -52,16 +59,24 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
52 skb->h.ipv6h = top_iph + 1; 59 skb->h.ipv6h = top_iph + 1;
53 60
54 top_iph->version = 6; 61 top_iph->version = 6;
55 top_iph->priority = iph->priority; 62 if (xdst->route->ops->family == AF_INET6) {
56 top_iph->flow_lbl[0] = iph->flow_lbl[0]; 63 top_iph->priority = iph->priority;
57 top_iph->flow_lbl[1] = iph->flow_lbl[1]; 64 top_iph->flow_lbl[0] = iph->flow_lbl[0];
58 top_iph->flow_lbl[2] = iph->flow_lbl[2]; 65 top_iph->flow_lbl[1] = iph->flow_lbl[1];
66 top_iph->flow_lbl[2] = iph->flow_lbl[2];
67 top_iph->nexthdr = IPPROTO_IPV6;
68 } else {
69 top_iph->priority = 0;
70 top_iph->flow_lbl[0] = 0;
71 top_iph->flow_lbl[1] = 0;
72 top_iph->flow_lbl[2] = 0;
73 top_iph->nexthdr = IPPROTO_IPIP;
74 }
59 dsfield = ipv6_get_dsfield(top_iph); 75 dsfield = ipv6_get_dsfield(top_iph);
60 dsfield = INET_ECN_encapsulate(dsfield, dsfield); 76 dsfield = INET_ECN_encapsulate(dsfield, dsfield);
61 if (x->props.flags & XFRM_STATE_NOECN) 77 if (x->props.flags & XFRM_STATE_NOECN)
62 dsfield &= ~INET_ECN_MASK; 78 dsfield &= ~INET_ECN_MASK;
63 ipv6_change_dsfield(top_iph, 0, dsfield); 79 ipv6_change_dsfield(top_iph, 0, dsfield);
64 top_iph->nexthdr = IPPROTO_IPV6;
65 top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT); 80 top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT);
66 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); 81 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
67 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); 82 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
@@ -72,7 +87,8 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
72{ 87{
73 int err = -EINVAL; 88 int err = -EINVAL;
74 89
75 if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6) 90 if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6
91 && skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPIP)
76 goto out; 92 goto out;
77 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 93 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
78 goto out; 94 goto out;
@@ -81,10 +97,16 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
81 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 97 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
82 goto out; 98 goto out;
83 99
84 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 100 if (skb->nh.raw[IP6CB(skb)->nhoff] == IPPROTO_IPV6) {
85 ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h); 101 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
86 if (!(x->props.flags & XFRM_STATE_NOECN)) 102 ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h);
87 ipip6_ecn_decapsulate(skb); 103 if (!(x->props.flags & XFRM_STATE_NOECN))
104 ipip6_ecn_decapsulate(skb);
105 } else {
106 if (!(x->props.flags & XFRM_STATE_NOECN))
107 ip6ip_ecn_decapsulate(skb);
108 skb->protocol = htons(ETH_P_IP);
109 }
88 skb->mac.raw = memmove(skb->data - skb->mac_len, 110 skb->mac.raw = memmove(skb->data - skb->mac_len,
89 skb->mac.raw, skb->mac_len); 111 skb->mac.raw, skb->mac_len);
90 skb->nh.raw = skb->data; 112 skb->nh.raw = skb->data;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8dffd4daae9c..59480e92177d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -131,13 +131,11 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
131 struct dst_entry *dst, *dst_prev; 131 struct dst_entry *dst, *dst_prev;
132 struct rt6_info *rt0 = (struct rt6_info*)(*dst_p); 132 struct rt6_info *rt0 = (struct rt6_info*)(*dst_p);
133 struct rt6_info *rt = rt0; 133 struct rt6_info *rt = rt0;
134 struct in6_addr *remote = &fl->fl6_dst;
135 struct in6_addr *local = &fl->fl6_src;
136 struct flowi fl_tunnel = { 134 struct flowi fl_tunnel = {
137 .nl_u = { 135 .nl_u = {
138 .ip6_u = { 136 .ip6_u = {
139 .saddr = *local, 137 .saddr = fl->fl6_src,
140 .daddr = *remote 138 .daddr = fl->fl6_dst,
141 } 139 }
142 } 140 }
143 }; 141 };
@@ -153,7 +151,6 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
153 for (i = 0; i < nx; i++) { 151 for (i = 0; i < nx; i++) {
154 struct dst_entry *dst1 = dst_alloc(&xfrm6_dst_ops); 152 struct dst_entry *dst1 = dst_alloc(&xfrm6_dst_ops);
155 struct xfrm_dst *xdst; 153 struct xfrm_dst *xdst;
156 int tunnel = 0;
157 154
158 if (unlikely(dst1 == NULL)) { 155 if (unlikely(dst1 == NULL)) {
159 err = -ENOBUFS; 156 err = -ENOBUFS;
@@ -177,19 +174,27 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
177 174
178 dst1->next = dst_prev; 175 dst1->next = dst_prev;
179 dst_prev = dst1; 176 dst_prev = dst1;
180 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 177
181 remote = __xfrm6_bundle_addr_remote(xfrm[i], remote);
182 local = __xfrm6_bundle_addr_local(xfrm[i], local);
183 tunnel = 1;
184 }
185 __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]); 178 __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]);
186 trailer_len += xfrm[i]->props.trailer_len; 179 trailer_len += xfrm[i]->props.trailer_len;
187 180
188 if (tunnel) { 181 if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
189 ipv6_addr_copy(&fl_tunnel.fl6_dst, remote); 182 unsigned short encap_family = xfrm[i]->props.family;
190 ipv6_addr_copy(&fl_tunnel.fl6_src, local); 183 switch(encap_family) {
184 case AF_INET:
185 fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
186 fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
187 break;
188 case AF_INET6:
189 ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
190 ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
191 break;
192 default:
193 BUG_ON(1);
194 }
195
191 err = xfrm_dst_lookup((struct xfrm_dst **) &rt, 196 err = xfrm_dst_lookup((struct xfrm_dst **) &rt,
192 &fl_tunnel, AF_INET6); 197 &fl_tunnel, encap_family);
193 if (err) 198 if (err)
194 goto error; 199 goto error;
195 } else 200 } else
@@ -208,6 +213,7 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
208 i = 0; 213 i = 0;
209 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) { 214 for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
210 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev; 215 struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
216 struct xfrm_state_afinfo *afinfo;
211 217
212 dst_prev->xfrm = xfrm[i++]; 218 dst_prev->xfrm = xfrm[i++];
213 dst_prev->dev = rt->u.dst.dev; 219 dst_prev->dev = rt->u.dst.dev;
@@ -224,7 +230,17 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
224 /* Copy neighbour for reachability confirmation */ 230 /* Copy neighbour for reachability confirmation */
225 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour); 231 dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
226 dst_prev->input = rt->u.dst.input; 232 dst_prev->input = rt->u.dst.input;
227 dst_prev->output = xfrm6_output; 233 /* XXX: When IPv4 is implemented as module and can be unloaded,
234 * we should manage reference to xfrm4_output in afinfo->output.
235 * Miyazawa
236 */
237 afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
238 if (!afinfo) {
239 dst = *dst_p;
240 goto error;
241 };
242 dst_prev->output = afinfo->output;
243 xfrm_state_put_afinfo(afinfo);
228 /* Sheit... I remember I did this right. Apparently, 244 /* Sheit... I remember I did this right. Apparently,
229 * it was magically lost, so this code needs audit */ 245 * it was magically lost, so this code needs audit */
230 x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL); 246 x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 9ddaa9d41539..60ad5f074e0a 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -171,6 +171,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
171 .init_tempsel = __xfrm6_init_tempsel, 171 .init_tempsel = __xfrm6_init_tempsel,
172 .tmpl_sort = __xfrm6_tmpl_sort, 172 .tmpl_sort = __xfrm6_tmpl_sort,
173 .state_sort = __xfrm6_state_sort, 173 .state_sort = __xfrm6_state_sort,
174 .output = xfrm6_output,
174}; 175};
175 176
176void __init xfrm6_state_init(void) 177void __init xfrm6_state_init(void)
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index b1ee99a59c0c..2a571b43ebec 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -91,6 +91,12 @@ struct ias_object *irias_new_object( char *name, int id)
91 91
92 obj->magic = IAS_OBJECT_MAGIC; 92 obj->magic = IAS_OBJECT_MAGIC;
93 obj->name = strndup(name, IAS_MAX_CLASSNAME); 93 obj->name = strndup(name, IAS_MAX_CLASSNAME);
94 if (!obj->name) {
95 IRDA_WARNING("%s(), Unable to allocate name!\n",
96 __FUNCTION__);
97 kfree(obj);
98 return NULL;
99 }
94 obj->id = id; 100 obj->id = id;
95 101
96 /* Locking notes : the attrib spinlock has lower precendence 102 /* Locking notes : the attrib spinlock has lower precendence
@@ -101,6 +107,7 @@ struct ias_object *irias_new_object( char *name, int id)
101 if (obj->attribs == NULL) { 107 if (obj->attribs == NULL) {
102 IRDA_WARNING("%s(), Unable to allocate attribs!\n", 108 IRDA_WARNING("%s(), Unable to allocate attribs!\n",
103 __FUNCTION__); 109 __FUNCTION__);
110 kfree(obj->name);
104 kfree(obj); 111 kfree(obj);
105 return NULL; 112 return NULL;
106 } 113 }
@@ -357,6 +364,15 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
357 364
358 /* Insert value */ 365 /* Insert value */
359 attrib->value = irias_new_integer_value(value); 366 attrib->value = irias_new_integer_value(value);
367 if (!attrib->name || !attrib->value) {
368 IRDA_WARNING("%s: Unable to allocate attribute!\n",
369 __FUNCTION__);
370 if (attrib->value)
371 irias_delete_value(attrib->value);
372 kfree(attrib->name);
373 kfree(attrib);
374 return;
375 }
360 376
361 irias_add_attrib(obj, attrib, owner); 377 irias_add_attrib(obj, attrib, owner);
362} 378}
@@ -391,6 +407,15 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
391 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 407 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
392 408
393 attrib->value = irias_new_octseq_value( octets, len); 409 attrib->value = irias_new_octseq_value( octets, len);
410 if (!attrib->name || !attrib->value) {
411 IRDA_WARNING("%s: Unable to allocate attribute!\n",
412 __FUNCTION__);
413 if (attrib->value)
414 irias_delete_value(attrib->value);
415 kfree(attrib->name);
416 kfree(attrib);
417 return;
418 }
394 419
395 irias_add_attrib(obj, attrib, owner); 420 irias_add_attrib(obj, attrib, owner);
396} 421}
@@ -424,6 +449,15 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
424 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); 449 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
425 450
426 attrib->value = irias_new_string_value(value); 451 attrib->value = irias_new_string_value(value);
452 if (!attrib->name || !attrib->value) {
453 IRDA_WARNING("%s: Unable to allocate attribute!\n",
454 __FUNCTION__);
455 if (attrib->value)
456 irias_delete_value(attrib->value);
457 kfree(attrib->name);
458 kfree(attrib);
459 return;
460 }
427 461
428 irias_add_attrib(obj, attrib, owner); 462 irias_add_attrib(obj, attrib, owner);
429} 463}
@@ -473,6 +507,12 @@ struct ias_value *irias_new_string_value(char *string)
473 value->type = IAS_STRING; 507 value->type = IAS_STRING;
474 value->charset = CS_ASCII; 508 value->charset = CS_ASCII;
475 value->t.string = strndup(string, IAS_MAX_STRING); 509 value->t.string = strndup(string, IAS_MAX_STRING);
510 if (!value->t.string) {
511 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
512 kfree(value);
513 return NULL;
514 }
515
476 value->len = strlen(value->t.string); 516 value->len = strlen(value->t.string);
477 517
478 return value; 518 return value;
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 2bb04ac09329..310776dd6109 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -144,12 +144,18 @@ static int __init irlan_init(void)
144 /* Register with IrLMP as a client */ 144 /* Register with IrLMP as a client */
145 ckey = irlmp_register_client(hints, &irlan_client_discovery_indication, 145 ckey = irlmp_register_client(hints, &irlan_client_discovery_indication,
146 NULL, NULL); 146 NULL, NULL);
147 147 if (!ckey)
148 goto err_ckey;
149
148 /* Register with IrLMP as a service */ 150 /* Register with IrLMP as a service */
149 skey = irlmp_register_service(hints); 151 skey = irlmp_register_service(hints);
152 if (!skey)
153 goto err_skey;
150 154
151 /* Start the master IrLAN instance (the only one for now) */ 155 /* Start the master IrLAN instance (the only one for now) */
152 new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY); 156 new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY);
157 if (!new)
158 goto err_open;
153 159
154 /* The master will only open its (listen) control TSAP */ 160 /* The master will only open its (listen) control TSAP */
155 irlan_provider_open_ctrl_tsap(new); 161 irlan_provider_open_ctrl_tsap(new);
@@ -158,6 +164,17 @@ static int __init irlan_init(void)
158 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS); 164 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
159 165
160 return 0; 166 return 0;
167
168err_open:
169 irlmp_unregister_service(skey);
170err_skey:
171 irlmp_unregister_client(ckey);
172err_ckey:
173#ifdef CONFIG_PROC_FS
174 remove_proc_entry("irlan", proc_irda);
175#endif /* CONFIG_PROC_FS */
176
177 return -ENOMEM;
161} 178}
162 179
163static void __exit irlan_cleanup(void) 180static void __exit irlan_cleanup(void)
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
new file mode 100644
index 000000000000..f8fcc3d10327
--- /dev/null
+++ b/net/iucv/Kconfig
@@ -0,0 +1,15 @@
1config IUCV
2 tristate "IUCV support (VM only)"
3 depends on S390
4 help
5 Select this option if you want to use inter-user communication under
6 VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
7 communication link between VM guests.
8
9config AFIUCV
10 tristate "AF_IUCV support (VM only)"
11 depends on IUCV
12 help
13 Select this option if you want to use inter-user communication under
14 VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
15 communication link between VM guests.
diff --git a/net/iucv/Makefile b/net/iucv/Makefile
new file mode 100644
index 000000000000..7bfdc8532675
--- /dev/null
+++ b/net/iucv/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for IUCV
3#
4
5obj-$(CONFIG_IUCV) += iucv.o
6obj-$(CONFIG_AFIUCV) += af_iucv.o
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
new file mode 100644
index 000000000000..acc94214bde6
--- /dev/null
+++ b/net/iucv/af_iucv.c
@@ -0,0 +1,1077 @@
1/*
2 * linux/net/iucv/af_iucv.c
3 *
4 * IUCV protocol stack for Linux on zSeries
5 *
6 * Copyright 2006 IBM Corporation
7 *
8 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/list.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/skbuff.h>
19#include <linux/init.h>
20#include <linux/poll.h>
21#include <net/sock.h>
22#include <asm/ebcdic.h>
23#include <asm/cpcmd.h>
24#include <linux/kmod.h>
25
26#include <net/iucv/iucv.h>
27#include <net/iucv/af_iucv.h>
28
29#define CONFIG_IUCV_SOCK_DEBUG 1
30
31#define IPRMDATA 0x80
32#define VERSION "1.0"
33
34static char iucv_userid[80];
35
36static struct proto_ops iucv_sock_ops;
37
38static struct proto iucv_proto = {
39 .name = "AF_IUCV",
40 .owner = THIS_MODULE,
41 .obj_size = sizeof(struct iucv_sock),
42};
43
44/* Callback functions */
45static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
46static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
47static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
48static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
49static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
50
51static struct iucv_sock_list iucv_sk_list = {
52 .lock = RW_LOCK_UNLOCKED,
53 .autobind_name = ATOMIC_INIT(0)
54};
55
56static struct iucv_handler af_iucv_handler = {
57 .path_pending = iucv_callback_connreq,
58 .path_complete = iucv_callback_connack,
59 .path_severed = iucv_callback_connrej,
60 .message_pending = iucv_callback_rx,
61 .message_complete = iucv_callback_txdone
62};
63
64static inline void high_nmcpy(unsigned char *dst, char *src)
65{
66 memcpy(dst, src, 8);
67}
68
69static inline void low_nmcpy(unsigned char *dst, char *src)
70{
71 memcpy(&dst[8], src, 8);
72}
73
74/* Timers */
75static void iucv_sock_timeout(unsigned long arg)
76{
77 struct sock *sk = (struct sock *)arg;
78
79 bh_lock_sock(sk);
80 sk->sk_err = ETIMEDOUT;
81 sk->sk_state_change(sk);
82 bh_unlock_sock(sk);
83
84 iucv_sock_kill(sk);
85 sock_put(sk);
86}
87
88static void iucv_sock_clear_timer(struct sock *sk)
89{
90 sk_stop_timer(sk, &sk->sk_timer);
91}
92
93static void iucv_sock_init_timer(struct sock *sk)
94{
95 init_timer(&sk->sk_timer);
96 sk->sk_timer.function = iucv_sock_timeout;
97 sk->sk_timer.data = (unsigned long)sk;
98}
99
100static struct sock *__iucv_get_sock_by_name(char *nm)
101{
102 struct sock *sk;
103 struct hlist_node *node;
104
105 sk_for_each(sk, node, &iucv_sk_list.head)
106 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
107 return sk;
108
109 return NULL;
110}
111
112static void iucv_sock_destruct(struct sock *sk)
113{
114 skb_queue_purge(&sk->sk_receive_queue);
115 skb_queue_purge(&sk->sk_write_queue);
116}
117
118/* Cleanup Listen */
119static void iucv_sock_cleanup_listen(struct sock *parent)
120{
121 struct sock *sk;
122
123 /* Close non-accepted connections */
124 while ((sk = iucv_accept_dequeue(parent, NULL))) {
125 iucv_sock_close(sk);
126 iucv_sock_kill(sk);
127 }
128
129 parent->sk_state = IUCV_CLOSED;
130 sock_set_flag(parent, SOCK_ZAPPED);
131}
132
133/* Kill socket */
134static void iucv_sock_kill(struct sock *sk)
135{
136 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
137 return;
138
139 iucv_sock_unlink(&iucv_sk_list, sk);
140 sock_set_flag(sk, SOCK_DEAD);
141 sock_put(sk);
142}
143
144/* Close an IUCV socket */
145static void iucv_sock_close(struct sock *sk)
146{
147 unsigned char user_data[16];
148 struct iucv_sock *iucv = iucv_sk(sk);
149 int err;
150
151 iucv_sock_clear_timer(sk);
152 lock_sock(sk);
153
154 switch(sk->sk_state) {
155 case IUCV_LISTEN:
156 iucv_sock_cleanup_listen(sk);
157 break;
158
159 case IUCV_CONNECTED:
160 case IUCV_DISCONN:
161 err = 0;
162 if (iucv->path) {
163 low_nmcpy(user_data, iucv->src_name);
164 high_nmcpy(user_data, iucv->dst_name);
165 ASCEBC(user_data, sizeof(user_data));
166 err = iucv_path_sever(iucv->path, user_data);
167 iucv_path_free(iucv->path);
168 iucv->path = NULL;
169 }
170
171 sk->sk_state = IUCV_CLOSED;
172 sk->sk_state_change(sk);
173 sk->sk_err = ECONNRESET;
174 sk->sk_state_change(sk);
175
176 skb_queue_purge(&iucv->send_skb_q);
177
178 sock_set_flag(sk, SOCK_ZAPPED);
179 break;
180
181 default:
182 sock_set_flag(sk, SOCK_ZAPPED);
183 break;
184 };
185
186 release_sock(sk);
187 iucv_sock_kill(sk);
188}
189
190static void iucv_sock_init(struct sock *sk, struct sock *parent)
191{
192 if (parent)
193 sk->sk_type = parent->sk_type;
194}
195
196static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
197{
198 struct sock *sk;
199
200 sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
201 if (!sk)
202 return NULL;
203
204 sock_init_data(sock, sk);
205 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
206 skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
207 iucv_sk(sk)->send_tag = 0;
208
209 sk->sk_destruct = iucv_sock_destruct;
210 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
211 sk->sk_allocation = GFP_DMA;
212
213 sock_reset_flag(sk, SOCK_ZAPPED);
214
215 sk->sk_protocol = proto;
216 sk->sk_state = IUCV_OPEN;
217
218 iucv_sock_init_timer(sk);
219
220 iucv_sock_link(&iucv_sk_list, sk);
221 return sk;
222}
223
224/* Create an IUCV socket */
225static int iucv_sock_create(struct socket *sock, int protocol)
226{
227 struct sock *sk;
228
229 if (sock->type != SOCK_STREAM)
230 return -ESOCKTNOSUPPORT;
231
232 sock->state = SS_UNCONNECTED;
233 sock->ops = &iucv_sock_ops;
234
235 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
236 if (!sk)
237 return -ENOMEM;
238
239 iucv_sock_init(sk, NULL);
240
241 return 0;
242}
243
244void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
245{
246 write_lock_bh(&l->lock);
247 sk_add_node(sk, &l->head);
248 write_unlock_bh(&l->lock);
249}
250
251void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
252{
253 write_lock_bh(&l->lock);
254 sk_del_node_init(sk);
255 write_unlock_bh(&l->lock);
256}
257
258void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
259{
260 sock_hold(sk);
261 list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
262 iucv_sk(sk)->parent = parent;
263 parent->sk_ack_backlog++;
264}
265
266void iucv_accept_unlink(struct sock *sk)
267{
268 list_del_init(&iucv_sk(sk)->accept_q);
269 iucv_sk(sk)->parent->sk_ack_backlog--;
270 iucv_sk(sk)->parent = NULL;
271 sock_put(sk);
272}
273
274struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
275{
276 struct iucv_sock *isk, *n;
277 struct sock *sk;
278
279 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
280 sk = (struct sock *) isk;
281 lock_sock(sk);
282
283 if (sk->sk_state == IUCV_CLOSED) {
284 release_sock(sk);
285 iucv_accept_unlink(sk);
286 continue;
287 }
288
289 if (sk->sk_state == IUCV_CONNECTED ||
290 sk->sk_state == IUCV_SEVERED ||
291 !newsock) {
292 iucv_accept_unlink(sk);
293 if (newsock)
294 sock_graft(sk, newsock);
295
296 if (sk->sk_state == IUCV_SEVERED)
297 sk->sk_state = IUCV_DISCONN;
298
299 release_sock(sk);
300 return sk;
301 }
302
303 release_sock(sk);
304 }
305 return NULL;
306}
307
308int iucv_sock_wait_state(struct sock *sk, int state, int state2,
309 unsigned long timeo)
310{
311 DECLARE_WAITQUEUE(wait, current);
312 int err = 0;
313
314 add_wait_queue(sk->sk_sleep, &wait);
315 while (sk->sk_state != state && sk->sk_state != state2) {
316 set_current_state(TASK_INTERRUPTIBLE);
317
318 if (!timeo) {
319 err = -EAGAIN;
320 break;
321 }
322
323 if (signal_pending(current)) {
324 err = sock_intr_errno(timeo);
325 break;
326 }
327
328 release_sock(sk);
329 timeo = schedule_timeout(timeo);
330 lock_sock(sk);
331
332 err = sock_error(sk);
333 if (err)
334 break;
335 }
336 set_current_state(TASK_RUNNING);
337 remove_wait_queue(sk->sk_sleep, &wait);
338 return err;
339}
340
341/* Bind an unbound socket */
342static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
343 int addr_len)
344{
345 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
346 struct sock *sk = sock->sk;
347 struct iucv_sock *iucv;
348 int err;
349
350 /* Verify the input sockaddr */
351 if (!addr || addr->sa_family != AF_IUCV)
352 return -EINVAL;
353
354 lock_sock(sk);
355 if (sk->sk_state != IUCV_OPEN) {
356 err = -EBADFD;
357 goto done;
358 }
359
360 write_lock_bh(&iucv_sk_list.lock);
361
362 iucv = iucv_sk(sk);
363 if (__iucv_get_sock_by_name(sa->siucv_name)) {
364 err = -EADDRINUSE;
365 goto done_unlock;
366 }
367 if (iucv->path) {
368 err = 0;
369 goto done_unlock;
370 }
371
372 /* Bind the socket */
373 memcpy(iucv->src_name, sa->siucv_name, 8);
374
375 /* Copy the user id */
376 memcpy(iucv->src_user_id, iucv_userid, 8);
377 sk->sk_state = IUCV_BOUND;
378 err = 0;
379
380done_unlock:
381 /* Release the socket list lock */
382 write_unlock_bh(&iucv_sk_list.lock);
383done:
384 release_sock(sk);
385 return err;
386}
387
388/* Automatically bind an unbound socket */
389static int iucv_sock_autobind(struct sock *sk)
390{
391 struct iucv_sock *iucv = iucv_sk(sk);
392 char query_buffer[80];
393 char name[12];
394 int err = 0;
395
396 /* Set the userid and name */
397 cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
398 if (unlikely(err))
399 return -EPROTO;
400
401 memcpy(iucv->src_user_id, query_buffer, 8);
402
403 write_lock_bh(&iucv_sk_list.lock);
404
405 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
406 while (__iucv_get_sock_by_name(name)) {
407 sprintf(name, "%08x",
408 atomic_inc_return(&iucv_sk_list.autobind_name));
409 }
410
411 write_unlock_bh(&iucv_sk_list.lock);
412
413 memcpy(&iucv->src_name, name, 8);
414
415 return err;
416}
417
418/* Connect an unconnected socket */
419static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
420 int alen, int flags)
421{
422 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
423 struct sock *sk = sock->sk;
424 struct iucv_sock *iucv;
425 unsigned char user_data[16];
426 int err;
427
428 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
429 return -EINVAL;
430
431 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
432 return -EBADFD;
433
434 if (sk->sk_type != SOCK_STREAM)
435 return -EINVAL;
436
437 iucv = iucv_sk(sk);
438
439 if (sk->sk_state == IUCV_OPEN) {
440 err = iucv_sock_autobind(sk);
441 if (unlikely(err))
442 return err;
443 }
444
445 lock_sock(sk);
446
447 /* Set the destination information */
448 memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
449 memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
450
451 high_nmcpy(user_data, sa->siucv_name);
452 low_nmcpy(user_data, iucv_sk(sk)->src_name);
453 ASCEBC(user_data, sizeof(user_data));
454
455 iucv = iucv_sk(sk);
456 /* Create path. */
457 iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
458 IPRMDATA, GFP_KERNEL);
459 err = iucv_path_connect(iucv->path, &af_iucv_handler,
460 sa->siucv_user_id, NULL, user_data, sk);
461 if (err) {
462 iucv_path_free(iucv->path);
463 iucv->path = NULL;
464 err = -ECONNREFUSED;
465 goto done;
466 }
467
468 if (sk->sk_state != IUCV_CONNECTED) {
469 err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
470 sock_sndtimeo(sk, flags & O_NONBLOCK));
471 }
472
473 if (sk->sk_state == IUCV_DISCONN) {
474 release_sock(sk);
475 return -ECONNREFUSED;
476 }
477done:
478 release_sock(sk);
479 return err;
480}
481
482/* Move a socket into listening state. */
483static int iucv_sock_listen(struct socket *sock, int backlog)
484{
485 struct sock *sk = sock->sk;
486 int err;
487
488 lock_sock(sk);
489
490 err = -EINVAL;
491 if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
492 goto done;
493
494 sk->sk_max_ack_backlog = backlog;
495 sk->sk_ack_backlog = 0;
496 sk->sk_state = IUCV_LISTEN;
497 err = 0;
498
499done:
500 release_sock(sk);
501 return err;
502}
503
504/* Accept a pending connection */
505static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
506 int flags)
507{
508 DECLARE_WAITQUEUE(wait, current);
509 struct sock *sk = sock->sk, *nsk;
510 long timeo;
511 int err = 0;
512
513 lock_sock(sk);
514
515 if (sk->sk_state != IUCV_LISTEN) {
516 err = -EBADFD;
517 goto done;
518 }
519
520 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
521
522 /* Wait for an incoming connection */
523 add_wait_queue_exclusive(sk->sk_sleep, &wait);
524 while (!(nsk = iucv_accept_dequeue(sk, newsock))){
525 set_current_state(TASK_INTERRUPTIBLE);
526 if (!timeo) {
527 err = -EAGAIN;
528 break;
529 }
530
531 release_sock(sk);
532 timeo = schedule_timeout(timeo);
533 lock_sock(sk);
534
535 if (sk->sk_state != IUCV_LISTEN) {
536 err = -EBADFD;
537 break;
538 }
539
540 if (signal_pending(current)) {
541 err = sock_intr_errno(timeo);
542 break;
543 }
544 }
545
546 set_current_state(TASK_RUNNING);
547 remove_wait_queue(sk->sk_sleep, &wait);
548
549 if (err)
550 goto done;
551
552 newsock->state = SS_CONNECTED;
553
554done:
555 release_sock(sk);
556 return err;
557}
558
559static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
560 int *len, int peer)
561{
562 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
563 struct sock *sk = sock->sk;
564
565 addr->sa_family = AF_IUCV;
566 *len = sizeof(struct sockaddr_iucv);
567
568 if (peer) {
569 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
570 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
571 } else {
572 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
573 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
574 }
575 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
576 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
577 memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
578
579 return 0;
580}
581
582static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
583 struct msghdr *msg, size_t len)
584{
585 struct sock *sk = sock->sk;
586 struct iucv_sock *iucv = iucv_sk(sk);
587 struct sk_buff *skb;
588 struct iucv_message txmsg;
589 int err;
590
591 err = sock_error(sk);
592 if (err)
593 return err;
594
595 if (msg->msg_flags & MSG_OOB)
596 return -EOPNOTSUPP;
597
598 lock_sock(sk);
599
600 if (sk->sk_shutdown & SEND_SHUTDOWN) {
601 err = -EPIPE;
602 goto out;
603 }
604
605 if (sk->sk_state == IUCV_CONNECTED){
606 if(!(skb = sock_alloc_send_skb(sk, len,
607 msg->msg_flags & MSG_DONTWAIT,
608 &err)))
609 return err;
610
611 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
612 err = -EFAULT;
613 goto fail;
614 }
615
616 txmsg.class = 0;
617 txmsg.tag = iucv->send_tag++;
618 memcpy(skb->cb, &txmsg.tag, 4);
619 skb_queue_tail(&iucv->send_skb_q, skb);
620 err = iucv_message_send(iucv->path, &txmsg, 0, 0,
621 (void *) skb->data, skb->len);
622 if (err) {
623 if (err == 3)
624 printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
625 skb_unlink(skb, &iucv->send_skb_q);
626 err = -EPIPE;
627 goto fail;
628 }
629
630 } else {
631 err = -ENOTCONN;
632 goto out;
633 }
634
635 release_sock(sk);
636 return len;
637
638fail:
639 kfree_skb(skb);
640out:
641 release_sock(sk);
642 return err;
643}
644
645static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
646 struct msghdr *msg, size_t len, int flags)
647{
648 int noblock = flags & MSG_DONTWAIT;
649 struct sock *sk = sock->sk;
650 int target, copied = 0;
651 struct sk_buff *skb;
652 int err = 0;
653
654 if (flags & (MSG_OOB))
655 return -EOPNOTSUPP;
656
657 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
658
659 skb = skb_recv_datagram(sk, flags, noblock, &err);
660 if (!skb) {
661 if (sk->sk_shutdown & RCV_SHUTDOWN)
662 return 0;
663 return err;
664 }
665
666 copied = min_t(unsigned int, skb->len, len);
667
668 if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
669 skb_queue_head(&sk->sk_receive_queue, skb);
670 if (copied == 0)
671 return -EFAULT;
672 }
673
674 len -= copied;
675
676 /* Mark read part of skb as used */
677 if (!(flags & MSG_PEEK)) {
678 skb_pull(skb, copied);
679
680 if (skb->len) {
681 skb_queue_head(&sk->sk_receive_queue, skb);
682 goto done;
683 }
684
685 kfree_skb(skb);
686 } else
687 skb_queue_head(&sk->sk_receive_queue, skb);
688
689done:
690 return err ? : copied;
691}
692
693static inline unsigned int iucv_accept_poll(struct sock *parent)
694{
695 struct iucv_sock *isk, *n;
696 struct sock *sk;
697
698 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
699 sk = (struct sock *) isk;
700
701 if (sk->sk_state == IUCV_CONNECTED)
702 return POLLIN | POLLRDNORM;
703 }
704
705 return 0;
706}
707
708unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
709 poll_table *wait)
710{
711 struct sock *sk = sock->sk;
712 unsigned int mask = 0;
713
714 poll_wait(file, sk->sk_sleep, wait);
715
716 if (sk->sk_state == IUCV_LISTEN)
717 return iucv_accept_poll(sk);
718
719 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
720 mask |= POLLERR;
721
722 if (sk->sk_shutdown & RCV_SHUTDOWN)
723 mask |= POLLRDHUP;
724
725 if (sk->sk_shutdown == SHUTDOWN_MASK)
726 mask |= POLLHUP;
727
728 if (!skb_queue_empty(&sk->sk_receive_queue) ||
729 (sk->sk_shutdown & RCV_SHUTDOWN))
730 mask |= POLLIN | POLLRDNORM;
731
732 if (sk->sk_state == IUCV_CLOSED)
733 mask |= POLLHUP;
734
735 if (sock_writeable(sk))
736 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
737 else
738 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
739
740 return mask;
741}
742
743static int iucv_sock_shutdown(struct socket *sock, int how)
744{
745 struct sock *sk = sock->sk;
746 struct iucv_sock *iucv = iucv_sk(sk);
747 struct iucv_message txmsg;
748 int err = 0;
749 u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
750
751 how++;
752
753 if ((how & ~SHUTDOWN_MASK) || !how)
754 return -EINVAL;
755
756 lock_sock(sk);
757 switch(sk->sk_state) {
758 case IUCV_CLOSED:
759 err = -ENOTCONN;
760 goto fail;
761
762 default:
763 sk->sk_shutdown |= how;
764 break;
765 }
766
767 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
768 txmsg.class = 0;
769 txmsg.tag = 0;
770 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
771 (void *) prmmsg, 8);
772 if (err) {
773 switch(err) {
774 case 1:
775 err = -ENOTCONN;
776 break;
777 case 2:
778 err = -ECONNRESET;
779 break;
780 default:
781 err = -ENOTCONN;
782 break;
783 }
784 }
785 }
786
787 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
788 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
789 if (err)
790 err = -ENOTCONN;
791
792 skb_queue_purge(&sk->sk_receive_queue);
793 }
794
795 /* Wake up anyone sleeping in poll */
796 sk->sk_state_change(sk);
797
798fail:
799 release_sock(sk);
800 return err;
801}
802
803static int iucv_sock_release(struct socket *sock)
804{
805 struct sock *sk = sock->sk;
806 int err = 0;
807
808 if (!sk)
809 return 0;
810
811 iucv_sock_close(sk);
812
813 /* Unregister with IUCV base support */
814 if (iucv_sk(sk)->path) {
815 iucv_path_sever(iucv_sk(sk)->path, NULL);
816 iucv_path_free(iucv_sk(sk)->path);
817 iucv_sk(sk)->path = NULL;
818 }
819
820 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){
821 lock_sock(sk);
822 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
823 sk->sk_lingertime);
824 release_sock(sk);
825 }
826
827 sock_orphan(sk);
828 iucv_sock_kill(sk);
829 return err;
830}
831
832/* Callback wrappers - called from iucv base support */
833static int iucv_callback_connreq(struct iucv_path *path,
834 u8 ipvmid[8], u8 ipuser[16])
835{
836 unsigned char user_data[16];
837 unsigned char nuser_data[16];
838 unsigned char src_name[8];
839 struct hlist_node *node;
840 struct sock *sk, *nsk;
841 struct iucv_sock *iucv, *niucv;
842 int err;
843
844 memcpy(src_name, ipuser, 8);
845 EBCASC(src_name, 8);
846 /* Find out if this path belongs to af_iucv. */
847 read_lock(&iucv_sk_list.lock);
848 iucv = NULL;
849 sk_for_each(sk, node, &iucv_sk_list.head)
850 if (sk->sk_state == IUCV_LISTEN &&
851 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
852 /*
853 * Found a listening socket with
854 * src_name == ipuser[0-7].
855 */
856 iucv = iucv_sk(sk);
857 break;
858 }
859 read_unlock(&iucv_sk_list.lock);
860 if (!iucv)
861 /* No socket found, not one of our paths. */
862 return -EINVAL;
863
864 bh_lock_sock(sk);
865
866 /* Check if parent socket is listening */
867 low_nmcpy(user_data, iucv->src_name);
868 high_nmcpy(user_data, iucv->dst_name);
869 ASCEBC(user_data, sizeof(user_data));
870 if (sk->sk_state != IUCV_LISTEN) {
871 err = iucv_path_sever(path, user_data);
872 goto fail;
873 }
874
875 /* Check for backlog size */
876 if (sk_acceptq_is_full(sk)) {
877 err = iucv_path_sever(path, user_data);
878 goto fail;
879 }
880
881 /* Create the new socket */
882 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
883 if (!nsk){
884 err = iucv_path_sever(path, user_data);
885 goto fail;
886 }
887
888 niucv = iucv_sk(nsk);
889 iucv_sock_init(nsk, sk);
890
891 /* Set the new iucv_sock */
892 memcpy(niucv->dst_name, ipuser + 8, 8);
893 EBCASC(niucv->dst_name, 8);
894 memcpy(niucv->dst_user_id, ipvmid, 8);
895 memcpy(niucv->src_name, iucv->src_name, 8);
896 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
897 niucv->path = path;
898
899 /* Call iucv_accept */
900 high_nmcpy(nuser_data, ipuser + 8);
901 memcpy(nuser_data + 8, niucv->src_name, 8);
902 ASCEBC(nuser_data + 8, 8);
903
904 path->msglim = IUCV_QUEUELEN_DEFAULT;
905 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
906 if (err){
907 err = iucv_path_sever(path, user_data);
908 goto fail;
909 }
910
911 iucv_accept_enqueue(sk, nsk);
912
913 /* Wake up accept */
914 nsk->sk_state = IUCV_CONNECTED;
915 sk->sk_data_ready(sk, 1);
916 err = 0;
917fail:
918 bh_unlock_sock(sk);
919 return 0;
920}
921
922static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
923{
924 struct sock *sk = path->private;
925
926 sk->sk_state = IUCV_CONNECTED;
927 sk->sk_state_change(sk);
928}
929
930static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
931{
932 struct sock *sk = path->private;
933 struct sk_buff *skb;
934 int rc;
935
936 if (sk->sk_shutdown & RCV_SHUTDOWN)
937 return;
938
939 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
940 if (!skb) {
941 iucv_message_reject(path, msg);
942 return;
943 }
944
945 if (msg->flags & IPRMDATA) {
946 skb->data = NULL;
947 skb->len = 0;
948 } else {
949 rc = iucv_message_receive(path, msg, 0, skb->data,
950 msg->length, NULL);
951 if (rc) {
952 kfree_skb(skb);
953 return;
954 }
955
956 skb->h.raw = skb->data;
957 skb->nh.raw = skb->data;
958 skb->len = msg->length;
959 }
960
961 if (sock_queue_rcv_skb(sk, skb))
962 kfree_skb(skb);
963}
964
965static void iucv_callback_txdone(struct iucv_path *path,
966 struct iucv_message *msg)
967{
968 struct sock *sk = path->private;
969 struct sk_buff *this;
970 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
971 struct sk_buff *list_skb = list->next;
972 unsigned long flags;
973
974 spin_lock_irqsave(&list->lock, flags);
975
976 do {
977 this = list_skb;
978 list_skb = list_skb->next;
979 } while (memcmp(&msg->tag, this->cb, 4));
980
981 spin_unlock_irqrestore(&list->lock, flags);
982
983 skb_unlink(this, &iucv_sk(sk)->send_skb_q);
984 kfree_skb(this);
985}
986
987static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
988{
989 struct sock *sk = path->private;
990
991 if (!list_empty(&iucv_sk(sk)->accept_q))
992 sk->sk_state = IUCV_SEVERED;
993 else
994 sk->sk_state = IUCV_DISCONN;
995
996 sk->sk_state_change(sk);
997}
998
999static struct proto_ops iucv_sock_ops = {
1000 .family = PF_IUCV,
1001 .owner = THIS_MODULE,
1002 .release = iucv_sock_release,
1003 .bind = iucv_sock_bind,
1004 .connect = iucv_sock_connect,
1005 .listen = iucv_sock_listen,
1006 .accept = iucv_sock_accept,
1007 .getname = iucv_sock_getname,
1008 .sendmsg = iucv_sock_sendmsg,
1009 .recvmsg = iucv_sock_recvmsg,
1010 .poll = iucv_sock_poll,
1011 .ioctl = sock_no_ioctl,
1012 .mmap = sock_no_mmap,
1013 .socketpair = sock_no_socketpair,
1014 .shutdown = iucv_sock_shutdown,
1015 .setsockopt = sock_no_setsockopt,
1016 .getsockopt = sock_no_getsockopt
1017};
1018
1019static struct net_proto_family iucv_sock_family_ops = {
1020 .family = AF_IUCV,
1021 .owner = THIS_MODULE,
1022 .create = iucv_sock_create,
1023};
1024
1025static int afiucv_init(void)
1026{
1027 int err;
1028
1029 if (!MACHINE_IS_VM) {
1030 printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
1031 err = -EPROTONOSUPPORT;
1032 goto out;
1033 }
1034 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1035 if (unlikely(err)) {
1036 printk(KERN_ERR "AF_IUCV needs the VM userid\n");
1037 err = -EPROTONOSUPPORT;
1038 goto out;
1039 }
1040
1041 err = iucv_register(&af_iucv_handler, 0);
1042 if (err)
1043 goto out;
1044 err = proto_register(&iucv_proto, 0);
1045 if (err)
1046 goto out_iucv;
1047 err = sock_register(&iucv_sock_family_ops);
1048 if (err)
1049 goto out_proto;
1050 printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
1051 return 0;
1052
1053out_proto:
1054 proto_unregister(&iucv_proto);
1055out_iucv:
1056 iucv_unregister(&af_iucv_handler, 0);
1057out:
1058 return err;
1059}
1060
1061static void __exit afiucv_exit(void)
1062{
1063 sock_unregister(PF_IUCV);
1064 proto_unregister(&iucv_proto);
1065 iucv_unregister(&af_iucv_handler, 0);
1066
1067 printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
1068}
1069
1070module_init(afiucv_init);
1071module_exit(afiucv_exit);
1072
1073MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
1074MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
1075MODULE_VERSION(VERSION);
1076MODULE_LICENSE("GPL");
1077MODULE_ALIAS_NETPROTO(PF_IUCV);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
new file mode 100644
index 000000000000..1b10d576f222
--- /dev/null
+++ b/net/iucv/iucv.c
@@ -0,0 +1,1619 @@
1/*
2 * IUCV base infrastructure.
3 *
4 * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s):
6 * Original source:
7 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 * 2Gb awareness and general cleanup:
10 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
11 * Rewritten for af_iucv:
12 * Martin Schwidefsky <schwidefsky@de.ibm.com>
13 *
14 * Documentation used:
15 * The original source
16 * CP Programming Service, IBM document # SC24-5760
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/module.h>
34#include <linux/moduleparam.h>
35
36#include <linux/spinlock.h>
37#include <linux/kernel.h>
38#include <linux/slab.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/list.h>
42#include <linux/errno.h>
43#include <linux/err.h>
44#include <linux/device.h>
45#include <linux/cpu.h>
46#include <net/iucv/iucv.h>
47#include <asm/atomic.h>
48#include <asm/ebcdic.h>
49#include <asm/io.h>
50#include <asm/s390_ext.h>
51#include <asm/s390_rdev.h>
52#include <asm/smp.h>
53
54/*
55 * FLAGS:
56 * All flags are defined in the field IPFLAGS1 of each function
57 * and can be found in CP Programming Services.
58 * IPSRCCLS - Indicates you have specified a source class.
59 * IPTRGCLS - Indicates you have specified a target class.
60 * IPFGPID - Indicates you have specified a pathid.
61 * IPFGMID - Indicates you have specified a message ID.
62 * IPNORPY - Indicates a one-way message. No reply expected.
63 * IPALL - Indicates that all paths are affected.
64 */
65#define IUCV_IPSRCCLS 0x01
66#define IUCV_IPTRGCLS 0x01
67#define IUCV_IPFGPID 0x02
68#define IUCV_IPFGMID 0x04
69#define IUCV_IPNORPY 0x10
70#define IUCV_IPALL 0x80
71
72static int iucv_bus_match (struct device *dev, struct device_driver *drv)
73{
74 return 0;
75}
76
77struct bus_type iucv_bus = {
78 .name = "iucv",
79 .match = iucv_bus_match,
80};
81
82struct device *iucv_root;
83static int iucv_available;
84
85/* General IUCV interrupt structure */
86struct iucv_irq_data {
87 u16 ippathid;
88 u8 ipflags1;
89 u8 iptype;
90 u32 res2[8];
91};
92
93struct iucv_work {
94 struct list_head list;
95 struct iucv_irq_data data;
96};
97
98static LIST_HEAD(iucv_work_queue);
99static DEFINE_SPINLOCK(iucv_work_lock);
100
101static struct iucv_irq_data *iucv_irq_data;
102static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
103static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
104
105static void iucv_tasklet_handler(unsigned long);
106static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler,0);
107
108enum iucv_command_codes {
109 IUCV_QUERY = 0,
110 IUCV_RETRIEVE_BUFFER = 2,
111 IUCV_SEND = 4,
112 IUCV_RECEIVE = 5,
113 IUCV_REPLY = 6,
114 IUCV_REJECT = 8,
115 IUCV_PURGE = 9,
116 IUCV_ACCEPT = 10,
117 IUCV_CONNECT = 11,
118 IUCV_DECLARE_BUFFER = 12,
119 IUCV_QUIESCE = 13,
120 IUCV_RESUME = 14,
121 IUCV_SEVER = 15,
122 IUCV_SETMASK = 16,
123};
124
125/*
126 * Error messages that are used with the iucv_sever function. They get
127 * converted to EBCDIC.
128 */
129static char iucv_error_no_listener[16] = "NO LISTENER";
130static char iucv_error_no_memory[16] = "NO MEMORY";
131static char iucv_error_pathid[16] = "INVALID PATHID";
132
133/*
134 * iucv_handler_list: List of registered handlers.
135 */
136static LIST_HEAD(iucv_handler_list);
137
138/*
139 * iucv_path_table: an array of iucv_path structures.
140 */
141static struct iucv_path **iucv_path_table;
142static unsigned long iucv_max_pathid;
143
144/*
145 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
146 */
147static DEFINE_SPINLOCK(iucv_table_lock);
148
149/*
150 * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
151 * Needed for iucv_path_sever called from tasklet.
152 */
153static int iucv_tasklet_cpu = -1;
154
155/*
156 * Mutex and wait queue for iucv_register/iucv_unregister.
157 */
158static DEFINE_MUTEX(iucv_register_mutex);
159
160/*
161 * Counter for number of non-smp capable handlers.
162 */
163static int iucv_nonsmp_handler;
164
165/*
166 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
167 * iucv_path_quiesce and iucv_path_sever.
168 */
169struct iucv_cmd_control {
170 u16 ippathid;
171 u8 ipflags1;
172 u8 iprcode;
173 u16 ipmsglim;
174 u16 res1;
175 u8 ipvmid[8];
176 u8 ipuser[16];
177 u8 iptarget[8];
178} __attribute__ ((packed,aligned(8)));
179
180/*
181 * Data in parameter list iucv structure. Used by iucv_message_send,
182 * iucv_message_send2way and iucv_message_reply.
183 */
184struct iucv_cmd_dpl {
185 u16 ippathid;
186 u8 ipflags1;
187 u8 iprcode;
188 u32 ipmsgid;
189 u32 iptrgcls;
190 u8 iprmmsg[8];
191 u32 ipsrccls;
192 u32 ipmsgtag;
193 u32 ipbfadr2;
194 u32 ipbfln2f;
195 u32 res;
196} __attribute__ ((packed,aligned(8)));
197
198/*
199 * Data in buffer iucv structure. Used by iucv_message_receive,
200 * iucv_message_reject, iucv_message_send, iucv_message_send2way
201 * and iucv_declare_cpu.
202 */
203struct iucv_cmd_db {
204 u16 ippathid;
205 u8 ipflags1;
206 u8 iprcode;
207 u32 ipmsgid;
208 u32 iptrgcls;
209 u32 ipbfadr1;
210 u32 ipbfln1f;
211 u32 ipsrccls;
212 u32 ipmsgtag;
213 u32 ipbfadr2;
214 u32 ipbfln2f;
215 u32 res;
216} __attribute__ ((packed,aligned(8)));
217
218/*
219 * Purge message iucv structure. Used by iucv_message_purge.
220 */
221struct iucv_cmd_purge {
222 u16 ippathid;
223 u8 ipflags1;
224 u8 iprcode;
225 u32 ipmsgid;
226 u8 ipaudit[3];
227 u8 res1[5];
228 u32 res2;
229 u32 ipsrccls;
230 u32 ipmsgtag;
231 u32 res3[3];
232} __attribute__ ((packed,aligned(8)));
233
234/*
235 * Set mask iucv structure. Used by iucv_enable_cpu.
236 */
237struct iucv_cmd_set_mask {
238 u8 ipmask;
239 u8 res1[2];
240 u8 iprcode;
241 u32 res2[9];
242} __attribute__ ((packed,aligned(8)));
243
244union iucv_param {
245 struct iucv_cmd_control ctrl;
246 struct iucv_cmd_dpl dpl;
247 struct iucv_cmd_db db;
248 struct iucv_cmd_purge purge;
249 struct iucv_cmd_set_mask set_mask;
250};
251
252/*
253 * Anchor for per-cpu IUCV command parameter block.
254 */
255static union iucv_param *iucv_param;
256
257/**
258 * iucv_call_b2f0
259 * @code: identifier of IUCV call to CP.
260 * @parm: pointer to a struct iucv_parm block
261 *
262 * Calls CP to execute IUCV commands.
263 *
264 * Returns the result of the CP IUCV call.
265 */
266static inline int iucv_call_b2f0(int command, union iucv_param *parm)
267{
268 register unsigned long reg0 asm ("0");
269 register unsigned long reg1 asm ("1");
270 int ccode;
271
272 reg0 = command;
273 reg1 = virt_to_phys(parm);
274 asm volatile(
275 " .long 0xb2f01000\n"
276 " ipm %0\n"
277 " srl %0,28\n"
278 : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
279 : "m" (*parm) : "cc");
280 return (ccode == 1) ? parm->ctrl.iprcode : ccode;
281}
282
283/**
284 * iucv_query_maxconn
285 *
286 * Determines the maximum number of connections that may be established.
287 *
288 * Returns 0 on success or -EPERM if IUCV is not
289 * available.
290 */
291static int iucv_query_maxconn(void)
292{
293 register unsigned long reg0 asm ("0");
294 register unsigned long reg1 asm ("1");
295 void *param;
296 int ccode;
297
298 param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
299 if (!param)
300 return -ENOMEM;
301 reg0 = IUCV_QUERY;
302 reg1 = (unsigned long) param;
303 asm volatile (
304 " .long 0xb2f01000\n"
305 " ipm %0\n"
306 " srl %0,28\n"
307 : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
308 if (ccode == 0)
309 iucv_max_pathid = reg0;
310 kfree(param);
311 return ccode ? -EPERM : 0;
312}
313
314/**
315 * iucv_allow_cpu
316 * @data: unused
317 *
318 * Allow iucv interrupts on this cpu.
319 */
320static void iucv_allow_cpu(void *data)
321{
322 int cpu = smp_processor_id();
323 union iucv_param *parm;
324
325 /*
326 * Enable all iucv interrupts.
327 * ipmask contains bits for the different interrupts
328 * 0x80 - Flag to allow nonpriority message pending interrupts
329 * 0x40 - Flag to allow priority message pending interrupts
330 * 0x20 - Flag to allow nonpriority message completion interrupts
331 * 0x10 - Flag to allow priority message completion interrupts
332 * 0x08 - Flag to allow IUCV control interrupts
333 */
334 parm = percpu_ptr(iucv_param, smp_processor_id());
335 memset(parm, 0, sizeof(union iucv_param));
336 parm->set_mask.ipmask = 0xf8;
337 iucv_call_b2f0(IUCV_SETMASK, parm);
338
339 /* Set indication that iucv interrupts are allowed for this cpu. */
340 cpu_set(cpu, iucv_irq_cpumask);
341}
342
343/**
344 * iucv_block_cpu
345 * @data: unused
346 *
347 * Block iucv interrupts on this cpu.
348 */
349static void iucv_block_cpu(void *data)
350{
351 int cpu = smp_processor_id();
352 union iucv_param *parm;
353
354 /* Disable all iucv interrupts. */
355 parm = percpu_ptr(iucv_param, smp_processor_id());
356 memset(parm, 0, sizeof(union iucv_param));
357 iucv_call_b2f0(IUCV_SETMASK, parm);
358
359 /* Clear indication that iucv interrupts are allowed for this cpu. */
360 cpu_clear(cpu, iucv_irq_cpumask);
361}
362
363/**
364 * iucv_declare_cpu
365 * @data: unused
366 *
367 * Declare an interrupt buffer on this cpu.
368 */
369static void iucv_declare_cpu(void *data)
370{
371 int cpu = smp_processor_id();
372 union iucv_param *parm;
373 int rc;
374
375 if (cpu_isset(cpu, iucv_buffer_cpumask))
376 return;
377
378 /* Declare interrupt buffer. */
379 parm = percpu_ptr(iucv_param, cpu);
380 memset(parm, 0, sizeof(union iucv_param));
381 parm->db.ipbfadr1 = virt_to_phys(percpu_ptr(iucv_irq_data, cpu));
382 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
383 if (rc) {
384 char *err = "Unknown";
385 switch(rc) {
386 case 0x03:
387 err = "Directory error";
388 break;
389 case 0x0a:
390 err = "Invalid length";
391 break;
392 case 0x13:
393 err = "Buffer already exists";
394 break;
395 case 0x3e:
396 err = "Buffer overlap";
397 break;
398 case 0x5c:
399 err = "Paging or storage error";
400 break;
401 }
402 printk(KERN_WARNING "iucv_register: iucv_declare_buffer "
403 "on cpu %i returned error 0x%02x (%s)\n", cpu, rc, err);
404 return;
405 }
406
407 /* Set indication that an iucv buffer exists for this cpu. */
408 cpu_set(cpu, iucv_buffer_cpumask);
409
410 if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
411 /* Enable iucv interrupts on this cpu. */
412 iucv_allow_cpu(NULL);
413 else
414 /* Disable iucv interrupts on this cpu. */
415 iucv_block_cpu(NULL);
416}
417
418/**
419 * iucv_retrieve_cpu
420 * @data: unused
421 *
422 * Retrieve interrupt buffer on this cpu.
423 */
424static void iucv_retrieve_cpu(void *data)
425{
426 int cpu = smp_processor_id();
427 union iucv_param *parm;
428
429 if (!cpu_isset(cpu, iucv_buffer_cpumask))
430 return;
431
432 /* Block iucv interrupts. */
433 iucv_block_cpu(NULL);
434
435 /* Retrieve interrupt buffer. */
436 parm = percpu_ptr(iucv_param, cpu);
437 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
438
439 /* Clear indication that an iucv buffer exists for this cpu. */
440 cpu_clear(cpu, iucv_buffer_cpumask);
441}
442
443/**
444 * iucv_setmask_mp
445 *
446 * Allow iucv interrupts on all cpus.
447 */
448static void iucv_setmask_mp(void)
449{
450 int cpu;
451
452 for_each_online_cpu(cpu)
453 /* Enable all cpus with a declared buffer. */
454 if (cpu_isset(cpu, iucv_buffer_cpumask) &&
455 !cpu_isset(cpu, iucv_irq_cpumask))
456 smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
457}
458
459/**
460 * iucv_setmask_up
461 *
462 * Allow iucv interrupts on a single cpu.
463 */
464static void iucv_setmask_up(void)
465{
466 cpumask_t cpumask;
467 int cpu;
468
469 /* Disable all cpus but the first in iucv_irq_cpumask. */
470 cpumask = iucv_irq_cpumask;
471 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
472 for_each_cpu_mask(cpu, cpumask)
473 smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu);
474}
475
476/**
477 * iucv_enable
478 *
479 * This function makes iucv ready for use. It allocates the pathid
480 * table, declares an iucv interrupt buffer and enables the iucv
481 * interrupts. Called when the first user has registered an iucv
482 * handler.
483 */
484static int iucv_enable(void)
485{
486 size_t alloc_size;
487 int cpu, rc;
488
489 rc = -ENOMEM;
490 alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
491 iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
492 if (!iucv_path_table)
493 goto out;
494 /* Declare per cpu buffers. */
495 rc = -EIO;
496 for_each_online_cpu(cpu)
497 smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
498 if (cpus_empty(iucv_buffer_cpumask))
499 /* No cpu could declare an iucv buffer. */
500 goto out_path;
501 return 0;
502
503out_path:
504 kfree(iucv_path_table);
505out:
506 return rc;
507}
508
509/**
510 * iucv_disable
511 *
512 * This function shuts down iucv. It disables iucv interrupts, retrieves
513 * the iucv interrupt buffer and frees the pathid table. Called after the
514 * last user unregisters its iucv handler.
515 */
516static void iucv_disable(void)
517{
518 on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
519 kfree(iucv_path_table);
520}
521
522#ifdef CONFIG_HOTPLUG_CPU
523static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
524 unsigned long action, void *hcpu)
525{
526 cpumask_t cpumask;
527 long cpu = (long) hcpu;
528
529 switch (action) {
530 case CPU_UP_PREPARE:
531 if (!percpu_populate(iucv_irq_data,
532 sizeof(struct iucv_irq_data),
533 GFP_KERNEL|GFP_DMA, cpu))
534 return NOTIFY_BAD;
535 if (!percpu_populate(iucv_param, sizeof(union iucv_param),
536 GFP_KERNEL|GFP_DMA, cpu)) {
537 percpu_depopulate(iucv_irq_data, cpu);
538 return NOTIFY_BAD;
539 }
540 break;
541 case CPU_UP_CANCELED:
542 case CPU_DEAD:
543 percpu_depopulate(iucv_param, cpu);
544 percpu_depopulate(iucv_irq_data, cpu);
545 break;
546 case CPU_ONLINE:
547 case CPU_DOWN_FAILED:
548 smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
549 break;
550 case CPU_DOWN_PREPARE:
551 cpumask = iucv_buffer_cpumask;
552 cpu_clear(cpu, cpumask);
553 if (cpus_empty(cpumask))
554 /* Can't offline last IUCV enabled cpu. */
555 return NOTIFY_BAD;
556 smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
557 if (cpus_empty(iucv_irq_cpumask))
558 smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
559 first_cpu(iucv_buffer_cpumask));
560 break;
561 }
562 return NOTIFY_OK;
563}
564
565static struct notifier_block iucv_cpu_notifier = {
566 .notifier_call = iucv_cpu_notify,
567};
568#endif
569
570/**
571 * iucv_sever_pathid
572 * @pathid: path identification number.
573 * @userdata: 16-bytes of user data.
574 *
575 * Sever an iucv path to free up the pathid. Used internally.
576 */
577static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
578{
579 union iucv_param *parm;
580
581 parm = percpu_ptr(iucv_param, smp_processor_id());
582 memset(parm, 0, sizeof(union iucv_param));
583 if (userdata)
584 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
585 parm->ctrl.ippathid = pathid;
586 return iucv_call_b2f0(IUCV_SEVER, parm);
587}
588
589/**
590 * __iucv_cleanup_pathid
591 * @dummy: unused dummy argument
592 *
593 * Nop function called via smp_call_function to force work items from
594 * pending external iucv interrupts to the work queue.
595 */
596static void __iucv_cleanup_pathid(void *dummy)
597{
598}
599
600/**
601 * iucv_cleanup_pathid
602 * @pathid: 16 bit pathid
603 *
604 * Function called after a path has been severed to find all remaining
605 * work items for the now stale pathid. The caller needs to hold the
606 * iucv_table_lock.
607 */
608static void iucv_cleanup_pathid(u16 pathid)
609{
610 struct iucv_work *p, *n;
611
612 /*
613	 * Path is severed, the pathid can be reused immediately on
614	 * an iucv connect or a connection pending interrupt.
615 * iucv_path_connect and connection pending interrupt will
616 * wait until the iucv_table_lock is released before the
617 * recycled pathid enters the system.
618 * Force remaining interrupts to the work queue, then
619 * scan the work queue for items of this path.
620 */
621 smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
622 spin_lock_irq(&iucv_work_lock);
623 list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
624 /* Remove work items for pathid except connection pending */
625 if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
626 list_del(&p->list);
627 kfree(p);
628 }
629 }
630 spin_unlock_irq(&iucv_work_lock);
631}
632
633/**
634 * iucv_register:
635 * @handler: address of iucv handler structure
636 * @smp: != 0 indicates that the handler can deal with out of order messages
637 *
638 * Registers a driver with IUCV.
639 *
640 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
641 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
642 */
643int iucv_register(struct iucv_handler *handler, int smp)
644{
645 int rc;
646
647 if (!iucv_available)
648 return -ENOSYS;
649 mutex_lock(&iucv_register_mutex);
650 if (!smp)
651 iucv_nonsmp_handler++;
652 if (list_empty(&iucv_handler_list)) {
653 rc = iucv_enable();
654 if (rc)
655 goto out_mutex;
656 } else if (!smp && iucv_nonsmp_handler == 1)
657 iucv_setmask_up();
658 INIT_LIST_HEAD(&handler->paths);
659
660 spin_lock_irq(&iucv_table_lock);
661 list_add_tail(&handler->list, &iucv_handler_list);
662 spin_unlock_irq(&iucv_table_lock);
663 rc = 0;
664out_mutex:
665 mutex_unlock(&iucv_register_mutex);
666 return rc;
667}
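/*
 * Illustrative usage sketch (not part of the original source). A typical
 * caller fills in an iucv_handler with its callbacks and registers it once;
 * the handler and callback names below are hypothetical.
 *
 *	static struct iucv_handler my_handler = {
 *		.path_pending	 = my_path_pending,
 *		.message_pending = my_message_pending,
 *	};
 *
 *	rc = iucv_register(&my_handler, 1);
 *	...
 *	iucv_unregister(&my_handler, 1);	(pass the same smp value)
 */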
668
669/**
670 * iucv_unregister
671 * @handler: address of iucv handler structure
672 * @smp: != 0 indicates that the handler can deal with out of order messages
673 *
674 * Unregister driver from IUCV.
675 */
676void iucv_unregister(struct iucv_handler *handler, int smp)
677{
678 struct iucv_path *p, *n;
679
680 mutex_lock(&iucv_register_mutex);
681 spin_lock_bh(&iucv_table_lock);
682 /* Remove handler from the iucv_handler_list. */
683 list_del_init(&handler->list);
684	/* Sever all pathids still referring to the handler. */
685 list_for_each_entry_safe(p, n, &handler->paths, list) {
686 iucv_sever_pathid(p->pathid, NULL);
687 iucv_path_table[p->pathid] = NULL;
688 list_del(&p->list);
689 iucv_cleanup_pathid(p->pathid);
690 iucv_path_free(p);
691 }
692 spin_unlock_bh(&iucv_table_lock);
693 if (!smp)
694 iucv_nonsmp_handler--;
695 if (list_empty(&iucv_handler_list))
696 iucv_disable();
697 else if (!smp && iucv_nonsmp_handler == 0)
698 iucv_setmask_mp();
699 mutex_unlock(&iucv_register_mutex);
700}
701
702/**
703 * iucv_path_accept
704 * @path: address of iucv path structure
705 * @handler: address of iucv handler structure
706 * @userdata: 16 bytes of data reflected to the communication partner
707 * @private: private data passed to interrupt handlers for this path
708 *
709 * This function is issued after the user received a connection pending
710 * external interrupt and now wishes to complete the IUCV communication path.
711 *
712 * Returns the result of the CP IUCV call.
713 */
714int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
715 u8 userdata[16], void *private)
716{
717 union iucv_param *parm;
718 int rc;
719
720 local_bh_disable();
721 /* Prepare parameter block. */
722 parm = percpu_ptr(iucv_param, smp_processor_id());
723 memset(parm, 0, sizeof(union iucv_param));
724 parm->ctrl.ippathid = path->pathid;
725 parm->ctrl.ipmsglim = path->msglim;
726 if (userdata)
727 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
728 parm->ctrl.ipflags1 = path->flags;
729
730 rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
731 if (!rc) {
732 path->private = private;
733 path->msglim = parm->ctrl.ipmsglim;
734 path->flags = parm->ctrl.ipflags1;
735 }
736 local_bh_enable();
737 return rc;
738}
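/*
 * Illustrative sketch (not part of the original source): accepting a
 * pending connection from a handler's path_pending callback. The callback
 * signature is inferred from how iucv_path_pending() below invokes it;
 * my_handler and my_private are hypothetical.
 *
 *	static int my_path_pending(struct iucv_path *path, u8 ipvmid[8],
 *				   u8 ipuser[16])
 *	{
 *		return iucv_path_accept(path, &my_handler, NULL, my_private);
 *	}
 */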
739
740/**
741 * iucv_path_connect
742 * @path: address of iucv path structure
743 * @handler: address of iucv handler structure
744 * @userid: 8-byte user identification
745 * @system: 8-byte target system identification
746 * @userdata: 16 bytes of data reflected to the communication partner
747 * @private: private data passed to interrupt handlers for this path
748 *
749 * This function establishes an IUCV path. Although the connect may complete
750 * successfully, you are not able to use the path until you receive an IUCV
751 * Connection Complete external interrupt.
752 *
753 * Returns the result of the CP IUCV call.
754 */
755int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
756 u8 userid[8], u8 system[8], u8 userdata[16],
757 void *private)
758{
759 union iucv_param *parm;
760 int rc;
761
762 preempt_disable();
763 if (iucv_tasklet_cpu != smp_processor_id())
764 spin_lock_bh(&iucv_table_lock);
765 parm = percpu_ptr(iucv_param, smp_processor_id());
766 memset(parm, 0, sizeof(union iucv_param));
767 parm->ctrl.ipmsglim = path->msglim;
768 parm->ctrl.ipflags1 = path->flags;
769 if (userid) {
770 memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
771 ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
772 EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
773 }
774 if (system) {
775 memcpy(parm->ctrl.iptarget, system,
776 sizeof(parm->ctrl.iptarget));
777 ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
778 EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
779 }
780 if (userdata)
781 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
782
783 rc = iucv_call_b2f0(IUCV_CONNECT, parm);
784 if (!rc) {
785 if (parm->ctrl.ippathid < iucv_max_pathid) {
786 path->pathid = parm->ctrl.ippathid;
787 path->msglim = parm->ctrl.ipmsglim;
788 path->flags = parm->ctrl.ipflags1;
789 path->handler = handler;
790 path->private = private;
791 list_add_tail(&path->list, &handler->paths);
792 iucv_path_table[path->pathid] = path;
793 } else {
794 iucv_sever_pathid(parm->ctrl.ippathid,
795 iucv_error_pathid);
796 rc = -EIO;
797 }
798 }
799 if (iucv_tasklet_cpu != smp_processor_id())
800 spin_unlock_bh(&iucv_table_lock);
801 preempt_enable();
802 return rc;
803}
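/*
 * Illustrative sketch (hypothetical values, not part of the original
 * source): connecting to a peer guest, using the iucv_path_alloc helper
 * seen elsewhere in this file.
 *
 *	path = iucv_path_alloc(10, IUCV_IPRMDATA, GFP_KERNEL);
 *	rc = iucv_path_connect(path, &my_handler, "PEERID  ", NULL,
 *			       NULL, my_private);
 *
 * Even when rc is 0 the path may only be used after the handler's
 * path_complete callback has been called for it.
 */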
804
805/**
806 * iucv_path_quiesce:
807 * @path: address of iucv path structure
808 * @userdata: 16 bytes of data reflected to the communication partner
809 *
810 * This function temporarily suspends incoming messages on an IUCV path.
811 * You can later reactivate the path by invoking the iucv_resume function.
812 *
813 * Returns the result from the CP IUCV call.
814 */
815int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
816{
817 union iucv_param *parm;
818 int rc;
819
820 local_bh_disable();
821 parm = percpu_ptr(iucv_param, smp_processor_id());
822 memset(parm, 0, sizeof(union iucv_param));
823 if (userdata)
824 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
825 parm->ctrl.ippathid = path->pathid;
826 rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
827 local_bh_enable();
828 return rc;
829}
830
831/**
832 * iucv_path_resume:
833 * @path: address of iucv path structure
834 * @userdata: 16 bytes of data reflected to the communication partner
835 *
836 * This function resumes incoming messages on an IUCV path that has
837 * been stopped with iucv_path_quiesce.
838 *
839 * Returns the result from the CP IUCV call.
840 */
841int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
842{
843 union iucv_param *parm;
844 int rc;
845
846 local_bh_disable();
847 parm = percpu_ptr(iucv_param, smp_processor_id());
848 memset(parm, 0, sizeof(union iucv_param));
849 if (userdata)
850 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
851 parm->ctrl.ippathid = path->pathid;
852 rc = iucv_call_b2f0(IUCV_RESUME, parm);
853 local_bh_enable();
854 return rc;
855}
856
857/**
858 * iucv_path_sever
859 * @path: address of iucv path structure
860 * @userdata: 16 bytes of data reflected to the communication partner
861 *
862 * This function terminates an IUCV path.
863 *
864 * Returns the result from the CP IUCV call.
865 */
866int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
867{
868 int rc;
869
870
871 preempt_disable();
872 if (iucv_tasklet_cpu != smp_processor_id())
873 spin_lock_bh(&iucv_table_lock);
874 rc = iucv_sever_pathid(path->pathid, userdata);
875 if (!rc) {
876 iucv_path_table[path->pathid] = NULL;
877 list_del_init(&path->list);
878 iucv_cleanup_pathid(path->pathid);
879 }
880 if (iucv_tasklet_cpu != smp_processor_id())
881 spin_unlock_bh(&iucv_table_lock);
882 preempt_enable();
883 return rc;
884}
885
886/**
887 * iucv_message_purge
888 * @path: address of iucv path structure
889 * @msg: address of iucv msg structure
890 * @srccls: source class of message
891 *
892 * Cancels a message you have sent.
893 *
894 * Returns the result from the CP IUCV call.
895 */
896int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
897 u32 srccls)
898{
899 union iucv_param *parm;
900 int rc;
901
902 local_bh_disable();
903 parm = percpu_ptr(iucv_param, smp_processor_id());
904 memset(parm, 0, sizeof(union iucv_param));
905 parm->purge.ippathid = path->pathid;
906 parm->purge.ipmsgid = msg->id;
907 parm->purge.ipsrccls = srccls;
908 parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
909 rc = iucv_call_b2f0(IUCV_PURGE, parm);
910 if (!rc) {
911 msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
912 msg->tag = parm->purge.ipmsgtag;
913 }
914 local_bh_enable();
915 return rc;
916}
917
918/**
919 * iucv_message_receive
920 * @path: address of iucv path structure
921 * @msg: address of iucv msg structure
922 * @flags: how the message is received (IUCV_IPBUFLST)
923 * @buffer: address of data buffer or address of struct iucv_array
924 * @size: length of data buffer
925 * @residual: if not NULL, the residual buffer length is returned here
926 *
927 * This function receives messages that are being sent to you over
928 * established paths. This function will deal with RMDATA messages
929 * embedded in struct iucv_message as well.
930 *
931 * Returns the result from the CP IUCV call.
932 */
933int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
934 u8 flags, void *buffer, size_t size, size_t *residual)
935{
936 union iucv_param *parm;
937 struct iucv_array *array;
938 u8 *rmmsg;
939 size_t copy;
940 int rc;
941
942 if (msg->flags & IUCV_IPRMDATA) {
943 /*
944 * Message is 8 bytes long and has been stored to the
945 * message descriptor itself.
946 */
947 rc = (size < 8) ? 5 : 0;
948 if (residual)
949 *residual = abs(size - 8);
950 rmmsg = msg->rmmsg;
951 if (flags & IUCV_IPBUFLST) {
952 /* Copy to struct iucv_array. */
953 size = (size < 8) ? size : 8;
954 for (array = buffer; size > 0; array++) {
955 copy = min_t(size_t, size, array->length);
956 memcpy((u8 *)(addr_t) array->address,
957 rmmsg, copy);
958 rmmsg += copy;
959 size -= copy;
960 }
961 } else {
962 /* Copy to direct buffer. */
963 memcpy(buffer, rmmsg, min_t(size_t, size, 8));
964 }
965 return 0;
966 }
967
968 local_bh_disable();
969 parm = percpu_ptr(iucv_param, smp_processor_id());
970 memset(parm, 0, sizeof(union iucv_param));
971 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
972 parm->db.ipbfln1f = (u32) size;
973 parm->db.ipmsgid = msg->id;
974 parm->db.ippathid = path->pathid;
975 parm->db.iptrgcls = msg->class;
976 parm->db.ipflags1 = (flags | IUCV_IPFGPID |
977 IUCV_IPFGMID | IUCV_IPTRGCLS);
978 rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
979 if (!rc || rc == 5) {
980 msg->flags = parm->db.ipflags1;
981 if (residual)
982 *residual = parm->db.ipbfln1f;
983 }
984 local_bh_enable();
985 return rc;
986}
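/*
 * Illustrative sketch (not part of the original source): receiving a
 * pending message from a handler's message_pending callback; my_buffer is
 * hypothetical. msg->length was filled in by iucv_message_pending() below.
 *
 *	static void my_message_pending(struct iucv_path *path,
 *				       struct iucv_message *msg)
 *	{
 *		size_t residual;
 *
 *		iucv_message_receive(path, msg, 0, my_buffer, msg->length,
 *				     &residual);
 *	}
 */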
987
988/**
989 * iucv_message_reject
990 * @path: address of iucv path structure
991 * @msg: address of iucv msg structure
992 *
993 * The reject function refuses a specified message. Between the time you
994 * are notified of a message and the time that you complete the message,
995 * the message may be rejected.
996 *
997 * Returns the result from the CP IUCV call.
998 */
999int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
1000{
1001 union iucv_param *parm;
1002 int rc;
1003
1004 local_bh_disable();
1005 parm = percpu_ptr(iucv_param, smp_processor_id());
1006 memset(parm, 0, sizeof(union iucv_param));
1007 parm->db.ippathid = path->pathid;
1008 parm->db.ipmsgid = msg->id;
1009 parm->db.iptrgcls = msg->class;
1010 parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
1011 rc = iucv_call_b2f0(IUCV_REJECT, parm);
1012 local_bh_enable();
1013 return rc;
1014}
1015
1016/**
1017 * iucv_message_reply
1018 * @path: address of iucv path structure
1019 * @msg: address of iucv msg structure
1020 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
1021 * @reply: address of reply data buffer or address of struct iucv_array
1022 * @size: length of reply data buffer
1023 *
1024 * This function responds to the two-way messages that you receive. You
1025 * must completely identify the message to which you wish to reply, i.e.
1026 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
1027 * the parameter list.
1028 *
1029 * Returns the result from the CP IUCV call.
1030 */
1031int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
1032 u8 flags, void *reply, size_t size)
1033{
1034 union iucv_param *parm;
1035 int rc;
1036
1037 local_bh_disable();
1038 parm = percpu_ptr(iucv_param, smp_processor_id());
1039 memset(parm, 0, sizeof(union iucv_param));
1040 if (flags & IUCV_IPRMDATA) {
1041 parm->dpl.ippathid = path->pathid;
1042 parm->dpl.ipflags1 = flags;
1043 parm->dpl.ipmsgid = msg->id;
1044 parm->dpl.iptrgcls = msg->class;
1045 memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
1046 } else {
1047 parm->db.ipbfadr1 = (u32)(addr_t) reply;
1048 parm->db.ipbfln1f = (u32) size;
1049 parm->db.ippathid = path->pathid;
1050 parm->db.ipflags1 = flags;
1051 parm->db.ipmsgid = msg->id;
1052 parm->db.iptrgcls = msg->class;
1053 }
1054 rc = iucv_call_b2f0(IUCV_REPLY, parm);
1055 local_bh_enable();
1056 return rc;
1057}
1058
1059/**
1060 * iucv_message_send
1061 * @path: address of iucv path structure
1062 * @msg: address of iucv msg structure
1063 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
1064 * @srccls: source class of message
1065 * @buffer: address of send buffer or address of struct iucv_array
1066 * @size: length of send buffer
1067 *
1068 * This function transmits data to another application. Data to be
1069 * transmitted is in a buffer. This is a one-way message; the
1070 * receiver will not reply to it.
1071 *
1072 * Returns the result from the CP IUCV call.
1073 */
1074int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1075 u8 flags, u32 srccls, void *buffer, size_t size)
1076{
1077 union iucv_param *parm;
1078 int rc;
1079
1080 local_bh_disable();
1081 parm = percpu_ptr(iucv_param, smp_processor_id());
1082 memset(parm, 0, sizeof(union iucv_param));
1083 if (flags & IUCV_IPRMDATA) {
1084 /* Message of 8 bytes can be placed into the parameter list. */
1085 parm->dpl.ippathid = path->pathid;
1086 parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
1087 parm->dpl.iptrgcls = msg->class;
1088 parm->dpl.ipsrccls = srccls;
1089 parm->dpl.ipmsgtag = msg->tag;
1090 memcpy(parm->dpl.iprmmsg, buffer, 8);
1091 } else {
1092 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1093 parm->db.ipbfln1f = (u32) size;
1094 parm->db.ippathid = path->pathid;
1095 parm->db.ipflags1 = flags | IUCV_IPNORPY;
1096 parm->db.iptrgcls = msg->class;
1097 parm->db.ipsrccls = srccls;
1098 parm->db.ipmsgtag = msg->tag;
1099 }
1100 rc = iucv_call_b2f0(IUCV_SEND, parm);
1101 if (!rc)
1102 msg->id = parm->db.ipmsgid;
1103 local_bh_enable();
1104 return rc;
1105}
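/*
 * Illustrative sketch (not part of the original source): sending a one-way
 * message on an established path; data and len are hypothetical.
 *
 *	struct iucv_message msg = { .class = 0, .tag = 0 };
 *
 *	rc = iucv_message_send(path, &msg, 0, 0, data, len);
 *
 * On success msg.id identifies the message, e.g. for iucv_message_purge.
 */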
1106
1107/**
1108 * iucv_message_send2way
1109 * @path: address of iucv path structure
1110 * @msg: address of iucv msg structure
1111 * @flags: how the message is sent and the reply is received
1112 * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
1113 * @srccls: source class of message
1114 * @buffer: address of send buffer or address of struct iucv_array
1115 * @size: length of send buffer
1116 * @answer: address of answer buffer or address of struct iucv_array
1117 * @asize: size of reply buffer
1118 *
1119 * This function transmits data to another application. Data to be
1120 * transmitted is in a buffer. The receiver of the send is expected to
1121 * reply to the message and a buffer is provided into which IUCV moves
1122 * the reply to this message.
1123 *
1124 * Returns the result from the CP IUCV call.
1125 */
1126int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
1127 u8 flags, u32 srccls, void *buffer, size_t size,
1128 void *answer, size_t asize, size_t *residual)
1129{
1130 union iucv_param *parm;
1131 int rc;
1132
1133 local_bh_disable();
1134 parm = percpu_ptr(iucv_param, smp_processor_id());
1135 memset(parm, 0, sizeof(union iucv_param));
1136 if (flags & IUCV_IPRMDATA) {
1137 parm->dpl.ippathid = path->pathid;
1138 parm->dpl.ipflags1 = path->flags; /* priority message */
1139 parm->dpl.iptrgcls = msg->class;
1140 parm->dpl.ipsrccls = srccls;
1141 parm->dpl.ipmsgtag = msg->tag;
1142 parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
1143 parm->dpl.ipbfln2f = (u32) asize;
1144 memcpy(parm->dpl.iprmmsg, buffer, 8);
1145 } else {
1146 parm->db.ippathid = path->pathid;
1147 parm->db.ipflags1 = path->flags; /* priority message */
1148 parm->db.iptrgcls = msg->class;
1149 parm->db.ipsrccls = srccls;
1150 parm->db.ipmsgtag = msg->tag;
1151 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1152 parm->db.ipbfln1f = (u32) size;
1153 parm->db.ipbfadr2 = (u32)(addr_t) answer;
1154 parm->db.ipbfln2f = (u32) asize;
1155 }
1156 rc = iucv_call_b2f0(IUCV_SEND, parm);
1157 if (!rc)
1158 msg->id = parm->db.ipmsgid;
1159 local_bh_enable();
1160 return rc;
1161}
1162
1163/**
1164 * iucv_path_pending
1165 * @data: Pointer to external interrupt buffer
1166 *
1167 * Process connection pending work item. Called from tasklet while holding
1168 * iucv_table_lock.
1169 */
1170struct iucv_path_pending {
1171 u16 ippathid;
1172 u8 ipflags1;
1173 u8 iptype;
1174 u16 ipmsglim;
1175 u16 res1;
1176 u8 ipvmid[8];
1177 u8 ipuser[16];
1178 u32 res3;
1179 u8 ippollfg;
1180 u8 res4[3];
1181} __attribute__ ((packed));
1182
1183static void iucv_path_pending(struct iucv_irq_data *data)
1184{
1185 struct iucv_path_pending *ipp = (void *) data;
1186 struct iucv_handler *handler;
1187 struct iucv_path *path;
1188 char *error;
1189
1190 BUG_ON(iucv_path_table[ipp->ippathid]);
1191 /* New pathid, handler found. Create a new path struct. */
1192 error = iucv_error_no_memory;
1193 path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
1194 if (!path)
1195 goto out_sever;
1196 path->pathid = ipp->ippathid;
1197 iucv_path_table[path->pathid] = path;
1198 EBCASC(ipp->ipvmid, 8);
1199
1200 /* Call registered handler until one is found that wants the path. */
1201 list_for_each_entry(handler, &iucv_handler_list, list) {
1202 if (!handler->path_pending)
1203 continue;
1204 /*
1205 * Add path to handler to allow a call to iucv_path_sever
1206 * inside the path_pending function. If the handler returns
1207 * an error remove the path from the handler again.
1208 */
1209 list_add(&path->list, &handler->paths);
1210 path->handler = handler;
1211 if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
1212 return;
1213 list_del(&path->list);
1214 path->handler = NULL;
1215 }
1216 /* No handler wanted the path. */
1217 iucv_path_table[path->pathid] = NULL;
1218 iucv_path_free(path);
1219 error = iucv_error_no_listener;
1220out_sever:
1221 iucv_sever_pathid(ipp->ippathid, error);
1222}
1223
1224/**
1225 * iucv_path_complete
1226 * @data: Pointer to external interrupt buffer
1227 *
1228 * Process connection complete work item. Called from tasklet while holding
1229 * iucv_table_lock.
1230 */
1231struct iucv_path_complete {
1232 u16 ippathid;
1233 u8 ipflags1;
1234 u8 iptype;
1235 u16 ipmsglim;
1236 u16 res1;
1237 u8 res2[8];
1238 u8 ipuser[16];
1239 u32 res3;
1240 u8 ippollfg;
1241 u8 res4[3];
1242} __attribute__ ((packed));
1243
1244static void iucv_path_complete(struct iucv_irq_data *data)
1245{
1246 struct iucv_path_complete *ipc = (void *) data;
1247 struct iucv_path *path = iucv_path_table[ipc->ippathid];
1248
1249 BUG_ON(!path || !path->handler);
1250 if (path->handler->path_complete)
1251 path->handler->path_complete(path, ipc->ipuser);
1252}
1253
1254/**
1255 * iucv_path_severed
1256 * @data: Pointer to external interrupt buffer
1257 *
1258 * Process connection severed work item. Called from tasklet while holding
1259 * iucv_table_lock.
1260 */
1261struct iucv_path_severed {
1262 u16 ippathid;
1263 u8 res1;
1264 u8 iptype;
1265 u32 res2;
1266 u8 res3[8];
1267 u8 ipuser[16];
1268 u32 res4;
1269 u8 ippollfg;
1270 u8 res5[3];
1271} __attribute__ ((packed));
1272
1273static void iucv_path_severed(struct iucv_irq_data *data)
1274{
1275 struct iucv_path_severed *ips = (void *) data;
1276 struct iucv_path *path = iucv_path_table[ips->ippathid];
1277
1278 BUG_ON(!path || !path->handler);
1279 if (path->handler->path_severed)
1280 path->handler->path_severed(path, ips->ipuser);
1281 else {
1282 iucv_sever_pathid(path->pathid, NULL);
1283 iucv_path_table[path->pathid] = NULL;
1284 list_del_init(&path->list);
1285 iucv_cleanup_pathid(path->pathid);
1286 iucv_path_free(path);
1287 }
1288}
1289
1290/**
1291 * iucv_path_quiesced
1292 * @data: Pointer to external interrupt buffer
1293 *
1294 * Process connection quiesced work item. Called from tasklet while holding
1295 * iucv_table_lock.
1296 */
1297struct iucv_path_quiesced {
1298 u16 ippathid;
1299 u8 res1;
1300 u8 iptype;
1301 u32 res2;
1302 u8 res3[8];
1303 u8 ipuser[16];
1304 u32 res4;
1305 u8 ippollfg;
1306 u8 res5[3];
1307} __attribute__ ((packed));
1308
1309static void iucv_path_quiesced(struct iucv_irq_data *data)
1310{
1311 struct iucv_path_quiesced *ipq = (void *) data;
1312 struct iucv_path *path = iucv_path_table[ipq->ippathid];
1313
1314 BUG_ON(!path || !path->handler);
1315 if (path->handler->path_quiesced)
1316 path->handler->path_quiesced(path, ipq->ipuser);
1317}
1318
1319/**
1320 * iucv_path_resumed
1321 * @data: Pointer to external interrupt buffer
1322 *
1323 * Process connection resumed work item. Called from tasklet while holding
1324 * iucv_table_lock.
1325 */
1326struct iucv_path_resumed {
1327 u16 ippathid;
1328 u8 res1;
1329 u8 iptype;
1330 u32 res2;
1331 u8 res3[8];
1332 u8 ipuser[16];
1333 u32 res4;
1334 u8 ippollfg;
1335 u8 res5[3];
1336} __attribute__ ((packed));
1337
1338static void iucv_path_resumed(struct iucv_irq_data *data)
1339{
1340 struct iucv_path_resumed *ipr = (void *) data;
1341 struct iucv_path *path = iucv_path_table[ipr->ippathid];
1342
1343 BUG_ON(!path || !path->handler);
1344 if (path->handler->path_resumed)
1345 path->handler->path_resumed(path, ipr->ipuser);
1346}
1347
1348/**
1349 * iucv_message_complete
1350 * @data: Pointer to external interrupt buffer
1351 *
1352 * Process message complete work item. Called from tasklet while holding
1353 * iucv_table_lock.
1354 */
1355struct iucv_message_complete {
1356 u16 ippathid;
1357 u8 ipflags1;
1358 u8 iptype;
1359 u32 ipmsgid;
1360 u32 ipaudit;
1361 u8 iprmmsg[8];
1362 u32 ipsrccls;
1363 u32 ipmsgtag;
1364 u32 res;
1365 u32 ipbfln2f;
1366 u8 ippollfg;
1367 u8 res2[3];
1368} __attribute__ ((packed));
1369
1370static void iucv_message_complete(struct iucv_irq_data *data)
1371{
1372 struct iucv_message_complete *imc = (void *) data;
1373 struct iucv_path *path = iucv_path_table[imc->ippathid];
1374 struct iucv_message msg;
1375
1376 BUG_ON(!path || !path->handler);
1377 if (path->handler->message_complete) {
1378 msg.flags = imc->ipflags1;
1379 msg.id = imc->ipmsgid;
1380 msg.audit = imc->ipaudit;
1381 memcpy(msg.rmmsg, imc->iprmmsg, 8);
1382 msg.class = imc->ipsrccls;
1383 msg.tag = imc->ipmsgtag;
1384 msg.length = imc->ipbfln2f;
1385 path->handler->message_complete(path, &msg);
1386 }
1387}
1388
1389/**
1390 * iucv_message_pending
1391 * @data: Pointer to external interrupt buffer
1392 *
1393 * Process message pending work item. Called from tasklet while holding
1394 * iucv_table_lock.
1395 */
1396struct iucv_message_pending {
1397 u16 ippathid;
1398 u8 ipflags1;
1399 u8 iptype;
1400 u32 ipmsgid;
1401 u32 iptrgcls;
1402 union {
1403 u32 iprmmsg1_u32;
1404 u8 iprmmsg1[4];
1405 } ln1msg1;
1406 union {
1407 u32 ipbfln1f;
1408 u8 iprmmsg2[4];
1409 } ln1msg2;
1410 u32 res1[3];
1411 u32 ipbfln2f;
1412 u8 ippollfg;
1413 u8 res2[3];
1414} __attribute__ ((packed));
1415
1416static void iucv_message_pending(struct iucv_irq_data *data)
1417{
1418 struct iucv_message_pending *imp = (void *) data;
1419 struct iucv_path *path = iucv_path_table[imp->ippathid];
1420 struct iucv_message msg;
1421
1422 BUG_ON(!path || !path->handler);
1423 if (path->handler->message_pending) {
1424 msg.flags = imp->ipflags1;
1425 msg.id = imp->ipmsgid;
1426 msg.class = imp->iptrgcls;
1427 if (imp->ipflags1 & IUCV_IPRMDATA) {
1428 memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
1429 msg.length = 8;
1430 } else
1431 msg.length = imp->ln1msg2.ipbfln1f;
1432 msg.reply_size = imp->ipbfln2f;
1433 path->handler->message_pending(path, &msg);
1434 }
1435}
1436
1437/**
1438 * iucv_tasklet_handler:
1439 *
1440 * This tasklet loops over the queue of irq buffers created by
1441 * iucv_external_interrupt, calls the appropriate action handler
1442 * and then frees the buffer.
1443 */
1444static void iucv_tasklet_handler(unsigned long ignored)
1445{
1446 typedef void iucv_irq_fn(struct iucv_irq_data *);
1447 static iucv_irq_fn *irq_fn[] = {
1448 [0x01] = iucv_path_pending,
1449 [0x02] = iucv_path_complete,
1450 [0x03] = iucv_path_severed,
1451 [0x04] = iucv_path_quiesced,
1452 [0x05] = iucv_path_resumed,
1453 [0x06] = iucv_message_complete,
1454 [0x07] = iucv_message_complete,
1455 [0x08] = iucv_message_pending,
1456 [0x09] = iucv_message_pending,
1457 };
1458 struct iucv_work *p;
1459
1460 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
1461 spin_lock(&iucv_table_lock);
1462 iucv_tasklet_cpu = smp_processor_id();
1463
1464 spin_lock_irq(&iucv_work_lock);
1465 while (!list_empty(&iucv_work_queue)) {
1466 p = list_entry(iucv_work_queue.next, struct iucv_work, list);
1467 list_del_init(&p->list);
1468 spin_unlock_irq(&iucv_work_lock);
1469 irq_fn[p->data.iptype](&p->data);
1470 kfree(p);
1471 spin_lock_irq(&iucv_work_lock);
1472 }
1473 spin_unlock_irq(&iucv_work_lock);
1474
1475 iucv_tasklet_cpu = -1;
1476 spin_unlock(&iucv_table_lock);
1477}
1478
1479/**
1480 * iucv_external_interrupt
1481 * @code: irq code
1482 *
1483 * Handles external interrupts coming in from CP.
1484 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
1485 */
1486static void iucv_external_interrupt(u16 code)
1487{
1488 struct iucv_irq_data *p;
1489 struct iucv_work *work;
1490
1491 p = percpu_ptr(iucv_irq_data, smp_processor_id());
1492 if (p->ippathid >= iucv_max_pathid) {
1493		printk(KERN_WARNING "iucv_external_interrupt: Got interrupt with "
1494 "pathid %d > max_connections (%ld)\n",
1495 p->ippathid, iucv_max_pathid - 1);
1496 iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
1497 return;
1498 }
1499 if (p->iptype < 0x01 || p->iptype > 0x09) {
1500		printk(KERN_ERR "iucv_external_interrupt: unknown iucv interrupt\n");
1501 return;
1502 }
1503 work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC);
1504 if (!work) {
1505 printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
1506 return;
1507 }
1508 memcpy(&work->data, p, sizeof(work->data));
1509 spin_lock(&iucv_work_lock);
1510 list_add_tail(&work->list, &iucv_work_queue);
1511 spin_unlock(&iucv_work_lock);
1512 tasklet_schedule(&iucv_tasklet);
1513}
1514
1515/**
1516 * iucv_init
1517 *
1518 * Allocates and initializes various data structures.
1519 */
1520static int iucv_init(void)
1521{
1522 int rc;
1523
1524 if (!MACHINE_IS_VM) {
1525 rc = -EPROTONOSUPPORT;
1526 goto out;
1527 }
1528 rc = iucv_query_maxconn();
1529 if (rc)
1530 goto out;
1531 rc = register_external_interrupt (0x4000, iucv_external_interrupt);
1532 if (rc)
1533 goto out;
1534 rc = bus_register(&iucv_bus);
1535 if (rc)
1536 goto out_int;
1537 iucv_root = s390_root_dev_register("iucv");
1538 if (IS_ERR(iucv_root)) {
1539 rc = PTR_ERR(iucv_root);
1540 goto out_bus;
1541 }
1542	/* Note: GFP_DMA is used to get memory below 2G */
1543 iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
1544 GFP_KERNEL|GFP_DMA);
1545 if (!iucv_irq_data) {
1546 rc = -ENOMEM;
1547 goto out_root;
1548 }
1549 /* Allocate parameter blocks. */
1550 iucv_param = percpu_alloc(sizeof(union iucv_param),
1551 GFP_KERNEL|GFP_DMA);
1552 if (!iucv_param) {
1553 rc = -ENOMEM;
1554 goto out_extint;
1555 }
1556 register_hotcpu_notifier(&iucv_cpu_notifier);
1557 ASCEBC(iucv_error_no_listener, 16);
1558 ASCEBC(iucv_error_no_memory, 16);
1559 ASCEBC(iucv_error_pathid, 16);
1560 iucv_available = 1;
1561 return 0;
1562
1563out_extint:
1564 percpu_free(iucv_irq_data);
1565out_root:
1566 s390_root_dev_unregister(iucv_root);
1567out_bus:
1568 bus_unregister(&iucv_bus);
1569out_int:
1570 unregister_external_interrupt(0x4000, iucv_external_interrupt);
1571out:
1572 return rc;
1573}
1574
1575/**
1576 * iucv_exit
1577 *
1578 * Frees everything allocated from iucv_init.
1579 */
1580static void iucv_exit(void)
1581{
1582 struct iucv_work *p, *n;
1583
1584 spin_lock_irq(&iucv_work_lock);
1585 list_for_each_entry_safe(p, n, &iucv_work_queue, list)
1586 kfree(p);
1587 spin_unlock_irq(&iucv_work_lock);
1588 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1589 percpu_free(iucv_param);
1590 percpu_free(iucv_irq_data);
1591 s390_root_dev_unregister(iucv_root);
1592 bus_unregister(&iucv_bus);
1593 unregister_external_interrupt(0x4000, iucv_external_interrupt);
1594}
1595
1596subsys_initcall(iucv_init);
1597module_exit(iucv_exit);
1598
1599/**
1600 * Export all public stuff
1601 */
1602EXPORT_SYMBOL (iucv_bus);
1603EXPORT_SYMBOL (iucv_root);
1604EXPORT_SYMBOL (iucv_register);
1605EXPORT_SYMBOL (iucv_unregister);
1606EXPORT_SYMBOL (iucv_path_accept);
1607EXPORT_SYMBOL (iucv_path_connect);
1608EXPORT_SYMBOL (iucv_path_quiesce);
1609EXPORT_SYMBOL (iucv_path_sever);
1610EXPORT_SYMBOL (iucv_message_purge);
1611EXPORT_SYMBOL (iucv_message_receive);
1612EXPORT_SYMBOL (iucv_message_reject);
1613EXPORT_SYMBOL (iucv_message_reply);
1614EXPORT_SYMBOL (iucv_message_send);
1615EXPORT_SYMBOL (iucv_message_send2way);
1616
1617MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
1618MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
1619MODULE_LICENSE("GPL");
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5dd5094659a1..b4e444063d1f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2345,6 +2345,196 @@ out:
2345	return err;
2346}
2347
2348#ifdef CONFIG_NET_KEY_MIGRATE
2349static int pfkey_sockaddr_pair_size(sa_family_t family)
2350{
2351 switch (family) {
2352 case AF_INET:
2353 return PFKEY_ALIGN8(sizeof(struct sockaddr_in) * 2);
2354#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2355 case AF_INET6:
2356 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6) * 2);
2357#endif
2358 default:
2359 return 0;
2360 }
2361 /* NOTREACHED */
2362}
2363
2364static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq,
2365 xfrm_address_t *saddr, xfrm_address_t *daddr,
2366 u16 *family)
2367{
2368 struct sockaddr *sa = (struct sockaddr *)(rq + 1);
2369 if (rq->sadb_x_ipsecrequest_len <
2370 pfkey_sockaddr_pair_size(sa->sa_family))
2371 return -EINVAL;
2372
2373 switch (sa->sa_family) {
2374 case AF_INET:
2375 {
2376 struct sockaddr_in *sin;
2377 sin = (struct sockaddr_in *)sa;
2378 if ((sin+1)->sin_family != AF_INET)
2379 return -EINVAL;
2380 memcpy(&saddr->a4, &sin->sin_addr, sizeof(saddr->a4));
2381 sin++;
2382 memcpy(&daddr->a4, &sin->sin_addr, sizeof(daddr->a4));
2383 *family = AF_INET;
2384 break;
2385 }
2386#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2387 case AF_INET6:
2388 {
2389 struct sockaddr_in6 *sin6;
2390 sin6 = (struct sockaddr_in6 *)sa;
2391 if ((sin6+1)->sin6_family != AF_INET6)
2392 return -EINVAL;
2393 memcpy(&saddr->a6, &sin6->sin6_addr,
2394 sizeof(saddr->a6));
2395 sin6++;
2396 memcpy(&daddr->a6, &sin6->sin6_addr,
2397 sizeof(daddr->a6));
2398 *family = AF_INET6;
2399 break;
2400 }
2401#endif
2402 default:
2403 return -EINVAL;
2404 }
2405
2406 return 0;
2407}
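/*
 * Sketch of the buffer layout parse_sockaddr_pair() expects: the source
 * and destination sockaddrs of one family immediately follow the
 * ipsecrequest header.
 *
 *	struct sadb_x_ipsecrequest | sockaddr (src) | sockaddr (dst)
 */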
2408
2409static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2410 struct xfrm_migrate *m)
2411{
2412 int err;
2413 struct sadb_x_ipsecrequest *rq2;
2414
2415 if (len <= sizeof(struct sadb_x_ipsecrequest) ||
2416 len < rq1->sadb_x_ipsecrequest_len)
2417 return -EINVAL;
2418
2419	/* old endpoints */
2420 err = parse_sockaddr_pair(rq1, &m->old_saddr, &m->old_daddr,
2421 &m->old_family);
2422 if (err)
2423 return err;
2424
2425 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
2426 len -= rq1->sadb_x_ipsecrequest_len;
2427
2428 if (len <= sizeof(struct sadb_x_ipsecrequest) ||
2429 len < rq2->sadb_x_ipsecrequest_len)
2430 return -EINVAL;
2431
2432 /* new endpoints */
2433 err = parse_sockaddr_pair(rq2, &m->new_saddr, &m->new_daddr,
2434 &m->new_family);
2435 if (err)
2436 return err;
2437
2438 if (rq1->sadb_x_ipsecrequest_proto != rq2->sadb_x_ipsecrequest_proto ||
2439 rq1->sadb_x_ipsecrequest_mode != rq2->sadb_x_ipsecrequest_mode ||
2440 rq1->sadb_x_ipsecrequest_reqid != rq2->sadb_x_ipsecrequest_reqid)
2441 return -EINVAL;
2442
2443 m->proto = rq1->sadb_x_ipsecrequest_proto;
2444 m->mode = rq1->sadb_x_ipsecrequest_mode - 1;
2445 m->reqid = rq1->sadb_x_ipsecrequest_reqid;
2446
2447 return ((int)(rq1->sadb_x_ipsecrequest_len +
2448 rq2->sadb_x_ipsecrequest_len));
2449}
2450
2451static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2452 struct sadb_msg *hdr, void **ext_hdrs)
2453{
2454 int i, len, ret, err = -EINVAL;
2455 u8 dir;
2456 struct sadb_address *sa;
2457 struct sadb_x_policy *pol;
2458 struct sadb_x_ipsecrequest *rq;
2459 struct xfrm_selector sel;
2460 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2461
2462 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
2463 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
2464 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
2465 err = -EINVAL;
2466 goto out;
2467 }
2468
2469 pol = ext_hdrs[SADB_X_EXT_POLICY - 1];
2470 if (!pol) {
2471 err = -EINVAL;
2472 goto out;
2473 }
2474
2475 if (pol->sadb_x_policy_dir >= IPSEC_DIR_MAX) {
2476 err = -EINVAL;
2477 goto out;
2478 }
2479
2480 dir = pol->sadb_x_policy_dir - 1;
2481 memset(&sel, 0, sizeof(sel));
2482
2483 /* set source address info of selector */
2484 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC - 1];
2485 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
2486 sel.prefixlen_s = sa->sadb_address_prefixlen;
2487 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
2488 sel.sport = ((struct sockaddr_in *)(sa + 1))->sin_port;
2489 if (sel.sport)
2490 sel.sport_mask = ~0;
2491
2492 /* set destination address info of selector */
2493 sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1],
2494 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
2495 sel.prefixlen_d = sa->sadb_address_prefixlen;
2496 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
2497 sel.dport = ((struct sockaddr_in *)(sa + 1))->sin_port;
2498 if (sel.dport)
2499 sel.dport_mask = ~0;
2500
2501 rq = (struct sadb_x_ipsecrequest *)(pol + 1);
2502
2503 /* extract ipsecrequests */
2504 i = 0;
2505 len = pol->sadb_x_policy_len * 8 - sizeof(struct sadb_x_policy);
2506
2507 while (len > 0 && i < XFRM_MAX_DEPTH) {
2508 ret = ipsecrequests_to_migrate(rq, len, &m[i]);
2509 if (ret < 0) {
2510 err = ret;
2511 goto out;
2512 } else {
2513 rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
2514 len -= ret;
2515 i++;
2516 }
2517 }
2518
2519 if (!i || len > 0) {
2520 err = -EINVAL;
2521 goto out;
2522 }
2523
2524 return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i);
2525
2526 out:
2527 return err;
2528}
2529#else
2530static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2531 struct sadb_msg *hdr, void **ext_hdrs)
2532{
2533 return -ENOPROTOOPT;
2534}
2535#endif
2536
2537
2538static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
2539{
2540	unsigned int dir;
@@ -2473,6 +2663,7 @@ static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
2663	[SADB_X_SPDFLUSH]	= pfkey_spdflush,
2664	[SADB_X_SPDSETIDX]	= pfkey_spdadd,
2665	[SADB_X_SPDDELETE2]	= pfkey_spdget,
2666 [SADB_X_MIGRATE] = pfkey_migrate,
2667};
2668
2669static int pfkey_process(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr)
@@ -3118,6 +3309,236 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3309	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
3310}
3311
3312#ifdef CONFIG_NET_KEY_MIGRATE
3313static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3314 struct xfrm_selector *sel)
3315{
3316 struct sadb_address *addr;
3317 struct sockaddr_in *sin;
3318#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3319 struct sockaddr_in6 *sin6;
3320#endif
3321 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
3322 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
3323 addr->sadb_address_exttype = type;
3324 addr->sadb_address_proto = sel->proto;
3325 addr->sadb_address_reserved = 0;
3326
3327 switch (type) {
3328 case SADB_EXT_ADDRESS_SRC:
3329 if (sel->family == AF_INET) {
3330 addr->sadb_address_prefixlen = sel->prefixlen_s;
3331 sin = (struct sockaddr_in *)(addr + 1);
3332 sin->sin_family = AF_INET;
3333 memcpy(&sin->sin_addr.s_addr, &sel->saddr,
3334 sizeof(sin->sin_addr.s_addr));
3335 sin->sin_port = 0;
3336 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3337 }
3338#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3339 else if (sel->family == AF_INET6) {
3340 addr->sadb_address_prefixlen = sel->prefixlen_s;
3341 sin6 = (struct sockaddr_in6 *)(addr + 1);
3342 sin6->sin6_family = AF_INET6;
3343 sin6->sin6_port = 0;
3344 sin6->sin6_flowinfo = 0;
3345 sin6->sin6_scope_id = 0;
3346 memcpy(&sin6->sin6_addr.s6_addr, &sel->saddr,
3347 sizeof(sin6->sin6_addr.s6_addr));
3348 }
3349#endif
3350 break;
3351 case SADB_EXT_ADDRESS_DST:
3352 if (sel->family == AF_INET) {
3353 addr->sadb_address_prefixlen = sel->prefixlen_d;
3354 sin = (struct sockaddr_in *)(addr + 1);
3355 sin->sin_family = AF_INET;
3356 memcpy(&sin->sin_addr.s_addr, &sel->daddr,
3357 sizeof(sin->sin_addr.s_addr));
3358 sin->sin_port = 0;
3359 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3360 }
3361#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3362 else if (sel->family == AF_INET6) {
3363 addr->sadb_address_prefixlen = sel->prefixlen_d;
3364 sin6 = (struct sockaddr_in6 *)(addr + 1);
3365 sin6->sin6_family = AF_INET6;
3366 sin6->sin6_port = 0;
3367 sin6->sin6_flowinfo = 0;
3368 sin6->sin6_scope_id = 0;
3369 memcpy(&sin6->sin6_addr.s6_addr, &sel->daddr,
3370 sizeof(sin6->sin6_addr.s6_addr));
3371 }
3372#endif
3373 break;
3374 default:
3375 return -EINVAL;
3376 }
3377
3378 return 0;
3379}
3380
3381static int set_ipsecrequest(struct sk_buff *skb,
3382 uint8_t proto, uint8_t mode, int level,
3383 uint32_t reqid, uint8_t family,
3384 xfrm_address_t *src, xfrm_address_t *dst)
3385{
3386 struct sadb_x_ipsecrequest *rq;
3387 struct sockaddr_in *sin;
3388#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3389 struct sockaddr_in6 *sin6;
3390#endif
3391 int size_req;
3392
3393 size_req = sizeof(struct sadb_x_ipsecrequest) +
3394 pfkey_sockaddr_pair_size(family);
3395
3396 rq = (struct sadb_x_ipsecrequest *)skb_put(skb, size_req);
3397 memset(rq, 0, size_req);
3398 rq->sadb_x_ipsecrequest_len = size_req;
3399 rq->sadb_x_ipsecrequest_proto = proto;
3400 rq->sadb_x_ipsecrequest_mode = mode;
3401 rq->sadb_x_ipsecrequest_level = level;
3402 rq->sadb_x_ipsecrequest_reqid = reqid;
3403
3404 switch (family) {
3405 case AF_INET:
3406 sin = (struct sockaddr_in *)(rq + 1);
3407 sin->sin_family = AF_INET;
3408 memcpy(&sin->sin_addr.s_addr, src,
3409 sizeof(sin->sin_addr.s_addr));
3410 sin++;
3411 sin->sin_family = AF_INET;
3412 memcpy(&sin->sin_addr.s_addr, dst,
3413 sizeof(sin->sin_addr.s_addr));
3414 break;
3415#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3416 case AF_INET6:
3417 sin6 = (struct sockaddr_in6 *)(rq + 1);
3418 sin6->sin6_family = AF_INET6;
3419 sin6->sin6_port = 0;
3420 sin6->sin6_flowinfo = 0;
3421 sin6->sin6_scope_id = 0;
3422 memcpy(&sin6->sin6_addr.s6_addr, src,
3423 sizeof(sin6->sin6_addr.s6_addr));
3424 sin6++;
3425 sin6->sin6_family = AF_INET6;
3426 sin6->sin6_port = 0;
3427 sin6->sin6_flowinfo = 0;
3428 sin6->sin6_scope_id = 0;
3429 memcpy(&sin6->sin6_addr.s6_addr, dst,
3430 sizeof(sin6->sin6_addr.s6_addr));
3431 break;
3432#endif
3433 default:
3434 return -EINVAL;
3435 }
3436
3437 return 0;
3438}
3439#endif
3440
3441#ifdef CONFIG_NET_KEY_MIGRATE
3442static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
3443 struct xfrm_migrate *m, int num_bundles)
3444{
3445 int i;
3446 int sasize_sel;
3447 int size = 0;
3448 int size_pol = 0;
3449 struct sk_buff *skb;
3450 struct sadb_msg *hdr;
3451 struct sadb_x_policy *pol;
3452 struct xfrm_migrate *mp;
3453
3454 if (type != XFRM_POLICY_TYPE_MAIN)
3455 return 0;
3456
3457 if (num_bundles <= 0 || num_bundles > XFRM_MAX_DEPTH)
3458 return -EINVAL;
3459
3460 /* selector */
3461 sasize_sel = pfkey_sockaddr_size(sel->family);
3462 if (!sasize_sel)
3463 return -EINVAL;
3464 size += (sizeof(struct sadb_address) + sasize_sel) * 2;
3465
3466 /* policy info */
3467 size_pol += sizeof(struct sadb_x_policy);
3468
3469 /* ipsecrequests */
3470 for (i = 0, mp = m; i < num_bundles; i++, mp++) {
3471 /* old locator pair */
3472 size_pol += sizeof(struct sadb_x_ipsecrequest) +
3473 pfkey_sockaddr_pair_size(mp->old_family);
3474 /* new locator pair */
3475 size_pol += sizeof(struct sadb_x_ipsecrequest) +
3476 pfkey_sockaddr_pair_size(mp->new_family);
3477 }
3478
3479 size += sizeof(struct sadb_msg) + size_pol;
3480
3481 /* alloc buffer */
3482 skb = alloc_skb(size, GFP_ATOMIC);
3483 if (skb == NULL)
3484 return -ENOMEM;
3485
3486 hdr = (struct sadb_msg *)skb_put(skb, sizeof(struct sadb_msg));
3487 hdr->sadb_msg_version = PF_KEY_V2;
3488 hdr->sadb_msg_type = SADB_X_MIGRATE;
3489 hdr->sadb_msg_satype = pfkey_proto2satype(m->proto);
3490 hdr->sadb_msg_len = size / 8;
3491 hdr->sadb_msg_errno = 0;
3492 hdr->sadb_msg_reserved = 0;
3493 hdr->sadb_msg_seq = 0;
3494 hdr->sadb_msg_pid = 0;
3495
3496 /* selector src */
3497 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
3498
3499 /* selector dst */
3500 set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel);
3501
3502 /* policy information */
3503 pol = (struct sadb_x_policy *)skb_put(skb, sizeof(struct sadb_x_policy));
3504 pol->sadb_x_policy_len = size_pol / 8;
3505 pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
3506 pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
3507 pol->sadb_x_policy_dir = dir + 1;
3508 pol->sadb_x_policy_id = 0;
3509 pol->sadb_x_policy_priority = 0;
3510
3511 for (i = 0, mp = m; i < num_bundles; i++, mp++) {
3512 /* old ipsecrequest */
3513 if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
3514 (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
3515 mp->reqid, mp->old_family,
3516 &mp->old_saddr, &mp->old_daddr) < 0) {
3517 return -EINVAL;
3518 }
3519
3520 /* new ipsecrequest */
3521 if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
3522 (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
3523 mp->reqid, mp->new_family,
3524 &mp->new_saddr, &mp->new_daddr) < 0) {
3525 return -EINVAL;
3526 }
3527 }
3528
3529 /* broadcast migrate message to sockets */
3530 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
3531
3532 return 0;
3533}
3534#else
3535static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
3536 struct xfrm_migrate *m, int num_bundles)
3537{
3538 return -ENOPROTOOPT;
3539}
3540#endif
3541
3542static int pfkey_sendmsg(struct kiocb *kiocb,
3543			 struct socket *sock, struct msghdr *msg, size_t len)
3544{
@@ -3287,6 +3708,7 @@ static struct xfrm_mgr pfkeyv2_mgr =
3708	.compile_policy	= pfkey_compile_policy,
3709	.new_mapping	= pfkey_send_new_mapping,
3710	.notify_policy	= pfkey_send_policy_notify,
3711 .migrate = pfkey_send_migrate,
3712};
3713
3714static void __exit ipsec_pfkey_exit(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 80107d4909c5..748f7f00909a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -235,6 +235,19 @@ config NF_CONNTRACK_PPTP
235
236	  To compile it as a module, choose M here. If unsure, say N.
237
238config NF_CONNTRACK_SANE
239 tristate "SANE protocol support (EXPERIMENTAL)"
240 depends on EXPERIMENTAL && NF_CONNTRACK
241 help
242 SANE is a protocol for remote access to scanners as implemented
243 by the 'saned' daemon. Like FTP, it uses separate control and
244 data connections.
245
246 With this module you can support SANE on a connection tracking
247 firewall.
248
249 To compile it as a module, choose M here. If unsure, say N.
250
251config NF_CONNTRACK_SIP
252	tristate "SIP protocol support (EXPERIMENTAL)"
253	depends on EXPERIMENTAL && NF_CONNTRACK
@@ -382,6 +395,32 @@ config NETFILTER_XT_TARGET_CONNSECMARK
395
396	  To compile it as a module, choose M here. If unsure, say N.
397
398config NETFILTER_XT_TARGET_TCPMSS
399 tristate '"TCPMSS" target support'
400 depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
401 ---help---
402 This option adds a `TCPMSS' target, which allows you to alter the
403 MSS value of TCP SYN packets, to control the maximum size for that
404 connection (usually limiting it to your outgoing interface's MTU
405 minus 40).
406
407 This is used to overcome criminally braindead ISPs or servers which
408 block ICMP Fragmentation Needed packets. The symptoms of this
409 problem are that everything works fine from your Linux
410 firewall/router, but machines behind it can never exchange large
411 packets:
412 1) Web browsers connect, then hang with no data received.
413 2) Small mail works fine, but large emails hang.
414 3) ssh works fine, but scp hangs after initial handshaking.
415
416 Workaround: activate this option and add a rule to your firewall
417 configuration like:
418
419 iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
420 -j TCPMSS --clamp-mss-to-pmtu
421
422 To compile it as a module, choose M here. If unsure, say N.
423
424config NETFILTER_XT_MATCH_COMMENT
425	tristate '"comment" match support'
426	depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 5dc5574f7e99..b2b5c7566b26 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
29obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
30obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
31obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
32obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
33obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
34obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
35
@@ -44,6 +45,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
50
51# matches
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 626b0011dd89..6fccdcf43e08 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -60,12 +60,9 @@ static DEFINE_RWLOCK(tcp_lock);
60 If it's non-zero, we mark only out of window RST segments as INVALID. */ 60 If it's non-zero, we mark only out of window RST segments as INVALID. */
61int nf_ct_tcp_be_liberal __read_mostly = 0; 61int nf_ct_tcp_be_liberal __read_mostly = 0;
62 62
63/* When connection is picked up from the middle, how many packets are required 63/* If it is set to zero, we disable picking up already established
64 to pass in each direction when we assume we are in sync - if any side uses
65 window scaling, we lost the game.
66 If it is set to zero, we disable picking up already established
67 connections. */ 64 connections. */
68int nf_ct_tcp_loose __read_mostly = 3; 65int nf_ct_tcp_loose __read_mostly = 1;
69 66
70/* Max number of the retransmitted packets without receiving an (acceptable) 67/* Max number of the retransmitted packets without receiving an (acceptable)
71 ACK from the destination. If this number is reached, a shorter timer 68 ACK from the destination. If this number is reached, a shorter timer
@@ -650,11 +647,10 @@ static int tcp_in_window(struct ip_ct_tcp *state,
650 before(sack, receiver->td_end + 1), 647 before(sack, receiver->td_end + 1),
651 after(ack, receiver->td_end - MAXACKWINDOW(sender))); 648 after(ack, receiver->td_end - MAXACKWINDOW(sender)));
652 649
653 if (sender->loose || receiver->loose || 650 if (before(seq, sender->td_maxend + 1) &&
654 (before(seq, sender->td_maxend + 1) && 651 after(end, sender->td_end - receiver->td_maxwin - 1) &&
655 after(end, sender->td_end - receiver->td_maxwin - 1) && 652 before(sack, receiver->td_end + 1) &&
656 before(sack, receiver->td_end + 1) && 653 after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
657 after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
658 /* 654 /*
659 * Take into account window scaling (RFC 1323). 655 * Take into account window scaling (RFC 1323).
660 */ 656 */
@@ -699,15 +695,13 @@ static int tcp_in_window(struct ip_ct_tcp *state,
699 state->retrans = 0; 695 state->retrans = 0;
700 } 696 }
701 } 697 }
702 /*
703 * Close the window of disabled window tracking :-)
704 */
705 if (sender->loose)
706 sender->loose--;
707
708 res = 1; 698 res = 1;
709 } else { 699 } else {
710 if (LOG_INVALID(IPPROTO_TCP)) 700 res = 0;
701 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
702 nf_ct_tcp_be_liberal)
703 res = 1;
704 if (!res && LOG_INVALID(IPPROTO_TCP))
711 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 705 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
712 "nf_ct_tcp: %s ", 706 "nf_ct_tcp: %s ",
713 before(seq, sender->td_maxend + 1) ? 707 before(seq, sender->td_maxend + 1) ?
@@ -718,8 +712,6 @@ static int tcp_in_window(struct ip_ct_tcp *state,
718 : "ACK is over the upper bound (ACKed data not seen yet)" 712 : "ACK is over the upper bound (ACKed data not seen yet)"
719 : "SEQ is under the lower bound (already ACKed data retransmitted)" 713 : "SEQ is under the lower bound (already ACKed data retransmitted)"
720 : "SEQ is over the upper bound (over the window of the receiver)"); 714 : "SEQ is over the upper bound (over the window of the receiver)");
721
722 res = nf_ct_tcp_be_liberal;
723 } 715 }
724 716
725 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " 717 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
@@ -1063,8 +1055,6 @@ static int tcp_new(struct nf_conn *conntrack,
1063 1055
1064 tcp_options(skb, dataoff, th, &conntrack->proto.tcp.seen[0]); 1056 tcp_options(skb, dataoff, th, &conntrack->proto.tcp.seen[0]);
1065 conntrack->proto.tcp.seen[1].flags = 0; 1057 conntrack->proto.tcp.seen[1].flags = 0;
1066 conntrack->proto.tcp.seen[0].loose =
1067 conntrack->proto.tcp.seen[1].loose = 0;
1068 } else if (nf_ct_tcp_loose == 0) { 1058 } else if (nf_ct_tcp_loose == 0) {
1069 /* Don't try to pick up connections. */ 1059 /* Don't try to pick up connections. */
1070 return 0; 1060 return 0;
@@ -1085,11 +1075,11 @@ static int tcp_new(struct nf_conn *conntrack,
1085 conntrack->proto.tcp.seen[0].td_maxwin; 1075 conntrack->proto.tcp.seen[0].td_maxwin;
1086 conntrack->proto.tcp.seen[0].td_scale = 0; 1076 conntrack->proto.tcp.seen[0].td_scale = 0;
1087 1077
1088 /* We assume SACK. Should we assume window scaling too? */ 1078 /* We assume SACK and liberal window checking to handle
1079 * window scaling */
1089 conntrack->proto.tcp.seen[0].flags = 1080 conntrack->proto.tcp.seen[0].flags =
1090 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM; 1081 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
1091 conntrack->proto.tcp.seen[0].loose = 1082 IP_CT_TCP_FLAG_BE_LIBERAL;
1092 conntrack->proto.tcp.seen[1].loose = nf_ct_tcp_loose;
1093 } 1083 }
1094 1084
1095 conntrack->proto.tcp.seen[1].td_end = 0; 1085 conntrack->proto.tcp.seen[1].td_end = 0;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
new file mode 100644
index 000000000000..eb2d1dc46d45
--- /dev/null
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -0,0 +1,242 @@
1/* SANE connection tracking helper
2 * (SANE = Scanner Access Now Easy)
3 * For documentation about the SANE network protocol see
4 * http://www.sane-project.org/html/doc015.html
5 */
6
7/* Copyright (C) 2007 Red Hat, Inc.
8 * Author: Michal Schmidt <mschmidt@redhat.com>
9 * Based on the FTP conntrack helper (net/netfilter/nf_conntrack_ftp.c):
10 * (C) 1999-2001 Paul `Rusty' Russell
11 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
12 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
13 * (C) 2003 Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/netfilter.h>
23#include <linux/in.h>
24#include <linux/tcp.h>
25#include <net/netfilter/nf_conntrack.h>
26#include <net/netfilter/nf_conntrack_helper.h>
27#include <net/netfilter/nf_conntrack_expect.h>
28#include <linux/netfilter/nf_conntrack_sane.h>
29
30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Michal Schmidt <mschmidt@redhat.com>");
32MODULE_DESCRIPTION("SANE connection tracking helper");
33
34static char *sane_buffer;
35
36static DEFINE_SPINLOCK(nf_sane_lock);
37
38#define MAX_PORTS 8
39static u_int16_t ports[MAX_PORTS];
40static unsigned int ports_c;
41module_param_array(ports, ushort, &ports_c, 0400);
42
43#if 0
44#define DEBUGP printk
45#else
46#define DEBUGP(format, args...)
47#endif
48
49struct sane_request {
50 __be32 RPC_code;
51#define SANE_NET_START 7 /* RPC code */
52
53 __be32 handle;
54};
55
56struct sane_reply_net_start {
57 __be32 status;
58#define SANE_STATUS_SUCCESS 0
59
60 __be16 zero;
61 __be16 port;
62 /* other fields aren't interesting for conntrack */
63};
64
65static int help(struct sk_buff **pskb,
66 unsigned int protoff,
67 struct nf_conn *ct,
68 enum ip_conntrack_info ctinfo)
69{
70 unsigned int dataoff, datalen;
71 struct tcphdr _tcph, *th;
72 char *sb_ptr;
73 int ret = NF_ACCEPT;
74 int dir = CTINFO2DIR(ctinfo);
75 struct nf_ct_sane_master *ct_sane_info;
76 struct nf_conntrack_expect *exp;
77 struct nf_conntrack_tuple *tuple;
78 struct sane_request *req;
79 struct sane_reply_net_start *reply;
80 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
81
82 ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
83 /* Until there's been traffic both ways, don't look in packets. */
84 if (ctinfo != IP_CT_ESTABLISHED &&
85 ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)
86 return NF_ACCEPT;
87
88 /* Not a full tcp header? */
89 th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph);
90 if (th == NULL)
91 return NF_ACCEPT;
92
93 /* No data? */
94 dataoff = protoff + th->doff * 4;
95 if (dataoff >= (*pskb)->len)
96 return NF_ACCEPT;
97
98 datalen = (*pskb)->len - dataoff;
99
100 spin_lock_bh(&nf_sane_lock);
101 sb_ptr = skb_header_pointer(*pskb, dataoff, datalen, sane_buffer);
102 BUG_ON(sb_ptr == NULL);
103
104 if (dir == IP_CT_DIR_ORIGINAL) {
105 if (datalen != sizeof(struct sane_request))
106 goto out;
107
108 req = (struct sane_request *)sb_ptr;
109 if (req->RPC_code != htonl(SANE_NET_START)) {
110 /* Not an interesting command */
111 ct_sane_info->state = SANE_STATE_NORMAL;
112 goto out;
113 }
114
115 /* We're interested in the next reply */
116 ct_sane_info->state = SANE_STATE_START_REQUESTED;
117 goto out;
118 }
119
120 /* Is it a reply to an uninteresting command? */
121 if (ct_sane_info->state != SANE_STATE_START_REQUESTED)
122 goto out;
123
124 /* It's a reply to SANE_NET_START. */
125 ct_sane_info->state = SANE_STATE_NORMAL;
126
127 if (datalen < sizeof(struct sane_reply_net_start)) {
128 DEBUGP("nf_ct_sane: NET_START reply too short\n");
129 goto out;
130 }
131
132 reply = (struct sane_reply_net_start *)sb_ptr;
133 if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
134 /* saned refused the command */
135 DEBUGP("nf_ct_sane: unsuccessful SANE_STATUS = %u\n",
136 ntohl(reply->status));
137 goto out;
138 }
139
140 /* Invalid saned reply? Ignore it. */
141 if (reply->zero != 0)
142 goto out;
143
144 exp = nf_conntrack_expect_alloc(ct);
145 if (exp == NULL) {
146 ret = NF_DROP;
147 goto out;
148 }
149
150 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
151 nf_conntrack_expect_init(exp, family,
152 &tuple->src.u3, &tuple->dst.u3,
153 IPPROTO_TCP,
154 NULL, &reply->port);
155
156 DEBUGP("nf_ct_sane: expect: ");
157 NF_CT_DUMP_TUPLE(&exp->tuple);
158 NF_CT_DUMP_TUPLE(&exp->mask);
159
160 /* Can't expect this? Best to drop packet now. */
161 if (nf_conntrack_expect_related(exp) != 0)
162 ret = NF_DROP;
163
164 nf_conntrack_expect_put(exp);
165
166out:
167 spin_unlock_bh(&nf_sane_lock);
168 return ret;
169}
170
171static struct nf_conntrack_helper sane[MAX_PORTS][2];
172static char sane_names[MAX_PORTS][2][sizeof("sane-65535")];
173
174/* don't make this __exit, since it's called from __init ! */
175static void nf_conntrack_sane_fini(void)
176{
177 int i, j;
178
179 for (i = 0; i < ports_c; i++) {
180 for (j = 0; j < 2; j++) {
181 DEBUGP("nf_ct_sane: unregistering helper for pf: %d "
182 "port: %d\n",
183 sane[i][j].tuple.src.l3num, ports[i]);
184 nf_conntrack_helper_unregister(&sane[i][j]);
185 }
186 }
187
188 kfree(sane_buffer);
189}
190
191static int __init nf_conntrack_sane_init(void)
192{
193 int i, j = -1, ret = 0;
194 char *tmpname;
195
196 sane_buffer = kmalloc(65536, GFP_KERNEL);
197 if (!sane_buffer)
198 return -ENOMEM;
199
200 if (ports_c == 0)
201 ports[ports_c++] = SANE_PORT;
202
203 /* FIXME should be configurable whether IPv4 and IPv6 connections
204 are tracked or not - YK */
205 for (i = 0; i < ports_c; i++) {
206 sane[i][0].tuple.src.l3num = PF_INET;
207 sane[i][1].tuple.src.l3num = PF_INET6;
208 for (j = 0; j < 2; j++) {
209 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
210 sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
211 sane[i][j].mask.src.u.tcp.port = 0xFFFF;
212 sane[i][j].mask.dst.protonum = 0xFF;
213 sane[i][j].max_expected = 1;
214 sane[i][j].timeout = 5 * 60; /* 5 Minutes */
215 sane[i][j].me = THIS_MODULE;
216 sane[i][j].help = help;
217 tmpname = &sane_names[i][j][0];
218 if (ports[i] == SANE_PORT)
219 sprintf(tmpname, "sane");
220 else
221 sprintf(tmpname, "sane-%d", ports[i]);
222 sane[i][j].name = tmpname;
223
224 DEBUGP("nf_ct_sane: registering helper for pf: %d "
225 "port: %d\n",
226 sane[i][j].tuple.src.l3num, ports[i]);
227 ret = nf_conntrack_helper_register(&sane[i][j]);
228 if (ret) {
229 printk(KERN_ERR "nf_ct_sane: failed to "
230 "register helper for pf: %d port: %d\n",
231 sane[i][j].tuple.src.l3num, ports[i]);
232 nf_conntrack_sane_fini();
233 return ret;
234 }
235 }
236 }
237
238 return 0;
239}
240
241module_init(nf_conntrack_sane_init);
242module_exit(nf_conntrack_sane_fini);
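For orientation, a hypothetical check built from the structures defined above (not code from the patch): the helper only opens a data-channel expectation after a SANE_NET_START request when saned's reply reports success and is well formed; the real code additionally requires the tracked state to be SANE_STATE_START_REQUESTED.

static int sane_reply_opens_data_channel(const struct sane_reply_net_start *reply)
{
        return reply->status == htonl(SANE_STATUS_SUCCESS) &&
               reply->zero == 0;        /* malformed replies are ignored */
}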
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index 50de965bb104..195e92990da7 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -33,9 +33,7 @@ target(struct sk_buff **pskb,
33{ 33{
34 const struct xt_classify_target_info *clinfo = targinfo; 34 const struct xt_classify_target_info *clinfo = targinfo;
35 35
36 if ((*pskb)->priority != clinfo->priority) 36 (*pskb)->priority = clinfo->priority;
37 (*pskb)->priority = clinfo->priority;
38
39 return XT_CONTINUE; 37 return XT_CONTINUE;
40} 38}
41 39
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
index 0534bfa65cce..795c058b16a5 100644
--- a/net/netfilter/xt_CONNMARK.c
+++ b/net/netfilter/xt_CONNMARK.c
@@ -61,7 +61,7 @@ target(struct sk_buff **pskb,
61#else 61#else
62 nf_conntrack_event_cache(IPCT_MARK, *pskb); 62 nf_conntrack_event_cache(IPCT_MARK, *pskb);
63#endif 63#endif
64 } 64 }
65 break; 65 break;
66 case XT_CONNMARK_SAVE: 66 case XT_CONNMARK_SAVE:
67 newmark = (*ctmark & ~markinfo->mask) | 67 newmark = (*ctmark & ~markinfo->mask) |
@@ -78,8 +78,7 @@ target(struct sk_buff **pskb,
78 case XT_CONNMARK_RESTORE: 78 case XT_CONNMARK_RESTORE:
79 mark = (*pskb)->mark; 79 mark = (*pskb)->mark;
80 diff = (*ctmark ^ mark) & markinfo->mask; 80 diff = (*ctmark ^ mark) & markinfo->mask;
81 if (diff != 0) 81 (*pskb)->mark = mark ^ diff;
82 (*pskb)->mark = mark ^ diff;
83 break; 82 break;
84 } 83 }
85 } 84 }
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index a3fe3c334b09..1ab0db641f96 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -41,8 +41,7 @@ static void secmark_save(struct sk_buff *skb)
41 41
42 connsecmark = nf_ct_get_secmark(skb, &ctinfo); 42 connsecmark = nf_ct_get_secmark(skb, &ctinfo);
43 if (connsecmark && !*connsecmark) 43 if (connsecmark && !*connsecmark)
44 if (*connsecmark != skb->secmark) 44 *connsecmark = skb->secmark;
45 *connsecmark = skb->secmark;
46 } 45 }
47} 46}
48 47
@@ -58,8 +57,7 @@ static void secmark_restore(struct sk_buff *skb)
58 57
59 connsecmark = nf_ct_get_secmark(skb, &ctinfo); 58 connsecmark = nf_ct_get_secmark(skb, &ctinfo);
60 if (connsecmark && *connsecmark) 59 if (connsecmark && *connsecmark)
61 if (skb->secmark != *connsecmark) 60 skb->secmark = *connsecmark;
62 skb->secmark = *connsecmark;
63 } 61 }
64} 62}
65 63
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
index 0b48547e8d64..cfc45af357d5 100644
--- a/net/netfilter/xt_MARK.c
+++ b/net/netfilter/xt_MARK.c
@@ -31,9 +31,7 @@ target_v0(struct sk_buff **pskb,
31{ 31{
32 const struct xt_mark_target_info *markinfo = targinfo; 32 const struct xt_mark_target_info *markinfo = targinfo;
33 33
34 if((*pskb)->mark != markinfo->mark) 34 (*pskb)->mark = markinfo->mark;
35 (*pskb)->mark = markinfo->mark;
36
37 return XT_CONTINUE; 35 return XT_CONTINUE;
38} 36}
39 37
@@ -62,9 +60,7 @@ target_v1(struct sk_buff **pskb,
62 break; 60 break;
63 } 61 }
64 62
65 if((*pskb)->mark != mark) 63 (*pskb)->mark = mark;
66 (*pskb)->mark = mark;
67
68 return XT_CONTINUE; 64 return XT_CONTINUE;
69} 65}
70 66
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index add752196290..f1131c3a9db5 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -47,9 +47,7 @@ static unsigned int target(struct sk_buff **pskb, const struct net_device *in,
47 BUG(); 47 BUG();
48 } 48 }
49 49
50 if ((*pskb)->secmark != secmark) 50 (*pskb)->secmark = secmark;
51 (*pskb)->secmark = secmark;
52
53 return XT_CONTINUE; 51 return XT_CONTINUE;
54} 52}
55 53
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
new file mode 100644
index 000000000000..db7e38c08de2
--- /dev/null
+++ b/net/netfilter/xt_TCPMSS.c
@@ -0,0 +1,296 @@
1/*
2 * This is a module which is used for setting the MSS option in TCP packets.
3 *
4 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/ip.h>
14#include <linux/ipv6.h>
15#include <linux/tcp.h>
16#include <net/ipv6.h>
17#include <net/tcp.h>
18
19#include <linux/netfilter_ipv4/ip_tables.h>
20#include <linux/netfilter_ipv6/ip6_tables.h>
21#include <linux/netfilter/x_tables.h>
22#include <linux/netfilter/xt_tcpudp.h>
23#include <linux/netfilter/xt_TCPMSS.h>
24
25MODULE_LICENSE("GPL");
26MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
27MODULE_DESCRIPTION("x_tables TCP MSS modification module");
28MODULE_ALIAS("ipt_TCPMSS");
29MODULE_ALIAS("ip6t_TCPMSS");
30
31static inline unsigned int
32optlen(const u_int8_t *opt, unsigned int offset)
33{
34 /* Beware zero-length options: make finite progress */
35 if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
36 return 1;
37 else
38 return opt[offset+1];
39}
40
41static int
42tcpmss_mangle_packet(struct sk_buff **pskb,
43 const struct xt_tcpmss_info *info,
44 unsigned int tcphoff,
45 unsigned int minlen)
46{
47 struct tcphdr *tcph;
48 unsigned int tcplen, i;
49 __be16 oldval;
50 u16 newmss;
51 u8 *opt;
52
53 if (!skb_make_writable(pskb, (*pskb)->len))
54 return -1;
55
56 tcplen = (*pskb)->len - tcphoff;
57 tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
58
 59 /* Since it passed flags test in tcp match, we know it is
60 not a fragment, and has data >= tcp header length. SYN
61 packets should not contain data: if they did, then we risk
62 running over MTU, sending Frag Needed and breaking things
63 badly. --RR */
64 if (tcplen != tcph->doff*4) {
65 if (net_ratelimit())
66 printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n",
67 (*pskb)->len);
68 return -1;
69 }
70
71 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
72 if (dst_mtu((*pskb)->dst) <= minlen) {
73 if (net_ratelimit())
74 printk(KERN_ERR "xt_TCPMSS: "
75 "unknown or invalid path-MTU (%u)\n",
76 dst_mtu((*pskb)->dst));
77 return -1;
78 }
79 newmss = dst_mtu((*pskb)->dst) - minlen;
80 } else
81 newmss = info->mss;
82
83 opt = (u_int8_t *)tcph;
84 for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
85 if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
86 opt[i+1] == TCPOLEN_MSS) {
87 u_int16_t oldmss;
88
89 oldmss = (opt[i+2] << 8) | opt[i+3];
90
91 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
92 oldmss <= newmss)
93 return 0;
94
95 opt[i+2] = (newmss & 0xff00) >> 8;
96 opt[i+3] = (newmss & 0x00ff);
97
98 nf_proto_csum_replace2(&tcph->check, *pskb,
99 htons(oldmss), htons(newmss), 0);
100 return 0;
101 }
102 }
103
104 /*
105 * MSS Option not found ?! add it..
106 */
107 if (skb_tailroom((*pskb)) < TCPOLEN_MSS) {
108 struct sk_buff *newskb;
109
110 newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
111 TCPOLEN_MSS, GFP_ATOMIC);
112 if (!newskb)
113 return -1;
114 kfree_skb(*pskb);
115 *pskb = newskb;
116 tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
117 }
118
119 skb_put((*pskb), TCPOLEN_MSS);
120
121 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
122 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
123
124 nf_proto_csum_replace2(&tcph->check, *pskb,
125 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
126 opt[0] = TCPOPT_MSS;
127 opt[1] = TCPOLEN_MSS;
128 opt[2] = (newmss & 0xff00) >> 8;
129 opt[3] = (newmss & 0x00ff);
130
131 nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
132
133 oldval = ((__be16 *)tcph)[6];
134 tcph->doff += TCPOLEN_MSS/4;
135 nf_proto_csum_replace2(&tcph->check, *pskb,
136 oldval, ((__be16 *)tcph)[6], 0);
137 return TCPOLEN_MSS;
138}
139
140static unsigned int
141xt_tcpmss_target4(struct sk_buff **pskb,
142 const struct net_device *in,
143 const struct net_device *out,
144 unsigned int hooknum,
145 const struct xt_target *target,
146 const void *targinfo)
147{
148 struct iphdr *iph = (*pskb)->nh.iph;
149 __be16 newlen;
150 int ret;
151
152 ret = tcpmss_mangle_packet(pskb, targinfo, iph->ihl * 4,
153 sizeof(*iph) + sizeof(struct tcphdr));
154 if (ret < 0)
155 return NF_DROP;
156 if (ret > 0) {
157 iph = (*pskb)->nh.iph;
158 newlen = htons(ntohs(iph->tot_len) + ret);
159 nf_csum_replace2(&iph->check, iph->tot_len, newlen);
160 iph->tot_len = newlen;
161 }
162 return XT_CONTINUE;
163}
164
165#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
166static unsigned int
167xt_tcpmss_target6(struct sk_buff **pskb,
168 const struct net_device *in,
169 const struct net_device *out,
170 unsigned int hooknum,
171 const struct xt_target *target,
172 const void *targinfo)
173{
174 struct ipv6hdr *ipv6h = (*pskb)->nh.ipv6h;
175 u8 nexthdr;
176 int tcphoff;
177 int ret;
178
179 nexthdr = ipv6h->nexthdr;
180 tcphoff = ipv6_skip_exthdr(*pskb, sizeof(*ipv6h), &nexthdr);
181 if (tcphoff < 0) {
182 WARN_ON(1);
183 return NF_DROP;
184 }
185 ret = tcpmss_mangle_packet(pskb, targinfo, tcphoff,
186 sizeof(*ipv6h) + sizeof(struct tcphdr));
187 if (ret < 0)
188 return NF_DROP;
189 if (ret > 0) {
190 ipv6h = (*pskb)->nh.ipv6h;
191 ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
192 }
193 return XT_CONTINUE;
194}
195#endif
196
197#define TH_SYN 0x02
198
199/* Must specify -p tcp --syn */
200static inline int find_syn_match(const struct xt_entry_match *m)
201{
202 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
203
204 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
205 tcpinfo->flg_cmp & TH_SYN &&
206 !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
207 return 1;
208
209 return 0;
210}
211
212static int
213xt_tcpmss_checkentry4(const char *tablename,
214 const void *entry,
215 const struct xt_target *target,
216 void *targinfo,
217 unsigned int hook_mask)
218{
219 const struct xt_tcpmss_info *info = targinfo;
220 const struct ipt_entry *e = entry;
221
222 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
223 (hook_mask & ~((1 << NF_IP_FORWARD) |
224 (1 << NF_IP_LOCAL_OUT) |
225 (1 << NF_IP_POST_ROUTING))) != 0) {
226 printk("xt_TCPMSS: path-MTU clamping only supported in "
227 "FORWARD, OUTPUT and POSTROUTING hooks\n");
228 return 0;
229 }
230 if (IPT_MATCH_ITERATE(e, find_syn_match))
231 return 1;
232 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
233 return 0;
234}
235
236#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
237static int
238xt_tcpmss_checkentry6(const char *tablename,
239 const void *entry,
240 const struct xt_target *target,
241 void *targinfo,
242 unsigned int hook_mask)
243{
244 const struct xt_tcpmss_info *info = targinfo;
245 const struct ip6t_entry *e = entry;
246
247 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
248 (hook_mask & ~((1 << NF_IP6_FORWARD) |
249 (1 << NF_IP6_LOCAL_OUT) |
250 (1 << NF_IP6_POST_ROUTING))) != 0) {
251 printk("xt_TCPMSS: path-MTU clamping only supported in "
252 "FORWARD, OUTPUT and POSTROUTING hooks\n");
253 return 0;
254 }
255 if (IP6T_MATCH_ITERATE(e, find_syn_match))
256 return 1;
257 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
258 return 0;
259}
260#endif
261
262static struct xt_target xt_tcpmss_reg[] = {
263 {
264 .family = AF_INET,
265 .name = "TCPMSS",
266 .checkentry = xt_tcpmss_checkentry4,
267 .target = xt_tcpmss_target4,
268 .targetsize = sizeof(struct xt_tcpmss_info),
269 .proto = IPPROTO_TCP,
270 .me = THIS_MODULE,
271 },
272#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
273 {
274 .family = AF_INET6,
275 .name = "TCPMSS",
276 .checkentry = xt_tcpmss_checkentry6,
277 .target = xt_tcpmss_target6,
278 .targetsize = sizeof(struct xt_tcpmss_info),
279 .proto = IPPROTO_TCP,
280 .me = THIS_MODULE,
281 },
282#endif
283};
284
285static int __init xt_tcpmss_init(void)
286{
287 return xt_register_targets(xt_tcpmss_reg, ARRAY_SIZE(xt_tcpmss_reg));
288}
289
290static void __exit xt_tcpmss_fini(void)
291{
292 xt_unregister_targets(xt_tcpmss_reg, ARRAY_SIZE(xt_tcpmss_reg));
293}
294
295module_init(xt_tcpmss_init);
296module_exit(xt_tcpmss_fini);
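The clamping arithmetic above reduces to a few lines; a standalone sketch for the IPv4 case (helper name assumed, not part of the patch): with --clamp-mss-to-pmtu the advertised MSS becomes the path MTU minus the minimal IP plus TCP header size, and the value is written big-endian into the 4-byte MSS option.

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

static void encode_clamped_mss(u8 *opt, unsigned int path_mtu)
{
        u16 newmss = path_mtu - sizeof(struct iphdr) - sizeof(struct tcphdr);

        opt[0] = TCPOPT_MSS;            /* kind   = 2 */
        opt[1] = TCPOLEN_MSS;           /* length = 4 */
        opt[2] = (newmss & 0xff00) >> 8;
        opt[3] = newmss & 0x00ff;
}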
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index f28bf69d3d42..bd1f7a2048d6 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -414,6 +414,7 @@ hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
414 switch (nexthdr) { 414 switch (nexthdr) {
415 case IPPROTO_TCP: 415 case IPPROTO_TCP:
416 case IPPROTO_UDP: 416 case IPPROTO_UDP:
417 case IPPROTO_UDPLITE:
417 case IPPROTO_SCTP: 418 case IPPROTO_SCTP:
418 case IPPROTO_DCCP: 419 case IPPROTO_DCCP:
419 ports = skb_header_pointer(skb, protoff, sizeof(_ports), 420 ports = skb_header_pointer(skb, protoff, sizeof(_ports),
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6dc01bdeb76b..a6fa48788e8f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -60,6 +60,7 @@
60#include <linux/netdevice.h> 60#include <linux/netdevice.h>
61#include <linux/if_packet.h> 61#include <linux/if_packet.h>
62#include <linux/wireless.h> 62#include <linux/wireless.h>
63#include <linux/kernel.h>
63#include <linux/kmod.h> 64#include <linux/kmod.h>
64#include <net/ip.h> 65#include <net/ip.h>
65#include <net/protocol.h> 66#include <net/protocol.h>
@@ -200,7 +201,8 @@ struct packet_sock {
200#endif 201#endif
201 struct packet_type prot_hook; 202 struct packet_type prot_hook;
202 spinlock_t bind_lock; 203 spinlock_t bind_lock;
203 char running; /* prot_hook is attached*/ 204 unsigned int running:1, /* prot_hook is attached*/
205 auxdata:1;
204 int ifindex; /* bound device */ 206 int ifindex; /* bound device */
205 __be16 num; 207 __be16 num;
206#ifdef CONFIG_PACKET_MULTICAST 208#ifdef CONFIG_PACKET_MULTICAST
@@ -214,6 +216,16 @@ struct packet_sock {
214#endif 216#endif
215}; 217};
216 218
219struct packet_skb_cb {
220 unsigned int origlen;
221 union {
222 struct sockaddr_pkt pkt;
223 struct sockaddr_ll ll;
224 } sa;
225};
226
227#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
228
217#ifdef CONFIG_PACKET_MMAP 229#ifdef CONFIG_PACKET_MMAP
218 230
219static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position) 231static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
@@ -293,7 +305,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
293 /* drop conntrack reference */ 305 /* drop conntrack reference */
294 nf_reset(skb); 306 nf_reset(skb);
295 307
296 spkt = (struct sockaddr_pkt*)skb->cb; 308 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
297 309
298 skb_push(skb, skb->data-skb->mac.raw); 310 skb_push(skb, skb->data-skb->mac.raw);
299 311
@@ -512,7 +524,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
512 skb = nskb; 524 skb = nskb;
513 } 525 }
514 526
515 sll = (struct sockaddr_ll*)skb->cb; 527 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
528 sizeof(skb->cb));
529
530 sll = &PACKET_SKB_CB(skb)->sa.ll;
516 sll->sll_family = AF_PACKET; 531 sll->sll_family = AF_PACKET;
517 sll->sll_hatype = dev->type; 532 sll->sll_hatype = dev->type;
518 sll->sll_protocol = skb->protocol; 533 sll->sll_protocol = skb->protocol;
@@ -523,6 +538,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
523 if (dev->hard_header_parse) 538 if (dev->hard_header_parse)
524 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr); 539 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
525 540
541 PACKET_SKB_CB(skb)->origlen = skb->len;
542
526 if (pskb_trim(skb, snaplen)) 543 if (pskb_trim(skb, snaplen))
527 goto drop_n_acct; 544 goto drop_n_acct;
528 545
@@ -582,11 +599,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
582 else if (skb->pkt_type == PACKET_OUTGOING) { 599 else if (skb->pkt_type == PACKET_OUTGOING) {
583 /* Special case: outgoing packets have ll header at head */ 600 /* Special case: outgoing packets have ll header at head */
584 skb_pull(skb, skb->nh.raw - skb->data); 601 skb_pull(skb, skb->nh.raw - skb->data);
585 if (skb->ip_summed == CHECKSUM_PARTIAL)
586 status |= TP_STATUS_CSUMNOTREADY;
587 } 602 }
588 } 603 }
589 604
605 if (skb->ip_summed == CHECKSUM_PARTIAL)
606 status |= TP_STATUS_CSUMNOTREADY;
607
590 snaplen = skb->len; 608 snaplen = skb->len;
591 609
592 res = run_filter(skb, sk, snaplen); 610 res = run_filter(skb, sk, snaplen);
@@ -1092,7 +1110,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1092 * it in now. 1110 * it in now.
1093 */ 1111 */
1094 1112
1095 sll = (struct sockaddr_ll*)skb->cb; 1113 sll = &PACKET_SKB_CB(skb)->sa.ll;
1096 if (sock->type == SOCK_PACKET) 1114 if (sock->type == SOCK_PACKET)
1097 msg->msg_namelen = sizeof(struct sockaddr_pkt); 1115 msg->msg_namelen = sizeof(struct sockaddr_pkt);
1098 else 1116 else
@@ -1117,7 +1135,22 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1117 sock_recv_timestamp(msg, sk, skb); 1135 sock_recv_timestamp(msg, sk, skb);
1118 1136
1119 if (msg->msg_name) 1137 if (msg->msg_name)
1120 memcpy(msg->msg_name, skb->cb, msg->msg_namelen); 1138 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
1139 msg->msg_namelen);
1140
1141 if (pkt_sk(sk)->auxdata) {
1142 struct tpacket_auxdata aux;
1143
1144 aux.tp_status = TP_STATUS_USER;
1145 if (skb->ip_summed == CHECKSUM_PARTIAL)
1146 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
1147 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
1148 aux.tp_snaplen = skb->len;
1149 aux.tp_mac = 0;
1150 aux.tp_net = skb->nh.raw - skb->data;
1151
1152 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1153 }
1121 1154
1122 /* 1155 /*
1123 * Free or return the buffer as appropriate. Again this 1156 * Free or return the buffer as appropriate. Again this
@@ -1317,6 +1350,7 @@ static int
1317packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1350packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1318{ 1351{
1319 struct sock *sk = sock->sk; 1352 struct sock *sk = sock->sk;
1353 struct packet_sock *po = pkt_sk(sk);
1320 int ret; 1354 int ret;
1321 1355
1322 if (level != SOL_PACKET) 1356 if (level != SOL_PACKET)
@@ -1369,6 +1403,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1369 return 0; 1403 return 0;
1370 } 1404 }
1371#endif 1405#endif
1406 case PACKET_AUXDATA:
1407 {
1408 int val;
1409
1410 if (optlen < sizeof(val))
1411 return -EINVAL;
1412 if (copy_from_user(&val, optval, sizeof(val)))
1413 return -EFAULT;
1414
1415 po->auxdata = !!val;
1416 return 0;
1417 }
1372 default: 1418 default:
1373 return -ENOPROTOOPT; 1419 return -ENOPROTOOPT;
1374 } 1420 }
@@ -1378,8 +1424,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1378 char __user *optval, int __user *optlen) 1424 char __user *optval, int __user *optlen)
1379{ 1425{
1380 int len; 1426 int len;
1427 int val;
1381 struct sock *sk = sock->sk; 1428 struct sock *sk = sock->sk;
1382 struct packet_sock *po = pkt_sk(sk); 1429 struct packet_sock *po = pkt_sk(sk);
1430 void *data;
1431 struct tpacket_stats st;
1383 1432
1384 if (level != SOL_PACKET) 1433 if (level != SOL_PACKET)
1385 return -ENOPROTOOPT; 1434 return -ENOPROTOOPT;
@@ -1392,9 +1441,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1392 1441
1393 switch(optname) { 1442 switch(optname) {
1394 case PACKET_STATISTICS: 1443 case PACKET_STATISTICS:
1395 {
1396 struct tpacket_stats st;
1397
1398 if (len > sizeof(struct tpacket_stats)) 1444 if (len > sizeof(struct tpacket_stats))
1399 len = sizeof(struct tpacket_stats); 1445 len = sizeof(struct tpacket_stats);
1400 spin_lock_bh(&sk->sk_receive_queue.lock); 1446 spin_lock_bh(&sk->sk_receive_queue.lock);
@@ -1403,16 +1449,23 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1403 spin_unlock_bh(&sk->sk_receive_queue.lock); 1449 spin_unlock_bh(&sk->sk_receive_queue.lock);
1404 st.tp_packets += st.tp_drops; 1450 st.tp_packets += st.tp_drops;
1405 1451
1406 if (copy_to_user(optval, &st, len)) 1452 data = &st;
1407 return -EFAULT; 1453 break;
1454 case PACKET_AUXDATA:
1455 if (len > sizeof(int))
1456 len = sizeof(int);
1457 val = po->auxdata;
1458
1459 data = &val;
1408 break; 1460 break;
1409 }
1410 default: 1461 default:
1411 return -ENOPROTOOPT; 1462 return -ENOPROTOOPT;
1412 } 1463 }
1413 1464
1414 if (put_user(len, optlen)) 1465 if (put_user(len, optlen))
1415 return -EFAULT; 1466 return -EFAULT;
1467 if (copy_to_user(optval, data, len))
1468 return -EFAULT;
1416 return 0; 1469 return 0;
1417} 1470}
1418 1471
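The new PACKET_AUXDATA option is consumed from userspace as a control message; a short sketch (error handling omitted, fd assumed to be an already bound PF_PACKET socket):

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void read_with_auxdata(int fd)
{
        int one = 1;
        char buf[2048];
        char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = {
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = cbuf,
                .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;
        struct tpacket_auxdata aux;

        setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
        recvmsg(fd, &msg, 0);

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level != SOL_PACKET ||
                    cmsg->cmsg_type != PACKET_AUXDATA)
                        continue;
                memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
                /* aux.tp_len is the original frame length, aux.tp_snaplen the
                 * captured length; aux.tp_status may carry TP_STATUS_CSUMNOTREADY */
        }
}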
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 01e69138578d..4c68c718f5ec 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -52,7 +52,7 @@ static struct tcf_hashinfo ipt_hash_info = {
52 52
53static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook) 53static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
54{ 54{
55 struct ipt_target *target; 55 struct xt_target *target;
56 int ret = 0; 56 int ret = 0;
57 57
58 target = xt_request_find_target(AF_INET, t->u.user.name, 58 target = xt_request_find_target(AF_INET, t->u.user.name,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index bc116bd6937c..3b6e6a780927 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -209,7 +209,7 @@ static void dev_watchdog(unsigned long arg)
209 dev->name); 209 dev->name);
210 dev->tx_timeout(dev); 210 dev->tx_timeout(dev);
211 } 211 }
212 if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) 212 if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
213 dev_hold(dev); 213 dev_hold(dev);
214 } 214 }
215 } 215 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2567b4c96c1e..000e043ebd62 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -372,6 +372,20 @@ static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *
372 return 0; 372 return 0;
373} 373}
374 374
375static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
376 struct gnet_dump *d)
377{
378 struct prio_sched_data *q = qdisc_priv(sch);
379 struct Qdisc *cl_q;
380
381 cl_q = q->queues[cl - 1];
382 if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
383 gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
384 return -1;
385
386 return 0;
387}
388
375static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) 389static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
376{ 390{
377 struct prio_sched_data *q = qdisc_priv(sch); 391 struct prio_sched_data *q = qdisc_priv(sch);
@@ -414,6 +428,7 @@ static struct Qdisc_class_ops prio_class_ops = {
414 .bind_tcf = prio_bind, 428 .bind_tcf = prio_bind,
415 .unbind_tcf = prio_put, 429 .unbind_tcf = prio_put,
416 .dump = prio_dump_class, 430 .dump = prio_dump_class,
431 .dump_stats = prio_dump_class_stats,
417}; 432};
418 433
419static struct Qdisc_ops prio_qdisc_ops = { 434static struct Qdisc_ops prio_qdisc_ops = {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 459cda258a5c..82844801e421 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -143,6 +143,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
143 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 143 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
144 (iph->protocol == IPPROTO_TCP || 144 (iph->protocol == IPPROTO_TCP ||
145 iph->protocol == IPPROTO_UDP || 145 iph->protocol == IPPROTO_UDP ||
146 iph->protocol == IPPROTO_UDPLITE ||
146 iph->protocol == IPPROTO_SCTP || 147 iph->protocol == IPPROTO_SCTP ||
147 iph->protocol == IPPROTO_DCCP || 148 iph->protocol == IPPROTO_DCCP ||
148 iph->protocol == IPPROTO_ESP)) 149 iph->protocol == IPPROTO_ESP))
@@ -156,6 +157,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
156 h2 = iph->saddr.s6_addr32[3]^iph->nexthdr; 157 h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
157 if (iph->nexthdr == IPPROTO_TCP || 158 if (iph->nexthdr == IPPROTO_TCP ||
158 iph->nexthdr == IPPROTO_UDP || 159 iph->nexthdr == IPPROTO_UDP ||
160 iph->nexthdr == IPPROTO_UDPLITE ||
159 iph->nexthdr == IPPROTO_SCTP || 161 iph->nexthdr == IPPROTO_SCTP ||
160 iph->nexthdr == IPPROTO_DCCP || 162 iph->nexthdr == IPPROTO_DCCP ||
161 iph->nexthdr == IPPROTO_ESP) 163 iph->nexthdr == IPPROTO_ESP)
diff --git a/net/socket.c b/net/socket.c
index 4e396312f8d5..5f374e1ff526 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -407,24 +407,11 @@ int sock_map_fd(struct socket *sock)
407 407
408static struct socket *sock_from_file(struct file *file, int *err) 408static struct socket *sock_from_file(struct file *file, int *err)
409{ 409{
410 struct inode *inode;
411 struct socket *sock;
412
413 if (file->f_op == &socket_file_ops) 410 if (file->f_op == &socket_file_ops)
414 return file->private_data; /* set in sock_map_fd */ 411 return file->private_data; /* set in sock_map_fd */
415 412
416 inode = file->f_path.dentry->d_inode; 413 *err = -ENOTSOCK;
417 if (!S_ISSOCK(inode->i_mode)) { 414 return NULL;
418 *err = -ENOTSOCK;
419 return NULL;
420 }
421
422 sock = SOCKET_I(inode);
423 if (sock->file != file) {
424 printk(KERN_ERR "socki_lookup: socket file changed!\n");
425 sock->file = file;
426 }
427 return sock;
428} 415}
429 416
430/** 417/**
@@ -1527,8 +1514,9 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
1527 struct file *sock_file; 1514 struct file *sock_file;
1528 1515
1529 sock_file = fget_light(fd, &fput_needed); 1516 sock_file = fget_light(fd, &fput_needed);
1517 err = -EBADF;
1530 if (!sock_file) 1518 if (!sock_file)
1531 return -EBADF; 1519 goto out;
1532 1520
1533 sock = sock_from_file(sock_file, &err); 1521 sock = sock_from_file(sock_file, &err);
1534 if (!sock) 1522 if (!sock)
@@ -1555,6 +1543,7 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
1555 1543
1556out_put: 1544out_put:
1557 fput_light(sock_file, fput_needed); 1545 fput_light(sock_file, fput_needed);
1546out:
1558 return err; 1547 return err;
1559} 1548}
1560 1549
@@ -1586,12 +1575,13 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
1586 int fput_needed; 1575 int fput_needed;
1587 1576
1588 sock_file = fget_light(fd, &fput_needed); 1577 sock_file = fget_light(fd, &fput_needed);
1578 err = -EBADF;
1589 if (!sock_file) 1579 if (!sock_file)
1590 return -EBADF; 1580 goto out;
1591 1581
1592 sock = sock_from_file(sock_file, &err); 1582 sock = sock_from_file(sock_file, &err);
1593 if (!sock) 1583 if (!sock)
1594 goto out; 1584 goto out_put;
1595 1585
1596 msg.msg_control = NULL; 1586 msg.msg_control = NULL;
1597 msg.msg_controllen = 0; 1587 msg.msg_controllen = 0;
@@ -1610,8 +1600,9 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
1610 if (err2 < 0) 1600 if (err2 < 0)
1611 err = err2; 1601 err = err2;
1612 } 1602 }
1613out: 1603out_put:
1614 fput_light(sock_file, fput_needed); 1604 fput_light(sock_file, fput_needed);
1605out:
1615 return err; 1606 return err;
1616} 1607}
1617 1608
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 769cdd62c1bb..4d90a179aeda 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -86,8 +86,8 @@ static int wanrouter_device_del_if(struct wan_device *wandev,
86 86
87static struct wan_device *wanrouter_find_device(char *name); 87static struct wan_device *wanrouter_find_device(char *name);
88static int wanrouter_delete_interface(struct wan_device *wandev, char *name); 88static int wanrouter_delete_interface(struct wan_device *wandev, char *name);
89void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags); 89static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
90void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags); 90static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
91 91
92 92
93 93
@@ -104,8 +104,8 @@ struct wan_device* wanrouter_router_devlist; /* list of registered devices */
104 * Organize Unique Identifiers for encapsulation/decapsulation 104 * Organize Unique Identifiers for encapsulation/decapsulation
105 */ 105 */
106 106
107static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 };
108#if 0 107#if 0
108static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 };
109static unsigned char wanrouter_oui_802_2[] = { 0x00, 0x80, 0xC2 }; 109static unsigned char wanrouter_oui_802_2[] = { 0x00, 0x80, 0xC2 };
110#endif 110#endif
111 111
@@ -246,6 +246,8 @@ int unregister_wan_device(char *name)
246 return 0; 246 return 0;
247} 247}
248 248
249#if 0
250
249/* 251/*
250 * Encapsulate packet. 252 * Encapsulate packet.
251 * 253 *
@@ -341,6 +343,7 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
341 return ethertype; 343 return ethertype;
342} 344}
343 345
346#endif /* 0 */
344 347
345/* 348/*
346 * WAN device IOCTL. 349 * WAN device IOCTL.
@@ -799,23 +802,19 @@ static int wanrouter_delete_interface(struct wan_device *wandev, char *name)
799 return 0; 802 return 0;
800} 803}
801 804
802void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags) 805static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
803{ 806{
804 spin_lock_irqsave(lock, *smp_flags); 807 spin_lock_irqsave(lock, *smp_flags);
805} 808}
806 809
807 810
808void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags) 811static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
809{ 812{
810 spin_unlock_irqrestore(lock, *smp_flags); 813 spin_unlock_irqrestore(lock, *smp_flags);
811} 814}
812 815
813EXPORT_SYMBOL(register_wan_device); 816EXPORT_SYMBOL(register_wan_device);
814EXPORT_SYMBOL(unregister_wan_device); 817EXPORT_SYMBOL(unregister_wan_device);
815EXPORT_SYMBOL(wanrouter_encapsulate);
816EXPORT_SYMBOL(wanrouter_type_trans);
817EXPORT_SYMBOL(lock_adapter_irq);
818EXPORT_SYMBOL(unlock_adapter_irq);
819 818
820MODULE_LICENSE("GPL"); 819MODULE_LICENSE("GPL");
821 820
diff --git a/net/x25/Makefile b/net/x25/Makefile
index 587a71aa411d..a2c34ab6f194 100644
--- a/net/x25/Makefile
+++ b/net/x25/Makefile
@@ -6,5 +6,5 @@ obj-$(CONFIG_X25) += x25.o
6 6
7x25-y := af_x25.o x25_dev.o x25_facilities.o x25_in.o \ 7x25-y := af_x25.o x25_dev.o x25_facilities.o x25_in.o \
8 x25_link.o x25_out.o x25_route.o x25_subr.o \ 8 x25_link.o x25_out.o x25_route.o x25_subr.o \
9 x25_timer.o x25_proc.o 9 x25_timer.o x25_proc.o x25_forward.o
10x25-$(CONFIG_SYSCTL) += sysctl_net_x25.o 10x25-$(CONFIG_SYSCTL) += sysctl_net_x25.o
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index b5c80b189902..b37d894358ec 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -63,6 +63,7 @@ int sysctl_x25_call_request_timeout = X25_DEFAULT_T21;
63int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22; 63int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22;
64int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23; 64int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23;
65int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2; 65int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2;
66int sysctl_x25_forward = 0;
66 67
67HLIST_HEAD(x25_list); 68HLIST_HEAD(x25_list);
68DEFINE_RWLOCK(x25_list_lock); 69DEFINE_RWLOCK(x25_list_lock);
@@ -846,7 +847,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
846 struct x25_address source_addr, dest_addr; 847 struct x25_address source_addr, dest_addr;
847 struct x25_facilities facilities; 848 struct x25_facilities facilities;
848 struct x25_dte_facilities dte_facilities; 849 struct x25_dte_facilities dte_facilities;
849 int len, rc; 850 int len, addr_len, rc;
850 851
851 /* 852 /*
852 * Remove the LCI and frame type. 853 * Remove the LCI and frame type.
@@ -857,7 +858,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
857 * Extract the X.25 addresses and convert them to ASCII strings, 858 * Extract the X.25 addresses and convert them to ASCII strings,
858 * and remove them. 859 * and remove them.
859 */ 860 */
860 skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); 861 addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
862 skb_pull(skb, addr_len);
861 863
862 /* 864 /*
863 * Get the length of the facilities, skip past them for the moment 865 * Get the length of the facilities, skip past them for the moment
@@ -873,11 +875,28 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
873 sk = x25_find_listener(&source_addr,skb); 875 sk = x25_find_listener(&source_addr,skb);
874 skb_push(skb,len); 876 skb_push(skb,len);
875 877
878 if (sk != NULL && sk_acceptq_is_full(sk)) {
879 goto out_sock_put;
880 }
881
876 /* 882 /*
 877 * We can't accept the Call Request. 883 * We don't have any listeners for this incoming call.
884 * Try forwarding it.
878 */ 885 */
879 if (sk == NULL || sk_acceptq_is_full(sk)) 886 if (sk == NULL) {
880 goto out_clear_request; 887 skb_push(skb, addr_len + X25_STD_MIN_LEN);
888 if (sysctl_x25_forward &&
889 x25_forward_call(&dest_addr, nb, skb, lci) > 0)
890 {
 891 /* Call was forwarded, don't process it any more */
892 kfree_skb(skb);
893 rc = 1;
894 goto out;
895 } else {
896 /* No listeners, can't forward, clear the call */
897 goto out_clear_request;
898 }
899 }
881 900
882 /* 901 /*
883 * Try to reach a compromise on the requested facilities. 902 * Try to reach a compromise on the requested facilities.
@@ -1598,6 +1617,9 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
1598 x25_disconnect(s, ENETUNREACH, 0, 0); 1617 x25_disconnect(s, ENETUNREACH, 0, 0);
1599 1618
1600 write_unlock_bh(&x25_list_lock); 1619 write_unlock_bh(&x25_list_lock);
1620
1621 /* Remove any related forwards */
1622 x25_clear_forward_by_dev(nb->dev);
1601} 1623}
1602 1624
1603static int __init x25_init(void) 1625static int __init x25_init(void)
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index aabda59c824e..2b2e7fd689f3 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,6 +73,14 @@ static struct ctl_table x25_table[] = {
73 .extra1 = &min_timer, 73 .extra1 = &min_timer,
74 .extra2 = &max_timer, 74 .extra2 = &max_timer,
75 }, 75 },
76 {
77 .ctl_name = NET_X25_FORWARD,
78 .procname = "x25_forward",
79 .data = &sysctl_x25_forward,
80 .maxlen = sizeof(int),
81 .mode = 0644,
82 .proc_handler = &proc_dointvec,
83 },
76 { 0, }, 84 { 0, },
77}; 85};
78 86
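The new entry is a plain integer sysctl, so forwarding can be toggled from userspace by writing to the proc path implied by the table (path assumed to be net/x25/x25_forward); a minimal sketch:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/x25/x25_forward", "w");

        if (!f)
                return 1;
        fputs("1\n", f);        /* 0 = disabled (default), 1 = forward calls */
        return fclose(f) ? 1 : 0;
}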
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 328d80f000ad..f099fd6a7c0e 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -67,9 +67,18 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
67 return x25_rx_call_request(skb, nb, lci); 67 return x25_rx_call_request(skb, nb, lci);
68 68
69 /* 69 /*
70 * Its not a Call Request, nor is it a control frame. 70 * Its not a Call Request, nor is it a control frame.
71 * Let caller throw it away. 71 * Can we forward it?
72 */ 72 */
73
74 if (x25_forward_data(lci, nb, skb)) {
75 if (frametype == X25_CLEAR_CONFIRMATION) {
76 x25_clear_forward_by_lci(lci);
77 }
78 kfree_skb(skb);
79 return 1;
80 }
81
73/* 82/*
74 x25_transmit_clear_request(nb, lci, 0x0D); 83 x25_transmit_clear_request(nb, lci, 0x0D);
75*/ 84*/
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
new file mode 100644
index 000000000000..d339e0c810a8
--- /dev/null
+++ b/net/x25/x25_forward.c
@@ -0,0 +1,163 @@
1/*
2 * This module:
3 * This module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License
5 * as published by the Free Software Foundation; either version
6 * 2 of the License, or (at your option) any later version.
7 *
8 * History
9 * 03-01-2007 Added forwarding for x.25 Andrew Hendry
10 */
11#include <linux/if_arp.h>
12#include <linux/init.h>
13#include <net/x25.h>
14
15struct list_head x25_forward_list = LIST_HEAD_INIT(x25_forward_list);
16DEFINE_RWLOCK(x25_forward_list_lock);
17
18int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
19 struct sk_buff *skb, int lci)
20{
21 struct x25_route *rt;
22 struct x25_neigh *neigh_new = NULL;
23 struct list_head *entry;
24 struct x25_forward *x25_frwd, *new_frwd;
25 struct sk_buff *skbn;
26 short same_lci = 0;
27 int rc = 0;
28
29 if ((rt = x25_get_route(dest_addr)) != NULL) {
30
31 if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
 32 /* This shouldn't happen; if it occurs somehow
33 * do something sensible
34 */
35 goto out_put_route;
36 }
37
38 /* Avoid a loop. This is the normal exit path for a
39 * system with only one x.25 iface and default route
40 */
41 if (rt->dev == from->dev) {
42 goto out_put_nb;
43 }
44
45 /* Remote end sending a call request on an already
 46 * established LCI? It shouldn't happen, just in case..
47 */
48 read_lock_bh(&x25_forward_list_lock);
49 list_for_each(entry, &x25_forward_list) {
50 x25_frwd = list_entry(entry, struct x25_forward, node);
51 if (x25_frwd->lci == lci) {
52 printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
53 same_lci = 1;
54 }
55 }
56 read_unlock_bh(&x25_forward_list_lock);
57
58 /* Save the forwarding details for future traffic */
59 if (!same_lci){
60 if ((new_frwd = kmalloc(sizeof(struct x25_forward),
61 GFP_ATOMIC)) == NULL){
62 rc = -ENOMEM;
63 goto out_put_nb;
64 }
65 new_frwd->lci = lci;
66 new_frwd->dev1 = rt->dev;
67 new_frwd->dev2 = from->dev;
68 write_lock_bh(&x25_forward_list_lock);
69 list_add(&new_frwd->node, &x25_forward_list);
70 write_unlock_bh(&x25_forward_list_lock);
71 }
72
73 /* Forward the call request */
74 if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
75 goto out_put_nb;
76 }
77 x25_transmit_link(skbn, neigh_new);
78 rc = 1;
79 }
80
81
82out_put_nb:
83 x25_neigh_put(neigh_new);
84
85out_put_route:
86 x25_route_put(rt);
87 return rc;
88}
89
90
91int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
92
93 struct x25_forward *frwd;
94 struct list_head *entry;
95 struct net_device *peer = NULL;
96 struct x25_neigh *nb;
97 struct sk_buff *skbn;
98 int rc = 0;
99
100 read_lock_bh(&x25_forward_list_lock);
101 list_for_each(entry, &x25_forward_list) {
102 frwd = list_entry(entry, struct x25_forward, node);
103 if (frwd->lci == lci) {
104 /* The call is established, either side can send */
105 if (from->dev == frwd->dev1) {
106 peer = frwd->dev2;
107 } else {
108 peer = frwd->dev1;
109 }
110 break;
111 }
112 }
113 read_unlock_bh(&x25_forward_list_lock);
114
115 if ( (nb = x25_get_neigh(peer)) == NULL)
116 goto out;
117
118 if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
119 goto out;
120
121 }
122 x25_transmit_link(skbn, nb);
123
124 x25_neigh_put(nb);
125 rc = 1;
126out:
127 return rc;
128}
129
130void x25_clear_forward_by_lci(unsigned int lci)
131{
132 struct x25_forward *fwd;
133 struct list_head *entry, *tmp;
134
135 write_lock_bh(&x25_forward_list_lock);
136
137 list_for_each_safe(entry, tmp, &x25_forward_list) {
138 fwd = list_entry(entry, struct x25_forward, node);
139 if (fwd->lci == lci) {
140 list_del(&fwd->node);
141 kfree(fwd);
142 }
143 }
144 write_unlock_bh(&x25_forward_list_lock);
145}
146
147
148void x25_clear_forward_by_dev(struct net_device *dev)
149{
150 struct x25_forward *fwd;
151 struct list_head *entry, *tmp;
152
153 write_lock_bh(&x25_forward_list_lock);
154
155 list_for_each_safe(entry, tmp, &x25_forward_list) {
156 fwd = list_entry(entry, struct x25_forward, node);
157 if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
158 list_del(&fwd->node);
159 kfree(fwd);
160 }
161 }
162 write_unlock_bh(&x25_forward_list_lock);
163}
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index a11837d361d2..e0470bd8c2f9 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -165,6 +165,75 @@ out:
165 return 0; 165 return 0;
166} 166}
167 167
168static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
169{
170 struct x25_forward *f;
171 struct list_head *entry;
172
173 list_for_each(entry, &x25_forward_list) {
174 f = list_entry(entry, struct x25_forward, node);
175 if (!pos--)
176 goto found;
177 }
178
179 f = NULL;
180found:
181 return f;
182}
183
184static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
185{
186 loff_t l = *pos;
187
188 read_lock_bh(&x25_forward_list_lock);
189 return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN;
190}
191
192static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
193{
194 struct x25_forward *f;
195
196 ++*pos;
197 if (v == SEQ_START_TOKEN) {
198 f = NULL;
199 if (!list_empty(&x25_forward_list))
200 f = list_entry(x25_forward_list.next,
201 struct x25_forward, node);
202 goto out;
203 }
204 f = v;
205 if (f->node.next != &x25_forward_list)
206 f = list_entry(f->node.next, struct x25_forward, node);
207 else
208 f = NULL;
209out:
210 return f;
211
212}
213
214static void x25_seq_forward_stop(struct seq_file *seq, void *v)
215{
216 read_unlock_bh(&x25_forward_list_lock);
217}
218
219static int x25_seq_forward_show(struct seq_file *seq, void *v)
220{
221 struct x25_forward *f;
222
223 if (v == SEQ_START_TOKEN) {
224 seq_printf(seq, "lci dev1 dev2\n");
225 goto out;
226 }
227
228 f = v;
229
230 seq_printf(seq, "%d %-10s %-10s\n",
231 f->lci, f->dev1->name, f->dev2->name);
232
233out:
234 return 0;
235}
236
168static struct seq_operations x25_seq_route_ops = { 237static struct seq_operations x25_seq_route_ops = {
169 .start = x25_seq_route_start, 238 .start = x25_seq_route_start,
170 .next = x25_seq_route_next, 239 .next = x25_seq_route_next,
@@ -179,6 +248,13 @@ static struct seq_operations x25_seq_socket_ops = {
179 .show = x25_seq_socket_show, 248 .show = x25_seq_socket_show,
180}; 249};
181 250
251static struct seq_operations x25_seq_forward_ops = {
252 .start = x25_seq_forward_start,
253 .next = x25_seq_forward_next,
254 .stop = x25_seq_forward_stop,
255 .show = x25_seq_forward_show,
256};
257
182static int x25_seq_socket_open(struct inode *inode, struct file *file) 258static int x25_seq_socket_open(struct inode *inode, struct file *file)
183{ 259{
184 return seq_open(file, &x25_seq_socket_ops); 260 return seq_open(file, &x25_seq_socket_ops);
@@ -189,6 +265,11 @@ static int x25_seq_route_open(struct inode *inode, struct file *file)
189 return seq_open(file, &x25_seq_route_ops); 265 return seq_open(file, &x25_seq_route_ops);
190} 266}
191 267
268static int x25_seq_forward_open(struct inode *inode, struct file *file)
269{
270 return seq_open(file, &x25_seq_forward_ops);
271}
272
192static struct file_operations x25_seq_socket_fops = { 273static struct file_operations x25_seq_socket_fops = {
193 .owner = THIS_MODULE, 274 .owner = THIS_MODULE,
194 .open = x25_seq_socket_open, 275 .open = x25_seq_socket_open,
@@ -205,6 +286,14 @@ static struct file_operations x25_seq_route_fops = {
205 .release = seq_release, 286 .release = seq_release,
206}; 287};
207 288
289static struct file_operations x25_seq_forward_fops = {
290 .owner = THIS_MODULE,
291 .open = x25_seq_forward_open,
292 .read = seq_read,
293 .llseek = seq_lseek,
294 .release = seq_release,
295};
296
208static struct proc_dir_entry *x25_proc_dir; 297static struct proc_dir_entry *x25_proc_dir;
209 298
210int __init x25_proc_init(void) 299int __init x25_proc_init(void)
@@ -225,9 +314,17 @@ int __init x25_proc_init(void)
225 if (!p) 314 if (!p)
226 goto out_socket; 315 goto out_socket;
227 p->proc_fops = &x25_seq_socket_fops; 316 p->proc_fops = &x25_seq_socket_fops;
317
318 p = create_proc_entry("forward", S_IRUGO, x25_proc_dir);
319 if (!p)
320 goto out_forward;
321 p->proc_fops = &x25_seq_forward_fops;
228 rc = 0; 322 rc = 0;
323
229out: 324out:
230 return rc; 325 return rc;
326out_forward:
327 remove_proc_entry("socket", x25_proc_dir);
231out_socket: 328out_socket:
232 remove_proc_entry("route", x25_proc_dir); 329 remove_proc_entry("route", x25_proc_dir);
233out_route: 330out_route:
@@ -237,6 +334,7 @@ out_route:
237 334
238void __exit x25_proc_exit(void) 335void __exit x25_proc_exit(void)
239{ 336{
337 remove_proc_entry("forward", x25_proc_dir);
240 remove_proc_entry("route", x25_proc_dir); 338 remove_proc_entry("route", x25_proc_dir);
241 remove_proc_entry("socket", x25_proc_dir); 339 remove_proc_entry("socket", x25_proc_dir);
242 remove_proc_entry("x25", proc_net); 340 remove_proc_entry("x25", proc_net);
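Derived from the seq_printf format above (device names invented for illustration), the new /proc/net/x25/forward file lists one active forwarding pair per line:

lci dev1 dev2
1 hdlc0      hdlc1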
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 2a3fe986b245..883a848bca5b 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -119,6 +119,9 @@ void x25_route_device_down(struct net_device *dev)
119 __x25_remove_route(rt); 119 __x25_remove_route(rt);
120 } 120 }
121 write_unlock_bh(&x25_route_list_lock); 121 write_unlock_bh(&x25_route_list_lock);
122
123 /* Remove any related forwarding */
124 x25_clear_forward_by_dev(dev);
122} 125}
123 126
124/* 127/*
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 0faab6332586..577a4f821b98 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -24,6 +24,17 @@ config XFRM_SUB_POLICY
24 24
25 If unsure, say N. 25 If unsure, say N.
26 26
27config XFRM_MIGRATE
28 bool "Transformation migrate database (EXPERIMENTAL)"
29 depends on XFRM && EXPERIMENTAL
30 ---help---
31 A feature to update locator(s) of a given IPsec security
32 association dynamically. This feature is required, for
33 instance, in a Mobile IPv6 environment with IPsec configuration
34 where mobile nodes change their attachment point to the Internet.
35
36 If unsure, say N.
37
27config NET_KEY 38config NET_KEY
28 tristate "PF_KEY sockets" 39 tristate "PF_KEY sockets"
29 select XFRM 40 select XFRM
@@ -34,4 +45,19 @@ config NET_KEY
34 45
35 Say Y unless you know what you are doing. 46 Say Y unless you know what you are doing.
36 47
48config NET_KEY_MIGRATE
49 bool "PF_KEY MIGRATE (EXPERIMENTAL)"
50 depends on NET_KEY && EXPERIMENTAL
51 select XFRM_MIGRATE
52 ---help---
53 Add a PF_KEY MIGRATE message to PF_KEYv2 socket family.
54 The PF_KEY MIGRATE message is used to dynamically update
55 locator(s) of a given IPsec security association.
56 This feature is required, for instance, in a Mobile IPv6
57 environment with IPsec configuration where mobile nodes
 58 change their attachment point to the Internet. Detailed
59 information can be found in the internet-draft
60 <draft-sugimoto-mip6-pfkey-migrate>.
61
62 If unsure, say N.
37 63
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b7e537fe2d75..fa7ce060b454 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2236,3 +2236,234 @@ void __init xfrm_init(void)
2236 xfrm_input_init(); 2236 xfrm_input_init();
2237} 2237}
2238 2238
2239#ifdef CONFIG_XFRM_MIGRATE
2240static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
2241 struct xfrm_selector *sel_tgt)
2242{
2243 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2244 if (sel_tgt->family == sel_cmp->family &&
2245 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
2246 sel_cmp->family) == 0 &&
2247 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
2248 sel_cmp->family) == 0 &&
2249 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2250 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2251 return 1;
2252 }
2253 } else {
2254 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2255 return 1;
2256 }
2257 }
2258 return 0;
2259}
2260
2261static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
2262 u8 dir, u8 type)
2263{
2264 struct xfrm_policy *pol, *ret = NULL;
2265 struct hlist_node *entry;
2266 struct hlist_head *chain;
2267 u32 priority = ~0U;
2268
2269 read_lock_bh(&xfrm_policy_lock);
2270 chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
2271 hlist_for_each_entry(pol, entry, chain, bydst) {
2272 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2273 pol->type == type) {
2274 ret = pol;
2275 priority = ret->priority;
2276 break;
2277 }
2278 }
2279 chain = &xfrm_policy_inexact[dir];
2280 hlist_for_each_entry(pol, entry, chain, bydst) {
2281 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2282 pol->type == type &&
2283 pol->priority < priority) {
2284 ret = pol;
2285 break;
2286 }
2287 }
2288
2289 if (ret)
2290 xfrm_pol_hold(ret);
2291
2292 read_unlock_bh(&xfrm_policy_lock);
2293
2294 return ret;
2295}
2296
2297static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
2298{
2299 int match = 0;
2300
2301 if (t->mode == m->mode && t->id.proto == m->proto &&
2302 (m->reqid == 0 || t->reqid == m->reqid)) {
2303 switch (t->mode) {
2304 case XFRM_MODE_TUNNEL:
2305 case XFRM_MODE_BEET:
2306 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
2307 m->old_family) == 0 &&
2308 xfrm_addr_cmp(&t->saddr, &m->old_saddr,
2309 m->old_family) == 0) {
2310 match = 1;
2311 }
2312 break;
2313 case XFRM_MODE_TRANSPORT:
2314 /* in case of transport mode, template does not store
2315 any IP addresses, hence we just compare mode and
2316 protocol */
2317 match = 1;
2318 break;
2319 default:
2320 break;
2321 }
2322 }
2323 return match;
2324}
2325
2326/* update endpoint address(es) of template(s) */
2327static int xfrm_policy_migrate(struct xfrm_policy *pol,
2328 struct xfrm_migrate *m, int num_migrate)
2329{
2330 struct xfrm_migrate *mp;
2331 struct dst_entry *dst;
2332 int i, j, n = 0;
2333
2334 write_lock_bh(&pol->lock);
2335 if (unlikely(pol->dead)) {
2336 /* target policy has been deleted */
2337 write_unlock_bh(&pol->lock);
2338 return -ENOENT;
2339 }
2340
2341 for (i = 0; i < pol->xfrm_nr; i++) {
2342 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2343 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2344 continue;
2345 n++;
2346 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL)
2347 continue;
2348 /* update endpoints */
2349 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2350 sizeof(pol->xfrm_vec[i].id.daddr));
2351 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2352 sizeof(pol->xfrm_vec[i].saddr));
2353 pol->xfrm_vec[i].encap_family = mp->new_family;
2354 /* flush bundles */
2355 while ((dst = pol->bundles) != NULL) {
2356 pol->bundles = dst->next;
2357 dst_free(dst);
2358 }
2359 }
2360 }
2361
2362 write_unlock_bh(&pol->lock);
2363
2364 if (!n)
2365 return -ENODATA;
2366
2367 return 0;
2368}
2369
2370static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
2371{
2372 int i, j;
2373
2374 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
2375 return -EINVAL;
2376
2377 for (i = 0; i < num_migrate; i++) {
2378 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
2379 m[i].old_family) == 0) &&
2380 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
2381 m[i].old_family) == 0))
2382 return -EINVAL;
2383 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2384 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2385 return -EINVAL;
2386
 2387		/* check for duplicate entries */
2388 for (j = i + 1; j < num_migrate; j++) {
2389 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
2390 sizeof(m[i].old_daddr)) &&
2391 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
2392 sizeof(m[i].old_saddr)) &&
2393 m[i].proto == m[j].proto &&
2394 m[i].mode == m[j].mode &&
2395 m[i].reqid == m[j].reqid &&
2396 m[i].old_family == m[j].old_family)
2397 return -EINVAL;
2398 }
2399 }
2400
2401 return 0;
2402}
2403
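The check rejects a request when an entry changes neither address, when either new address is unspecified, or when two entries describe the same old (addresses, proto, mode, reqid, family) tuple. For instance, an entry whose new source address is left as the any-address fails the second test (hypothetical values):

	struct xfrm_migrate bad = {
		.proto      = IPPROTO_ESP,
		.mode       = XFRM_MODE_TUNNEL,
		.old_family = AF_INET,
		.new_family = AF_INET,
		/* old_daddr/old_saddr set to the current endpoints,
		 * new_daddr set, but new_saddr left all-zero */
	};
	/* xfrm_addr_any(&bad.new_saddr, AF_INET) is true,
	 * so xfrm_migrate_check() returns -EINVAL. */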
2404int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
2405 struct xfrm_migrate *m, int num_migrate)
2406{
2407 int i, err, nx_cur = 0, nx_new = 0;
2408 struct xfrm_policy *pol = NULL;
2409 struct xfrm_state *x, *xc;
2410 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
2411 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
2412 struct xfrm_migrate *mp;
2413
2414 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2415 goto out;
2416
2417 /* Stage 1 - find policy */
2418 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
2419 err = -ENOENT;
2420 goto out;
2421 }
2422
2423 /* Stage 2 - find and update state(s) */
2424 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2425 if ((x = xfrm_migrate_state_find(mp))) {
2426 x_cur[nx_cur] = x;
2427 nx_cur++;
2428 if ((xc = xfrm_state_migrate(x, mp))) {
2429 x_new[nx_new] = xc;
2430 nx_new++;
2431 } else {
2432 err = -ENODATA;
2433 goto restore_state;
2434 }
2435 }
2436 }
2437
2438 /* Stage 3 - update policy */
2439 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2440 goto restore_state;
2441
2442 /* Stage 4 - delete old state(s) */
2443 if (nx_cur) {
2444 xfrm_states_put(x_cur, nx_cur);
2445 xfrm_states_delete(x_cur, nx_cur);
2446 }
2447
2448 /* Stage 5 - announce */
2449 km_migrate(sel, dir, type, m, num_migrate);
2450
2451 xfrm_pol_put(pol);
2452
2453 return 0;
2454out:
2455 return err;
2456
2457restore_state:
2458 if (pol)
2459 xfrm_pol_put(pol);
2460 if (nx_cur)
2461 xfrm_states_put(x_cur, nx_cur);
2462 if (nx_new)
2463 xfrm_states_delete(x_new, nx_new);
2464
2465 return err;
2466}
2467EXPORT_SYMBOL(xfrm_migrate);
2468#endif
2469
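Putting the five stages together, an in-kernel caller (for instance the Mobile IPv6 code this interface is aimed at) hands xfrm_migrate() a selector plus an array of old/new address pairs. A minimal hypothetical IPv4 example; the helper name and the addresses below are illustrative only and not part of this patch:

	static int example_migrate_esp_tunnel(void)
	{
		struct xfrm_selector sel = { .family = AF_INET };
		struct xfrm_migrate m = {
			.proto      = IPPROTO_ESP,
			.mode       = XFRM_MODE_TUNNEL,
			.reqid      = 0,
			.old_family = AF_INET,
			.new_family = AF_INET,
		};

		m.old_daddr.a4 = htonl(0xc0a80001);	/* 192.168.0.1 */
		m.old_saddr.a4 = htonl(0xc0a80002);	/* 192.168.0.2 */
		m.new_daddr.a4 = htonl(0x0a000001);	/* 10.0.0.1 */
		m.new_saddr.a4 = htonl(0x0a000002);	/* 10.0.0.2 */

		/* rewrites matching policy templates and states, deletes
		 * the old states and announces the change via km_migrate() */
		return xfrm_migrate(&sel, XFRM_POLICY_OUT,
				    XFRM_POLICY_TYPE_MAIN, &m, 1);
	}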
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index fdb08d9f34aa..91b02687db52 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -183,9 +183,6 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
183 183
184int __xfrm_state_delete(struct xfrm_state *x); 184int __xfrm_state_delete(struct xfrm_state *x);
185 185
186static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
187static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
188
189int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); 186int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
190void km_state_expired(struct xfrm_state *x, int hard, u32 pid); 187void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
191 188
@@ -831,6 +828,160 @@ out:
831} 828}
832EXPORT_SYMBOL(xfrm_state_add); 829EXPORT_SYMBOL(xfrm_state_add);
833 830
831#ifdef CONFIG_XFRM_MIGRATE
832struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
833{
834 int err = -ENOMEM;
835 struct xfrm_state *x = xfrm_state_alloc();
836 if (!x)
837 goto error;
838
839 memcpy(&x->id, &orig->id, sizeof(x->id));
840 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
841 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
842 x->props.mode = orig->props.mode;
843 x->props.replay_window = orig->props.replay_window;
844 x->props.reqid = orig->props.reqid;
845 x->props.family = orig->props.family;
846 x->props.saddr = orig->props.saddr;
847
848 if (orig->aalg) {
849 x->aalg = xfrm_algo_clone(orig->aalg);
850 if (!x->aalg)
851 goto error;
852 }
853 x->props.aalgo = orig->props.aalgo;
854
855 if (orig->ealg) {
856 x->ealg = xfrm_algo_clone(orig->ealg);
857 if (!x->ealg)
858 goto error;
859 }
860 x->props.ealgo = orig->props.ealgo;
861
862 if (orig->calg) {
863 x->calg = xfrm_algo_clone(orig->calg);
864 if (!x->calg)
865 goto error;
866 }
867 x->props.calgo = orig->props.calgo;
868
869 if (orig->encap) {
870 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
871 if (!x->encap)
872 goto error;
873 }
874
875 if (orig->coaddr) {
876 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
877 GFP_KERNEL);
878 if (!x->coaddr)
879 goto error;
880 }
881
882 err = xfrm_init_state(x);
883 if (err)
884 goto error;
885
886 x->props.flags = orig->props.flags;
887
888 x->curlft.add_time = orig->curlft.add_time;
889 x->km.state = orig->km.state;
890 x->km.seq = orig->km.seq;
891
892 return x;
893
894 error:
895 if (errp)
896 *errp = err;
897 if (x) {
898 kfree(x->aalg);
899 kfree(x->ealg);
900 kfree(x->calg);
901 kfree(x->encap);
902 kfree(x->coaddr);
903 }
904 kfree(x);
905 return NULL;
906}
907EXPORT_SYMBOL(xfrm_state_clone);
908
909/* xfrm_state_lock is held */
 910struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m)
911{
912 unsigned int h;
913 struct xfrm_state *x;
914 struct hlist_node *entry;
915
916 if (m->reqid) {
917 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
918 m->reqid, m->old_family);
919 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
920 if (x->props.mode != m->mode ||
921 x->id.proto != m->proto)
922 continue;
923 if (m->reqid && x->props.reqid != m->reqid)
924 continue;
925 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
926 m->old_family) ||
927 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
928 m->old_family))
929 continue;
930 xfrm_state_hold(x);
931 return x;
932 }
933 } else {
934 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
935 m->old_family);
936 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
937 if (x->props.mode != m->mode ||
938 x->id.proto != m->proto)
939 continue;
940 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
941 m->old_family) ||
942 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
943 m->old_family))
944 continue;
945 xfrm_state_hold(x);
946 return x;
947 }
948 }
949
950 return NULL;
951}
952EXPORT_SYMBOL(xfrm_migrate_state_find);
953
 954struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
955 struct xfrm_migrate *m)
956{
957 struct xfrm_state *xc;
958 int err;
959
960 xc = xfrm_state_clone(x, &err);
961 if (!xc)
962 return NULL;
963
964 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
965 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
966
967 /* add state */
968 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
 969		/* care is needed when the destination address of the state
 970		   is to be updated, as it is part of the (daddr, spi, proto) triplet */
971 xfrm_state_insert(xc);
972 } else {
973 if ((err = xfrm_state_add(xc)) < 0)
974 goto error;
975 }
976
977 return xc;
978error:
979 kfree(xc);
980 return NULL;
981}
982EXPORT_SYMBOL(xfrm_state_migrate);
983#endif
984
834int xfrm_state_update(struct xfrm_state *x) 985int xfrm_state_update(struct xfrm_state *x)
835{ 986{
836 struct xfrm_state *x1; 987 struct xfrm_state *x1;
@@ -1345,6 +1496,26 @@ void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1345} 1496}
1346EXPORT_SYMBOL(km_policy_expired); 1497EXPORT_SYMBOL(km_policy_expired);
1347 1498
1499int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1500 struct xfrm_migrate *m, int num_migrate)
1501{
1502 int err = -EINVAL;
1503 int ret;
1504 struct xfrm_mgr *km;
1505
1506 read_lock(&xfrm_km_lock);
1507 list_for_each_entry(km, &xfrm_km_list, list) {
1508 if (km->migrate) {
1509 ret = km->migrate(sel, dir, type, m, num_migrate);
1510 if (!ret)
1511 err = ret;
1512 }
1513 }
1514 read_unlock(&xfrm_km_lock);
1515 return err;
1516}
1517EXPORT_SYMBOL(km_migrate);
1518
1348int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) 1519int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1349{ 1520{
1350 int err = -EINVAL; 1521 int err = -EINVAL;
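km_migrate() walks the registered key managers and calls any that implement the new .migrate hook, returning 0 if at least one of them succeeded and -EINVAL otherwise. A key manager that wants these events registers roughly as follows (a sketch; my_km and my_km_migrate are hypothetical names, and the other xfrm_mgr callbacks are assumed to be filled in as usual):

	static int my_km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
				 struct xfrm_migrate *m, int num_migrate)
	{
		/* forward the migration to userspace, e.g. over PF_KEY */
		return 0;
	}

	static struct xfrm_mgr my_km = {
		.id      = "my_km",
		.migrate = my_km_migrate,
		/* .notify, .acquire, ... as for any other key manager */
	};

	/* from the key manager's init path: xfrm_register_km(&my_km); */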
@@ -1458,7 +1629,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1458} 1629}
1459EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 1630EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1460 1631
1461static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family) 1632struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1462{ 1633{
1463 struct xfrm_state_afinfo *afinfo; 1634 struct xfrm_state_afinfo *afinfo;
1464 if (unlikely(family >= NPROTO)) 1635 if (unlikely(family >= NPROTO))
@@ -1470,11 +1641,14 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1470 return afinfo; 1641 return afinfo;
1471} 1642}
1472 1643
1473static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) 1644void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1474{ 1645{
1475 read_unlock(&xfrm_state_afinfo_lock); 1646 read_unlock(&xfrm_state_afinfo_lock);
1476} 1647}
1477 1648
1649EXPORT_SYMBOL(xfrm_state_get_afinfo);
1650EXPORT_SYMBOL(xfrm_state_put_afinfo);
1651
1478/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ 1652/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1479void xfrm_state_delete_tunnel(struct xfrm_state *x) 1653void xfrm_state_delete_tunnel(struct xfrm_state *x)
1480{ 1654{
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 82f36d396fca..079a5d315759 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1632,6 +1632,176 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1632 return 0; 1632 return 0;
1633} 1633}
1634 1634
1635#ifdef CONFIG_XFRM_MIGRATE
1636static int verify_user_migrate(struct rtattr **xfrma)
1637{
1638 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1639 struct xfrm_user_migrate *um;
1640
1641 if (!rt)
1642 return -EINVAL;
1643
1644 if ((rt->rta_len - sizeof(*rt)) < sizeof(*um))
1645 return -EINVAL;
1646
1647 return 0;
1648}
1649
1650static int copy_from_user_migrate(struct xfrm_migrate *ma,
1651 struct rtattr **xfrma, int *num)
1652{
1653 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
1654 struct xfrm_user_migrate *um;
1655 int i, num_migrate;
1656
1657 um = RTA_DATA(rt);
1658 num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um);
1659
1660 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1661 return -EINVAL;
1662
1663 for (i = 0; i < num_migrate; i++, um++, ma++) {
1664 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1665 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1666 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1667 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1668
1669 ma->proto = um->proto;
1670 ma->mode = um->mode;
1671 ma->reqid = um->reqid;
1672
1673 ma->old_family = um->old_family;
1674 ma->new_family = um->new_family;
1675 }
1676
1677 *num = i;
1678 return 0;
1679}
1680
1681static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1682 struct rtattr **xfrma)
1683{
1684 struct xfrm_userpolicy_id *pi = NLMSG_DATA(nlh);
1685 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1686 u8 type;
1687 int err;
1688 int n = 0;
1689
1690 err = verify_user_migrate((struct rtattr **)xfrma);
1691 if (err)
1692 return err;
1693
1694 err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
1695 if (err)
1696 return err;
1697
1698 err = copy_from_user_migrate((struct xfrm_migrate *)m,
1699 (struct rtattr **)xfrma, &n);
1700 if (err)
1701 return err;
1702
1703 if (!n)
1704 return 0;
1705
1706 xfrm_migrate(&pi->sel, pi->dir, type, m, n);
1707
1708 return 0;
1709}
1710#else
1711static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1712 struct rtattr **xfrma)
1713{
1714 return -ENOPROTOOPT;
1715}
1716#endif
1717
1718#ifdef CONFIG_XFRM_MIGRATE
1719static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1720{
1721 struct xfrm_user_migrate um;
1722
1723 memset(&um, 0, sizeof(um));
1724 um.proto = m->proto;
1725 um.mode = m->mode;
1726 um.reqid = m->reqid;
1727 um.old_family = m->old_family;
1728 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1729 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1730 um.new_family = m->new_family;
1731 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1732 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1733
1734 RTA_PUT(skb, XFRMA_MIGRATE, sizeof(um), &um);
1735 return 0;
1736
1737rtattr_failure:
1738 return -1;
1739}
1740
1741static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1742 int num_migrate, struct xfrm_selector *sel,
1743 u8 dir, u8 type)
1744{
1745 struct xfrm_migrate *mp;
1746 struct xfrm_userpolicy_id *pol_id;
1747 struct nlmsghdr *nlh;
1748 unsigned char *b = skb->tail;
1749 int i;
1750
1751 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id));
1752 pol_id = NLMSG_DATA(nlh);
1753 nlh->nlmsg_flags = 0;
1754
1755 /* copy data from selector, dir, and type to the pol_id */
1756 memset(pol_id, 0, sizeof(*pol_id));
1757 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1758 pol_id->dir = dir;
1759
1760 if (copy_to_user_policy_type(type, skb) < 0)
1761 goto nlmsg_failure;
1762
 1763	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
1764 if (copy_to_user_migrate(mp, skb) < 0)
1765 goto nlmsg_failure;
1766 }
1767
1768 nlh->nlmsg_len = skb->tail - b;
1769 return skb->len;
1770nlmsg_failure:
1771 skb_trim(skb, b - skb->data);
1772 return -1;
1773}
1774
1775static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1776 struct xfrm_migrate *m, int num_migrate)
1777{
1778 struct sk_buff *skb;
1779 size_t len;
1780
1781 len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate);
1782 len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id));
1783#ifdef CONFIG_XFRM_SUB_POLICY
1784 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
1785#endif
1786 skb = alloc_skb(len, GFP_ATOMIC);
1787 if (skb == NULL)
1788 return -ENOMEM;
1789
1790 /* build migrate */
1791 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
1792 BUG();
1793
1794 NETLINK_CB(skb).dst_group = XFRMNLGRP_MIGRATE;
1795 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE,
1796 GFP_ATOMIC);
1797}
1798#else
1799static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1800 struct xfrm_migrate *m, int num_migrate)
1801{
1802 return -ENOPROTOOPT;
1803}
1804#endif
1635 1805
1636#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type)) 1806#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
1637 1807
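On the wire, XFRM_MSG_MIGRATE carries an xfrm_userpolicy_id payload followed by an XFRMA_MIGRATE attribute holding an array of xfrm_user_migrate entries, plus an optional XFRMA_POLICY_TYPE attribute. A rough userspace sender over NETLINK_XFRM, assuming headers that already contain this patch; send_migrate() is a hypothetical helper, the selector and direction are made-up values, and CAP_NET_ADMIN is required:

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <linux/xfrm.h>

	static int send_migrate(const struct xfrm_user_migrate *um)
	{
		char buf[NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id)) +
			 RTA_SPACE(sizeof(*um))];
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
		struct xfrm_userpolicy_id *pid;
		struct rtattr *rta;
		int fd, ret;

		memset(buf, 0, sizeof(buf));
		nlh->nlmsg_len   = NLMSG_LENGTH(sizeof(*pid));
		nlh->nlmsg_type  = XFRM_MSG_MIGRATE;
		nlh->nlmsg_flags = NLM_F_REQUEST;

		pid = NLMSG_DATA(nlh);
		pid->sel.family = AF_INET;		/* wildcard IPv4 selector */
		pid->dir        = XFRM_POLICY_OUT;

		rta = (struct rtattr *)(buf + NLMSG_ALIGN(nlh->nlmsg_len));
		rta->rta_type = XFRMA_MIGRATE;
		rta->rta_len  = RTA_LENGTH(sizeof(*um));
		memcpy(RTA_DATA(rta), um, sizeof(*um));
		nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_SPACE(sizeof(*um));

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
		if (fd < 0)
			return -1;
		ret = send(fd, buf, nlh->nlmsg_len, 0);
		close(fd);
		return ret < 0 ? -1 : 0;
	}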
@@ -1653,6 +1823,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1653 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 1823 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1654 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 1824 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1655 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), 1825 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1826 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1656}; 1827};
1657 1828
1658#undef XMSGSIZE 1829#undef XMSGSIZE
@@ -1679,6 +1850,7 @@ static struct xfrm_link {
1679 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, 1850 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1680 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, 1851 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1681 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, 1852 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1853 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1682}; 1854};
1683 1855
1684static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) 1856static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
@@ -2285,6 +2457,7 @@ static struct xfrm_mgr netlink_mgr = {
2285 .compile_policy = xfrm_compile_policy, 2457 .compile_policy = xfrm_compile_policy,
2286 .notify_policy = xfrm_send_policy_notify, 2458 .notify_policy = xfrm_send_policy_notify,
2287 .report = xfrm_send_report, 2459 .report = xfrm_send_report,
2460 .migrate = xfrm_send_migrate,
2288}; 2461};
2289 2462
2290static int __init xfrm_user_init(void) 2463static int __init xfrm_user_init(void)
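With .migrate wired into netlink_mgr, every successful migration is also broadcast to the new XFRMNLGRP_MIGRATE multicast group by xfrm_send_migrate(). A monitor only needs to join that group when binding its NETLINK_XFRM socket; a sketch, again assuming headers with this patch applied and using a hypothetical helper name:

	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/xfrm.h>

	int open_migrate_monitor(void)
	{
		struct sockaddr_nl snl = {
			.nl_family = AF_NETLINK,
			.nl_groups = 1u << (XFRMNLGRP_MIGRATE - 1),
		};
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);

		if (fd < 0)
			return -1;
		if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0) {
			close(fd);
			return -1;
		}
		return fd;	/* read XFRM_MSG_MIGRATE notifications here */
	}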