author    Linus Torvalds <torvalds@linux-foundation.org>  2008-02-01 05:06:29 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-02-01 05:06:29 -0500
commit    cec03afcb62fbbb0eaf943f6349ade61b89d7d40 (patch)
tree      cc80c13e373337d1c1dee9dd7269173da1f7c079 /net
parent    2da53b0134ad41b91556d2d2a322cc03487a1ab7 (diff)
parent    4814bdbd590e835ecec2d5e505165ec1c19796b2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (173 commits)
  [NETNS]: Lookup in FIB semantic hashes taking into account the namespace.
  [NETNS]: Add a namespace mark to fib_info.
  [IPV4]: fib_sync_down rework.
  [NETNS]: Process interface address manipulation routines in the namespace.
  [IPV4]: Small style cleanup of the error path in rtm_to_ifaddr.
  [IPV4]: Fix memory leak on error path during FIB initialization.
  [NETFILTER]: Ipv6-related xt_hashlimit compilation fix.
  [NET_SCHED]: Add flow classifier
  [NET_SCHED]: sch_sfq: make internal queues visible as classes
  [NET_SCHED]: sch_sfq: add support for external classifiers
  [NET_SCHED]: Constify struct tcf_ext_map
  [BLUETOOTH]: Fix bugs in previous conn add/del workqueue changes.
  [TCP]: Unexport sysctl_tcp_tso_win_divisor
  [IPV4]: Make struct ipv4_devconf static.
  [TR] net/802/tr.c: sysctl_tr_rif_timeout static
  [XFRM]: Fix statistics.
  [XFRM]: Remove unused exports.
  [PKT_SCHED] sch_teql.c: Duplicate IFF_BROADCAST in FMASK, remove 2nd.
  [BNX2]: Fix ASYM PAUSE advertisement for remote PHY.
  [IPV4] route cache: Introduce rt_genid for smooth cache invalidation
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c | 2
-rw-r--r--  net/8021q/vlan_dev.c | 7
-rw-r--r--  net/9p/conv.c | 5
-rw-r--r--  net/ax25/af_ax25.c | 13
-rw-r--r--  net/bluetooth/hci_sysfs.c | 52
-rw-r--r--  net/bridge/br_netfilter.c | 4
-rw-r--r--  net/bridge/netfilter/ebt_802_3.c | 10
-rw-r--r--  net/bridge/netfilter/ebt_among.c | 27
-rw-r--r--  net/bridge/netfilter/ebt_arp.c | 17
-rw-r--r--  net/bridge/netfilter/ebt_arpreply.c | 17
-rw-r--r--  net/bridge/netfilter/ebt_dnat.c | 8
-rw-r--r--  net/bridge/netfilter/ebt_ip.c | 14
-rw-r--r--  net/bridge/netfilter/ebt_limit.c | 6
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 19
-rw-r--r--  net/bridge/netfilter/ebt_mark.c | 8
-rw-r--r--  net/bridge/netfilter/ebt_mark_m.c | 8
-rw-r--r--  net/bridge/netfilter/ebt_pkttype.c | 8
-rw-r--r--  net/bridge/netfilter/ebt_redirect.c | 8
-rw-r--r--  net/bridge/netfilter/ebt_snat.c | 11
-rw-r--r--  net/bridge/netfilter/ebt_stp.c | 28
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 9
-rw-r--r--  net/bridge/netfilter/ebt_vlan.c | 12
-rw-r--r--  net/core/dev.c | 96
-rw-r--r--  net/core/dev_mcast.c | 39
-rw-r--r--  net/core/pktgen.c | 3
-rw-r--r--  net/core/sock.c | 11
-rw-r--r--  net/dccp/ipv4.c | 6
-rw-r--r--  net/dccp/ipv6.c | 8
-rw-r--r--  net/ipv4/Kconfig | 1
-rw-r--r--  net/ipv4/ah4.c | 2
-rw-r--r--  net/ipv4/arp.c | 9
-rw-r--r--  net/ipv4/devinet.c | 37
-rw-r--r--  net/ipv4/esp4.c | 554
-rw-r--r--  net/ipv4/fib_frontend.c | 14
-rw-r--r--  net/ipv4/fib_hash.c | 47
-rw-r--r--  net/ipv4/fib_semantics.c | 116
-rw-r--r--  net/ipv4/fib_trie.c | 104
-rw-r--r--  net/ipv4/inet_connection_sock.c | 8
-rw-r--r--  net/ipv4/inet_diag.c | 15
-rw-r--r--  net/ipv4/inet_hashtables.c | 69
-rw-r--r--  net/ipv4/ip_output.c | 7
-rw-r--r--  net/ipv4/ipcomp.c | 7
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 102
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c | 31
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 18
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 112
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 7
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 6
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 33
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 33
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 33
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 14
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 40
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 22
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 42
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c | 5
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 3
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c | 10
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_gre.c | 16
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_icmp.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_tcp.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udp.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 16
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_tftp.c | 2
-rw-r--r--  net/ipv4/raw.c | 42
-rw-r--r--  net/ipv4/route.c | 211
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 15
-rw-r--r--  net/ipv4/tcp_output.c | 1
-rw-r--r--  net/ipv4/udp.c | 25
-rw-r--r--  net/ipv4/xfrm4_policy.c | 1
-rw-r--r--  net/ipv4/xfrm4_tunnel.c | 4
-rw-r--r--  net/ipv6/Kconfig | 1
-rw-r--r--  net/ipv6/ah6.c | 2
-rw-r--r--  net/ipv6/esp6.c | 515
-rw-r--r--  net/ipv6/inet6_hashtables.c | 135
-rw-r--r--  net/ipv6/ip6_output.c | 6
-rw-r--r--  net/ipv6/ipcomp6.c | 7
-rw-r--r--  net/ipv6/mip6.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 18
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 113
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 33
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 33
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c | 31
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 7
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 22
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 16
-rw-r--r--  net/ipv6/raw.c | 5
-rw-r--r--  net/ipv6/route.c | 5
-rw-r--r--  net/ipv6/tcp_ipv6.c | 19
-rw-r--r--  net/ipv6/udp.c | 10
-rw-r--r--  net/ipv6/xfrm6_policy.c | 1
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/mac80211/rx.c | 48
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 234
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 53
-rw-r--r--  net/netfilter/nf_conntrack_h323_asn1.c | 156
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 23
-rw-r--r--  net/netfilter/nf_conntrack_h323_types.c | 346
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 60
-rw-r--r--  net/netfilter/nf_conntrack_irc.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 68
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 14
-rw-r--r--  net/netfilter/nf_conntrack_proto_generic.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 192
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_sane.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 29
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 66
-rw-r--r--  net/netfilter/nf_conntrack_tftp.c | 5
-rw-r--r--  net/netfilter/nf_log.c | 2
-rw-r--r--  net/netfilter/nfnetlink_log.c | 4
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 6
-rw-r--r--  net/netfilter/x_tables.c | 313
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 62
-rw-r--r--  net/netfilter/xt_connlimit.c | 6
-rw-r--r--  net/netfilter/xt_conntrack.c | 50
-rw-r--r--  net/netfilter/xt_hashlimit.c | 326
-rw-r--r--  net/netfilter/xt_iprange.c | 2
-rw-r--r--  net/netfilter/xt_owner.c | 14
-rw-r--r--  net/netlink/af_netlink.c | 52
-rw-r--r--  net/rfkill/rfkill-input.c | 9
-rw-r--r--  net/rfkill/rfkill.c | 3
-rw-r--r--  net/rxrpc/ar-call.c | 2
-rw-r--r--  net/rxrpc/ar-internal.h | 6
-rw-r--r--  net/rxrpc/ar-proc.c | 6
-rw-r--r--  net/sched/Kconfig | 13
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/cls_api.c | 6
-rw-r--r--  net/sched/cls_basic.c | 2
-rw-r--r--  net/sched/cls_flow.c | 660
-rw-r--r--  net/sched/cls_fw.c | 2
-rw-r--r--  net/sched/cls_route.c | 2
-rw-r--r--  net/sched/cls_tcindex.c | 2
-rw-r--r--  net/sched/cls_u32.c | 2
-rw-r--r--  net/sched/sch_ingress.c | 79
-rw-r--r--  net/sched/sch_sfq.c | 134
-rw-r--r--  net/sched/sch_teql.c | 2
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/x25/af_x25.c | 2
-rw-r--r--  net/xfrm/xfrm_algo.c | 144
-rw-r--r--  net/xfrm/xfrm_input.c | 5
-rw-r--r--  net/xfrm/xfrm_output.c | 1
-rw-r--r--  net/xfrm/xfrm_proc.c | 3
-rw-r--r--  net/xfrm/xfrm_state.c | 18
-rw-r--r--  net/xfrm/xfrm_user.c | 71
152 files changed, 4368 insertions, 2298 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index 3f16b1720554..18c66475d8c3 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -76,7 +76,7 @@ static DEFINE_SPINLOCK(rif_lock);
 
 static struct timer_list rif_timer;
 
-int sysctl_tr_rif_timeout = 60*10*HZ;
+static int sysctl_tr_rif_timeout = 60*10*HZ;
 
 static inline unsigned long rif_hash(const unsigned char *addr)
 {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 8059fa42b085..77f04e49a1a0 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -563,6 +563,7 @@ static int vlan_dev_stop(struct net_device *dev)
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
 
 	dev_mc_unsync(real_dev, dev);
+	dev_unicast_unsync(real_dev, dev);
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(real_dev, -1);
 	if (dev->flags & IFF_PROMISC)
@@ -634,9 +635,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 	dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
 }
 
-static void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
+static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
 	dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+	dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -702,7 +704,8 @@ void vlan_setup(struct net_device *dev)
 	dev->open = vlan_dev_open;
 	dev->stop = vlan_dev_stop;
 	dev->set_mac_address = vlan_dev_set_mac_address;
-	dev->set_multicast_list = vlan_dev_set_multicast_list;
+	dev->set_rx_mode = vlan_dev_set_rx_mode;
+	dev->set_multicast_list = vlan_dev_set_rx_mode;
 	dev->change_rx_flags = vlan_dev_change_rx_flags;
 	dev->do_ioctl = vlan_dev_ioctl;
 	dev->destructor = free_netdev;
diff --git a/net/9p/conv.c b/net/9p/conv.c
index aa2aa9884f95..3fe35d532c87 100644
--- a/net/9p/conv.c
+++ b/net/9p/conv.c
@@ -128,11 +128,6 @@ static char *buf_put_stringn(struct cbuf *buf, const char *s, u16 slen)
 	return ret;
 }
 
-static inline void buf_put_string(struct cbuf *buf, const char *s)
-{
-	buf_put_stringn(buf, s, strlen(s));
-}
-
 static u8 buf_get_int8(struct cbuf *buf)
 {
 	u8 ret = 0;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 1bc0e85f04a5..8fc64e3150a2 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1037,16 +1037,13 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	int err = 0;
 
 	if (addr_len != sizeof(struct sockaddr_ax25) &&
-	    addr_len != sizeof(struct full_sockaddr_ax25)) {
-		/* support for old structure may go away some time */
+	    addr_len != sizeof(struct full_sockaddr_ax25))
+		/* support for old structure may go away some time
+		 * ax25_bind(): uses old (6 digipeater) socket structure.
+		 */
 		if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
-		    (addr_len > sizeof(struct full_sockaddr_ax25))) {
+		    (addr_len > sizeof(struct full_sockaddr_ax25)))
 			return -EINVAL;
-		}
-
-		printk(KERN_WARNING "ax25_bind(): %s uses old (6 digipeater) socket structure.\n",
-			current->comm);
-	}
 
 	if (addr->fsa_ax25.sax25_family != AF_AX25)
 		return -EINVAL;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 17f7fb720553..e13cf5ef144c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -12,6 +12,8 @@
 #undef BT_DBG
 #define BT_DBG(D...)
 #endif
+static struct workqueue_struct *btaddconn;
+static struct workqueue_struct *btdelconn;
 
 static inline char *typetostr(int type)
 {
@@ -279,6 +281,8 @@ static void add_conn(struct work_struct *work)
 	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	int i;
 
+	flush_workqueue(btdelconn);
+
 	if (device_add(&conn->dev) < 0) {
 		BT_ERR("Failed to register connection device");
 		return;
@@ -313,7 +317,7 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
 
 	INIT_WORK(&conn->work, add_conn);
 
-	schedule_work(&conn->work);
+	queue_work(btaddconn, &conn->work);
 }
 
 static int __match_tty(struct device *dev, void *data)
@@ -349,7 +353,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 
 	INIT_WORK(&conn->work, del_conn);
 
-	schedule_work(&conn->work);
+	queue_work(btdelconn, &conn->work);
 }
 
 int hci_register_sysfs(struct hci_dev *hdev)
@@ -398,28 +402,54 @@ int __init bt_sysfs_init(void)
 {
 	int err;
 
+	btaddconn = create_singlethread_workqueue("btaddconn");
+	if (!btaddconn) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	btdelconn = create_singlethread_workqueue("btdelconn");
+	if (!btdelconn) {
+		err = -ENOMEM;
+		goto out_del;
+	}
+
 	bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0);
-	if (IS_ERR(bt_platform))
-		return PTR_ERR(bt_platform);
+	if (IS_ERR(bt_platform)) {
+		err = PTR_ERR(bt_platform);
+		goto out_platform;
+	}
 
 	err = bus_register(&bt_bus);
-	if (err < 0) {
-		platform_device_unregister(bt_platform);
-		return err;
-	}
+	if (err < 0)
+		goto out_bus;
 
 	bt_class = class_create(THIS_MODULE, "bluetooth");
 	if (IS_ERR(bt_class)) {
-		bus_unregister(&bt_bus);
-		platform_device_unregister(bt_platform);
-		return PTR_ERR(bt_class);
+		err = PTR_ERR(bt_class);
+		goto out_class;
 	}
 
 	return 0;
+
+out_class:
+	bus_unregister(&bt_bus);
+out_bus:
+	platform_device_unregister(bt_platform);
+out_platform:
+	destroy_workqueue(btdelconn);
+out_del:
+	destroy_workqueue(btaddconn);
+out:
+	return err;
 }
 
 void bt_sysfs_cleanup(void)
 {
+	destroy_workqueue(btaddconn);
+
+	destroy_workqueue(btdelconn);
+
 	class_destroy(bt_class);
 
 	bus_unregister(&bt_bus);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 80014bab81b0..1c0efd8ad9f3 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -828,10 +828,6 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 	nf_bridge_pull_encap_header(skb);
 	nf_bridge_save_header(skb);
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-	if (nf_bridge->netoutdev)
-		realoutdev = nf_bridge->netoutdev;
-#endif
 	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
 		br_nf_dev_queue_xmit);
 
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index 41a78072cd0e..98534025360f 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -15,8 +15,8 @@
 static int ebt_filter_802_3(const struct sk_buff *skb, const struct net_device *in,
    const struct net_device *out, const void *data, unsigned int datalen)
 {
-	struct ebt_802_3_info *info = (struct ebt_802_3_info *)data;
-	struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb);
+	const struct ebt_802_3_info *info = data;
+	const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb);
 	__be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;
 
 	if (info->bitmask & EBT_802_3_SAP) {
@@ -40,7 +40,7 @@ static struct ebt_match filter_802_3;
 static int ebt_802_3_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_802_3_info *info = (struct ebt_802_3_info *)data;
+	const struct ebt_802_3_info *info = data;
 
 	if (datalen < sizeof(struct ebt_802_3_info))
 		return -EINVAL;
@@ -50,8 +50,7 @@ static int ebt_802_3_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match filter_802_3 =
-{
+static struct ebt_match filter_802_3 __read_mostly = {
 	.name = EBT_802_3_MATCH,
 	.match = ebt_filter_802_3,
 	.check = ebt_802_3_check,
@@ -70,4 +69,5 @@ static void __exit ebt_802_3_fini(void)
 
 module_init(ebt_802_3_init);
 module_exit(ebt_802_3_fini);
+MODULE_DESCRIPTION("Ebtables: DSAP/SSAP field and SNAP type matching");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 6436d30a550e..70b6dca5ea75 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -25,7 +25,7 @@ static int ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
 	const struct ebt_mac_wormhash_tuple *p;
 	int start, limit, i;
 	uint32_t cmp[2] = { 0, 0 };
-	int key = (const unsigned char) mac[5];
+	int key = ((const unsigned char *)mac)[5];
 
 	memcpy(((char *) cmp) + 2, mac, 6);
 	start = wh->table[key];
@@ -73,15 +73,18 @@ static int ebt_mac_wormhash_check_integrity(const struct ebt_mac_wormhash
 static int get_ip_dst(const struct sk_buff *skb, __be32 *addr)
 {
 	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
-		struct iphdr _iph, *ih;
+		const struct iphdr *ih;
+		struct iphdr _iph;
 
 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
 		if (ih == NULL)
 			return -1;
 		*addr = ih->daddr;
 	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
-		struct arphdr _arph, *ah;
-		__be32 buf, *bp;
+		const struct arphdr *ah;
+		struct arphdr _arph;
+		const __be32 *bp;
+		__be32 buf;
 
 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 		if (ah == NULL ||
@@ -101,15 +104,18 @@ static int get_ip_dst(const struct sk_buff *skb, __be32 *addr)
 static int get_ip_src(const struct sk_buff *skb, __be32 *addr)
 {
 	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
-		struct iphdr _iph, *ih;
+		const struct iphdr *ih;
+		struct iphdr _iph;
 
 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
 		if (ih == NULL)
 			return -1;
 		*addr = ih->saddr;
 	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
-		struct arphdr _arph, *ah;
-		__be32 buf, *bp;
+		const struct arphdr *ah;
+		struct arphdr _arph;
+		const __be32 *bp;
+		__be32 buf;
 
 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 		if (ah == NULL ||
@@ -130,7 +136,7 @@ static int ebt_filter_among(const struct sk_buff *skb,
 			    const struct net_device *out, const void *data,
 			    unsigned int datalen)
 {
-	struct ebt_among_info *info = (struct ebt_among_info *) data;
+	const struct ebt_among_info *info = data;
 	const char *dmac, *smac;
 	const struct ebt_mac_wormhash *wh_dst, *wh_src;
 	__be32 dip = 0, sip = 0;
@@ -175,7 +181,7 @@ static int ebt_among_check(const char *tablename, unsigned int hookmask,
 			   const struct ebt_entry *e, void *data,
 			   unsigned int datalen)
 {
-	struct ebt_among_info *info = (struct ebt_among_info *) data;
+	const struct ebt_among_info *info = data;
 	int expected_length = sizeof(struct ebt_among_info);
 	const struct ebt_mac_wormhash *wh_dst, *wh_src;
 	int err;
@@ -206,7 +212,7 @@ static int ebt_among_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match filter_among = {
+static struct ebt_match filter_among __read_mostly = {
 	.name = EBT_AMONG_MATCH,
 	.match = ebt_filter_among,
 	.check = ebt_among_check,
@@ -225,4 +231,5 @@ static void __exit ebt_among_fini(void)
 
 module_init(ebt_among_init);
 module_exit(ebt_among_fini);
+MODULE_DESCRIPTION("Ebtables: Combined MAC/IP address list matching");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index 18141392a9b4..7c535be75665 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -18,8 +18,9 @@
 static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in,
    const struct net_device *out, const void *data, unsigned int datalen)
 {
-	struct ebt_arp_info *info = (struct ebt_arp_info *)data;
-	struct arphdr _arph, *ah;
+	const struct ebt_arp_info *info = data;
+	const struct arphdr *ah;
+	struct arphdr _arph;
 
 	ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 	if (ah == NULL)
@@ -35,7 +36,8 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in
 		return EBT_NOMATCH;
 
 	if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) {
-		__be32 saddr, daddr, *sap, *dap;
+		const __be32 *sap, *dap;
+		__be32 saddr, daddr;
 
 		if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP))
 			return EBT_NOMATCH;
@@ -61,7 +63,8 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in
 	}
 
 	if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) {
-		unsigned char _mac[ETH_ALEN], *mp;
+		const unsigned char *mp;
+		unsigned char _mac[ETH_ALEN];
 		uint8_t verdict, i;
 
 		if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER))
@@ -100,7 +103,7 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in
 static int ebt_arp_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_arp_info *info = (struct ebt_arp_info *)data;
+	const struct ebt_arp_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_arp_info)))
 		return -EINVAL;
@@ -113,8 +116,7 @@ static int ebt_arp_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match filter_arp =
-{
+static struct ebt_match filter_arp __read_mostly = {
 	.name = EBT_ARP_MATCH,
 	.match = ebt_filter_arp,
 	.check = ebt_arp_check,
@@ -133,4 +135,5 @@ static void __exit ebt_arp_fini(void)
 
 module_init(ebt_arp_init);
 module_exit(ebt_arp_fini);
+MODULE_DESCRIPTION("Ebtables: ARP protocol packet match");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 48a80e423287..0c4279590fc7 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -19,10 +19,13 @@ static int ebt_target_reply(struct sk_buff *skb, unsigned int hooknr,
 			    const struct net_device *in, const struct net_device *out,
 			    const void *data, unsigned int datalen)
 {
-	struct ebt_arpreply_info *info = (struct ebt_arpreply_info *)data;
-	__be32 _sip, *siptr, _dip, *diptr;
-	struct arphdr _ah, *ap;
-	unsigned char _sha[ETH_ALEN], *shp;
+	struct ebt_arpreply_info *info = (void *)data;
+	const __be32 *siptr, *diptr;
+	__be32 _sip, _dip;
+	const struct arphdr *ap;
+	struct arphdr _ah;
+	const unsigned char *shp;
+	unsigned char _sha[ETH_ALEN];
 
 	ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
 	if (ap == NULL)
@@ -58,7 +61,7 @@ static int ebt_target_reply(struct sk_buff *skb, unsigned int hooknr,
 static int ebt_target_reply_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_arpreply_info *info = (struct ebt_arpreply_info *)data;
+	const struct ebt_arpreply_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_arpreply_info)))
 		return -EINVAL;
@@ -73,8 +76,7 @@ static int ebt_target_reply_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_target reply_target =
-{
+static struct ebt_target reply_target __read_mostly = {
 	.name = EBT_ARPREPLY_TARGET,
 	.target = ebt_target_reply,
 	.check = ebt_target_reply_check,
@@ -93,4 +95,5 @@ static void __exit ebt_arpreply_fini(void)
 
 module_init(ebt_arpreply_init);
 module_exit(ebt_arpreply_fini);
+MODULE_DESCRIPTION("Ebtables: ARP reply target");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 74262e9a566a..e700cbf634c2 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -18,7 +18,7 @@ static int ebt_target_dnat(struct sk_buff *skb, unsigned int hooknr,
 			   const struct net_device *in, const struct net_device *out,
 			   const void *data, unsigned int datalen)
 {
-	struct ebt_nat_info *info = (struct ebt_nat_info *)data;
+	const struct ebt_nat_info *info = data;
 
 	if (skb_make_writable(skb, 0))
 		return NF_DROP;
@@ -30,7 +30,7 @@ static int ebt_target_dnat(struct sk_buff *skb, unsigned int hooknr,
 static int ebt_target_dnat_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_nat_info *info = (struct ebt_nat_info *)data;
+	const struct ebt_nat_info *info = data;
 
 	if (BASE_CHAIN && info->target == EBT_RETURN)
 		return -EINVAL;
@@ -46,8 +46,7 @@ static int ebt_target_dnat_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_target dnat =
-{
+static struct ebt_target dnat __read_mostly = {
 	.name = EBT_DNAT_TARGET,
 	.target = ebt_target_dnat,
 	.check = ebt_target_dnat_check,
@@ -66,4 +65,5 @@ static void __exit ebt_dnat_fini(void)
 
 module_init(ebt_dnat_init);
 module_exit(ebt_dnat_fini);
+MODULE_DESCRIPTION("Ebtables: Destination MAC address translation");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 69f7f0ab9c76..65caa00dcf2a 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -28,9 +28,11 @@ static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in,
 			 const struct net_device *out, const void *data,
 			 unsigned int datalen)
 {
-	struct ebt_ip_info *info = (struct ebt_ip_info *)data;
-	struct iphdr _iph, *ih;
-	struct tcpudphdr _ports, *pptr;
+	const struct ebt_ip_info *info = data;
+	const struct iphdr *ih;
+	struct iphdr _iph;
+	const struct tcpudphdr *pptr;
+	struct tcpudphdr _ports;
 
 	ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
 	if (ih == NULL)
@@ -79,7 +81,7 @@ static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in,
 static int ebt_ip_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_ip_info *info = (struct ebt_ip_info *)data;
+	const struct ebt_ip_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_ip_info)))
 		return -EINVAL;
@@ -105,8 +107,7 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match filter_ip =
-{
+static struct ebt_match filter_ip __read_mostly = {
 	.name = EBT_IP_MATCH,
 	.match = ebt_filter_ip,
 	.check = ebt_ip_check,
@@ -125,4 +126,5 @@ static void __exit ebt_ip_fini(void)
 
 module_init(ebt_ip_init);
 module_exit(ebt_ip_fini);
+MODULE_DESCRIPTION("Ebtables: IPv4 protocol packet match");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index d48fa5cb26cf..8cbdc01c253e 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -69,7 +69,7 @@ user2credits(u_int32_t user)
 static int ebt_limit_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_limit_info *info = (struct ebt_limit_info *)data;
+	struct ebt_limit_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_limit_info)))
 		return -EINVAL;
@@ -90,8 +90,7 @@ static int ebt_limit_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match ebt_limit_reg =
-{
+static struct ebt_match ebt_limit_reg __read_mostly = {
 	.name = EBT_LIMIT_MATCH,
 	.match = ebt_limit_match,
 	.check = ebt_limit_check,
@@ -110,4 +109,5 @@ static void __exit ebt_limit_fini(void)
 
 module_init(ebt_limit_init);
 module_exit(ebt_limit_fini);
+MODULE_DESCRIPTION("Ebtables: Rate-limit match");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 3be9e9898553..0b209e4aad0a 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -24,7 +24,7 @@ static DEFINE_SPINLOCK(ebt_log_lock);
 static int ebt_log_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_log_info *info = (struct ebt_log_info *)data;
+	struct ebt_log_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_log_info)))
 		return -EINVAL;
@@ -50,7 +50,7 @@ struct arppayload
 	unsigned char ip_dst[4];
 };
 
-static void print_MAC(unsigned char *p)
+static void print_MAC(const unsigned char *p)
 {
 	int i;
 
@@ -84,7 +84,8 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
 
 	if ((bitmask & EBT_LOG_IP) && eth_hdr(skb)->h_proto ==
 	   htons(ETH_P_IP)){
-		struct iphdr _iph, *ih;
+		const struct iphdr *ih;
+		struct iphdr _iph;
 
 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
 		if (ih == NULL) {
@@ -99,7 +100,8 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
 		    ih->protocol == IPPROTO_UDPLITE ||
 		    ih->protocol == IPPROTO_SCTP ||
 		    ih->protocol == IPPROTO_DCCP) {
-			struct tcpudphdr _ports, *pptr;
+			const struct tcpudphdr *pptr;
+			struct tcpudphdr _ports;
 
 			pptr = skb_header_pointer(skb, ih->ihl*4,
 						  sizeof(_ports), &_ports);
@@ -116,7 +118,8 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
 	if ((bitmask & EBT_LOG_ARP) &&
 	    ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) ||
 	     (eth_hdr(skb)->h_proto == htons(ETH_P_RARP)))) {
-		struct arphdr _arph, *ah;
+		const struct arphdr *ah;
+		struct arphdr _arph;
 
 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 		if (ah == NULL) {
@@ -132,7 +135,8 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
 		if (ah->ar_hrd == htons(1) &&
 		    ah->ar_hln == ETH_ALEN &&
 		    ah->ar_pln == sizeof(__be32)) {
-			struct arppayload _arpp, *ap;
+			const struct arppayload *ap;
+			struct arppayload _arpp;
 
 			ap = skb_header_pointer(skb, sizeof(_arph),
 						sizeof(_arpp), &_arpp);
@@ -160,7 +164,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
 		    const struct net_device *in, const struct net_device *out,
 		    const void *data, unsigned int datalen)
 {
-	struct ebt_log_info *info = (struct ebt_log_info *)data;
+	const struct ebt_log_info *info = data;
 	struct nf_loginfo li;
 
 	li.type = NF_LOG_TYPE_LOG;
@@ -208,4 +212,5 @@ static void __exit ebt_log_fini(void)
 
 module_init(ebt_log_init);
 module_exit(ebt_log_fini);
+MODULE_DESCRIPTION("Ebtables: Packet logging to syslog");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 6cba54309c09..36723f47db0a 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -21,7 +21,7 @@ static int ebt_target_mark(struct sk_buff *skb, unsigned int hooknr,
 			   const struct net_device *in, const struct net_device *out,
 			   const void *data, unsigned int datalen)
 {
-	struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
+	const struct ebt_mark_t_info *info = data;
 	int action = info->target & -16;
 
 	if (action == MARK_SET_VALUE)
@@ -39,7 +39,7 @@ static int ebt_target_mark(struct sk_buff *skb, unsigned int hooknr,
 static int ebt_target_mark_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
+	const struct ebt_mark_t_info *info = data;
 	int tmp;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info)))
@@ -57,8 +57,7 @@ static int ebt_target_mark_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_target mark_target =
-{
+static struct ebt_target mark_target __read_mostly = {
 	.name = EBT_MARK_TARGET,
 	.target = ebt_target_mark,
 	.check = ebt_target_mark_check,
@@ -77,4 +76,5 @@ static void __exit ebt_mark_fini(void)
 
 module_init(ebt_mark_init);
 module_exit(ebt_mark_fini);
+MODULE_DESCRIPTION("Ebtables: Packet mark modification");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index 6b0d2169af74..9b0a4543861f 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -16,7 +16,7 @@ static int ebt_filter_mark(const struct sk_buff *skb,
 	const struct net_device *in, const struct net_device *out, const void *data,
 	unsigned int datalen)
 {
-	struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data;
+	const struct ebt_mark_m_info *info = data;
 
 	if (info->bitmask & EBT_MARK_OR)
 		return !(!!(skb->mark & info->mask) ^ info->invert);
@@ -26,7 +26,7 @@ static int ebt_filter_mark(const struct sk_buff *skb,
 static int ebt_mark_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data;
+	const struct ebt_mark_m_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_m_info)))
 		return -EINVAL;
@@ -39,8 +39,7 @@ static int ebt_mark_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match filter_mark =
-{
+static struct ebt_match filter_mark __read_mostly = {
 	.name = EBT_MARK_MATCH,
 	.match = ebt_filter_mark,
 	.check = ebt_mark_check,
@@ -59,4 +58,5 @@ static void __exit ebt_mark_m_fini(void)
 
 module_init(ebt_mark_m_init);
 module_exit(ebt_mark_m_fini);
+MODULE_DESCRIPTION("Ebtables: Packet mark match");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index 4fffd70e4da7..676db32df3d1 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -18,7 +18,7 @@ static int ebt_filter_pkttype(const struct sk_buff *skb,
 			      const void *data,
 			      unsigned int datalen)
 {
-	struct ebt_pkttype_info *info = (struct ebt_pkttype_info *)data;
+	const struct ebt_pkttype_info *info = data;
 
 	return (skb->pkt_type != info->pkt_type) ^ info->invert;
 }
@@ -26,7 +26,7 @@ static int ebt_filter_pkttype(const struct sk_buff *skb,
 static int ebt_pkttype_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_pkttype_info *info = (struct ebt_pkttype_info *)data;
+	const struct ebt_pkttype_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_pkttype_info)))
 		return -EINVAL;
@@ -36,8 +36,7 @@ static int ebt_pkttype_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match filter_pkttype =
-{
+static struct ebt_match filter_pkttype __read_mostly = {
 	.name = EBT_PKTTYPE_MATCH,
 	.match = ebt_filter_pkttype,
 	.check = ebt_pkttype_check,
@@ -56,4 +55,5 @@ static void __exit ebt_pkttype_fini(void)
 
 module_init(ebt_pkttype_init);
 module_exit(ebt_pkttype_fini);
+MODULE_DESCRIPTION("Ebtables: Link layer packet type match");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 422cb834cff9..bfdf2fb60b1f 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -19,7 +19,7 @@ static int ebt_target_redirect(struct sk_buff *skb, unsigned int hooknr,
 			       const struct net_device *in, const struct net_device *out,
 			       const void *data, unsigned int datalen)
 {
-	struct ebt_redirect_info *info = (struct ebt_redirect_info *)data;
+	const struct ebt_redirect_info *info = data;
 
 	if (skb_make_writable(skb, 0))
 		return NF_DROP;
@@ -36,7 +36,7 @@ static int ebt_target_redirect(struct sk_buff *skb, unsigned int hooknr,
 static int ebt_target_redirect_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_redirect_info *info = (struct ebt_redirect_info *)data;
+	const struct ebt_redirect_info *info = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_redirect_info)))
 		return -EINVAL;
@@ -51,8 +51,7 @@ static int ebt_target_redirect_check(const char *tablename, unsigned int hookmas
 	return 0;
 }
 
-static struct ebt_target redirect_target =
-{
+static struct ebt_target redirect_target __read_mostly = {
 	.name = EBT_REDIRECT_TARGET,
 	.target = ebt_target_redirect,
 	.check = ebt_target_redirect_check,
@@ -71,4 +70,5 @@ static void __exit ebt_redirect_fini(void)
 
 module_init(ebt_redirect_init);
 module_exit(ebt_redirect_fini);
+MODULE_DESCRIPTION("Ebtables: Packet redirection to localhost");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 425ac920904d..e252dabbb143 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -20,7 +20,7 @@ static int ebt_target_snat(struct sk_buff *skb, unsigned int hooknr,
 			   const struct net_device *in, const struct net_device *out,
 			   const void *data, unsigned int datalen)
 {
-	struct ebt_nat_info *info = (struct ebt_nat_info *) data;
+	const struct ebt_nat_info *info = data;
 
 	if (skb_make_writable(skb, 0))
 		return NF_DROP;
@@ -28,7 +28,8 @@ static int ebt_target_snat(struct sk_buff *skb, unsigned int hooknr,
 	memcpy(eth_hdr(skb)->h_source, info->mac, ETH_ALEN);
 	if (!(info->target & NAT_ARP_BIT) &&
 	    eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
-		struct arphdr _ah, *ap;
+		const struct arphdr *ap;
+		struct arphdr _ah;
 
 		ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
 		if (ap == NULL)
@@ -45,7 +46,7 @@ out:
 static int ebt_target_snat_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_nat_info *info = (struct ebt_nat_info *) data;
+	const struct ebt_nat_info *info = data;
 	int tmp;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_nat_info)))
@@ -67,8 +68,7 @@ static int ebt_target_snat_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_target snat =
-{
+static struct ebt_target snat __read_mostly = {
 	.name = EBT_SNAT_TARGET,
 	.target = ebt_target_snat,
 	.check = ebt_target_snat_check,
@@ -87,4 +87,5 @@ static void __exit ebt_snat_fini(void)
 
 module_init(ebt_snat_init);
 module_exit(ebt_snat_fini);
+MODULE_DESCRIPTION("Ebtables: Source MAC address translation");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 31b77367319c..40f36d37607d 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -40,10 +40,10 @@ struct stp_config_pdu {
 #define NR16(p) (p[0] << 8 | p[1])
 #define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
 
-static int ebt_filter_config(struct ebt_stp_info *info,
-			     struct stp_config_pdu *stpc)
+static int ebt_filter_config(const struct ebt_stp_info *info,
+			     const struct stp_config_pdu *stpc)
 {
-	struct ebt_stp_config_info *c;
+	const struct ebt_stp_config_info *c;
 	uint16_t v16;
 	uint32_t v32;
 	int verdict, i;
@@ -122,9 +122,10 @@ static int ebt_filter_config(struct ebt_stp_info *info,
 static int ebt_filter_stp(const struct sk_buff *skb, const struct net_device *in,
    const struct net_device *out, const void *data, unsigned int datalen)
 {
-	struct ebt_stp_info *info = (struct ebt_stp_info *)data;
-	struct stp_header _stph, *sp;
-	uint8_t header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
+	const struct ebt_stp_info *info = data;
+	const struct stp_header *sp;
+	struct stp_header _stph;
+	const uint8_t header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
 
 	sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph);
 	if (sp == NULL)
@@ -140,7 +141,8 @@ static int ebt_filter_stp(const struct sk_buff *skb, const struct net_device *in
 
 	if (sp->type == BPDU_TYPE_CONFIG &&
 	    info->bitmask & EBT_STP_CONFIG_MASK) {
-		struct stp_config_pdu _stpc, *st;
+		const struct stp_config_pdu *st;
+		struct stp_config_pdu _stpc;
 
 		st = skb_header_pointer(skb, sizeof(_stph),
 					sizeof(_stpc), &_stpc);
@@ -154,10 +156,10 @@ static int ebt_filter_stp(const struct sk_buff *skb, const struct net_device *in
 static int ebt_stp_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_stp_info *info = (struct ebt_stp_info *)data;
-	int len = EBT_ALIGN(sizeof(struct ebt_stp_info));
-	uint8_t bridge_ula[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
-	uint8_t msk[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	const struct ebt_stp_info *info = data;
+	const unsigned int len = EBT_ALIGN(sizeof(struct ebt_stp_info));
+	const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
+	const uint8_t msk[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
 	if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
 	    !(info->bitmask & EBT_STP_MASK))
@@ -172,8 +174,7 @@ static int ebt_stp_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_match filter_stp =
-{
+static struct ebt_match filter_stp __read_mostly = {
 	.name = EBT_STP_MATCH,
 	.match = ebt_filter_stp,
 	.check = ebt_stp_check,
@@ -192,4 +193,5 @@ static void __exit ebt_stp_fini(void)
 
 module_init(ebt_stp_init);
 module_exit(ebt_stp_fini);
+MODULE_DESCRIPTION("Ebtables: Spanning Tree Protocol packet match");
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 8e7b00b68d38..2d4c9ef909fc 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -249,7 +249,7 @@ static void ebt_ulog(const struct sk_buff *skb, unsigned int hooknr,
 		     const struct net_device *in, const struct net_device *out,
 		     const void *data, unsigned int datalen)
 {
-	struct ebt_ulog_info *uloginfo = (struct ebt_ulog_info *)data;
+	const struct ebt_ulog_info *uloginfo = data;
 
 	ebt_ulog_packet(hooknr, skb, in, out, uloginfo, NULL);
 }
@@ -258,7 +258,7 @@ static void ebt_ulog(const struct sk_buff *skb, unsigned int hooknr,
 static int ebt_ulog_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
-	struct ebt_ulog_info *uloginfo = (struct ebt_ulog_info *)data;
+	struct ebt_ulog_info *uloginfo = data;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_ulog_info)) ||
 	    uloginfo->nlgroup > 31)
@@ -272,7 +272,7 @@ static int ebt_ulog_check(const char *tablename, unsigned int hookmask,
 	return 0;
 }
 
-static struct ebt_watcher ulog = {
+static struct ebt_watcher ulog __read_mostly = {
 	.name = EBT_ULOG_WATCHER,
 	.watcher = ebt_ulog,
 	.check = ebt_ulog_check,
@@ -340,5 +340,4 @@ module_init(ebt_ulog_init);
 module_exit(ebt_ulog_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
-MODULE_DESCRIPTION("ebtables userspace logging module for bridged Ethernet"
-		   " frames");
+MODULE_DESCRIPTION("Ebtables: Packet logging to netlink using ULOG");
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 0ddf7499d496..ab60b0dade80 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -31,8 +31,7 @@ static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "debug=1 is turn on debug messages");
 MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>");
-MODULE_DESCRIPTION("802.1Q match module (ebtables extension), v"
-		   MODULE_VERS);
+MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match");
 MODULE_LICENSE("GPL");
 
 
@@ -46,8 +45,9 @@ ebt_filter_vlan(const struct sk_buff *skb,
46 const struct net_device *out, 45 const struct net_device *out,
47 const void *data, unsigned int datalen) 46 const void *data, unsigned int datalen)
48{ 47{
49 struct ebt_vlan_info *info = (struct ebt_vlan_info *) data; 48 const struct ebt_vlan_info *info = data;
50 struct vlan_hdr _frame, *fp; 49 const struct vlan_hdr *fp;
50 struct vlan_hdr _frame;
51 51
52 unsigned short TCI; /* Whole TCI, given from parsed frame */ 52 unsigned short TCI; /* Whole TCI, given from parsed frame */
53 unsigned short id; /* VLAN ID, given from frame TCI */ 53 unsigned short id; /* VLAN ID, given from frame TCI */
@@ -91,7 +91,7 @@ ebt_check_vlan(const char *tablename,
91 unsigned int hooknr, 91 unsigned int hooknr,
92 const struct ebt_entry *e, void *data, unsigned int datalen) 92 const struct ebt_entry *e, void *data, unsigned int datalen)
93{ 93{
94 struct ebt_vlan_info *info = (struct ebt_vlan_info *) data; 94 struct ebt_vlan_info *info = data;
95 95
96 /* Parameters buffer overflow check */ 96 /* Parameters buffer overflow check */
97 if (datalen != EBT_ALIGN(sizeof(struct ebt_vlan_info))) { 97 if (datalen != EBT_ALIGN(sizeof(struct ebt_vlan_info))) {
@@ -169,7 +169,7 @@ ebt_check_vlan(const char *tablename,
169 return 0; 169 return 0;
170} 170}
171 171
172static struct ebt_match filter_vlan = { 172static struct ebt_match filter_vlan __read_mostly = {
173 .name = EBT_VLAN_MATCH, 173 .name = EBT_VLAN_MATCH,
174 .match = ebt_filter_vlan, 174 .match = ebt_filter_vlan,
175 .check = ebt_check_vlan, 175 .check = ebt_check_vlan,
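
The ebtables hunks above all apply the same two-part cleanup: the extension's configuration blob is read through a const void * pointer (so the explicit casts disappear), and the registration structures are tagged __read_mostly because they are written only at register/unregister time. A minimal match skeleton in that style is sketched below; the name "foo" and its ebt_foo_info payload are invented for illustration, while the ebt_match fields and the ebt_register_match()/ebt_unregister_match() calls are the ones the modules in this diff already use.

#include <linux/module.h>
#include <linux/netfilter_bridge/ebtables.h>

/* Hypothetical private data for the example match. */
struct ebt_foo_info {
	__u8 invert;
};

static int ebt_filter_foo(const struct sk_buff *skb,
			  const struct net_device *in,
			  const struct net_device *out,
			  const void *data, unsigned int datalen)
{
	const struct ebt_foo_info *info = data;	/* no cast needed any more */

	/* A real extension inspects the skb here; this stub only honours
	 * the invert flag so the sketch stays short. */
	return info->invert ? EBT_NOMATCH : EBT_MATCH;
}

static int ebt_foo_check(const char *tablename, unsigned int hookmask,
			 const struct ebt_entry *e, void *data,
			 unsigned int datalen)
{
	if (datalen != EBT_ALIGN(sizeof(struct ebt_foo_info)))
		return -EINVAL;
	return 0;
}

static struct ebt_match filter_foo __read_mostly = {
	.name	= "foo",
	.match	= ebt_filter_foo,
	.check	= ebt_foo_check,
	.me	= THIS_MODULE,
};

static int __init ebt_foo_init(void)
{
	return ebt_register_match(&filter_foo);
}

static void __exit ebt_foo_fini(void)
{
	ebt_unregister_match(&filter_foo);
}

module_init(ebt_foo_init);
module_exit(ebt_foo_fini);
MODULE_LICENSE("GPL");
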
diff --git a/net/core/dev.c b/net/core/dev.c
index c9c593e1ba6f..edaff2720e10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2962,6 +2962,102 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2962} 2962}
2963EXPORT_SYMBOL(dev_unicast_add); 2963EXPORT_SYMBOL(dev_unicast_add);
2964 2964
2965int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
2966 struct dev_addr_list **from, int *from_count)
2967{
2968 struct dev_addr_list *da, *next;
2969 int err = 0;
2970
2971 da = *from;
2972 while (da != NULL) {
2973 next = da->next;
2974 if (!da->da_synced) {
2975 err = __dev_addr_add(to, to_count,
2976 da->da_addr, da->da_addrlen, 0);
2977 if (err < 0)
2978 break;
2979 da->da_synced = 1;
2980 da->da_users++;
2981 } else if (da->da_users == 1) {
2982 __dev_addr_delete(to, to_count,
2983 da->da_addr, da->da_addrlen, 0);
2984 __dev_addr_delete(from, from_count,
2985 da->da_addr, da->da_addrlen, 0);
2986 }
2987 da = next;
2988 }
2989 return err;
2990}
2991
2992void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
2993 struct dev_addr_list **from, int *from_count)
2994{
2995 struct dev_addr_list *da, *next;
2996
2997 da = *from;
2998 while (da != NULL) {
2999 next = da->next;
3000 if (da->da_synced) {
3001 __dev_addr_delete(to, to_count,
3002 da->da_addr, da->da_addrlen, 0);
3003 da->da_synced = 0;
3004 __dev_addr_delete(from, from_count,
3005 da->da_addr, da->da_addrlen, 0);
3006 }
3007 da = next;
3008 }
3009}
3010
3011/**
3012 * dev_unicast_sync - Synchronize device's unicast list to another device
3013 * @to: destination device
3014 * @from: source device
3015 *
3016 * Add newly added addresses to the destination device and release
3017 * addresses that have no users left. The source device must be
3018 * locked by netif_tx_lock_bh.
3019 *
3020 * This function is intended to be called from the dev->set_rx_mode
3021 * function of layered software devices.
3022 */
3023int dev_unicast_sync(struct net_device *to, struct net_device *from)
3024{
3025 int err = 0;
3026
3027 netif_tx_lock_bh(to);
3028 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3029 &from->uc_list, &from->uc_count);
3030 if (!err)
3031 __dev_set_rx_mode(to);
3032 netif_tx_unlock_bh(to);
3033 return err;
3034}
3035EXPORT_SYMBOL(dev_unicast_sync);
3036
3037/**
3038 * dev_unicast_unsync - Remove synchronized addresses from the destination
3039 * device
3040 * @to: destination device
3041 * @from: source device
3042 *
3043 * Remove all addresses that were added to the destination device by
3044 * dev_unicast_sync(). This function is intended to be called from the
3045 * dev->stop function of layered software devices.
3046 */
3047void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3048{
3049 netif_tx_lock_bh(from);
3050 netif_tx_lock_bh(to);
3051
3052 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3053 &from->uc_list, &from->uc_count);
3054 __dev_set_rx_mode(to);
3055
3056 netif_tx_unlock_bh(to);
3057 netif_tx_unlock_bh(from);
3058}
3059EXPORT_SYMBOL(dev_unicast_unsync);
3060
2965static void __dev_addr_discard(struct dev_addr_list **list) 3061static void __dev_addr_discard(struct dev_addr_list **list)
2966{ 3062{
2967 struct dev_addr_list *tmp; 3063 struct dev_addr_list *tmp;
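
The new dev_unicast_sync()/dev_unicast_unsync() pair, and the __dev_addr_sync()/__dev_addr_unsync() helpers they are built on, mirror the existing multicast synchronisation (the next file rewrites dev_mc_sync()/dev_mc_unsync() on top of the same helpers). As the kerneldoc above states, the sync side belongs in a layered device's dev->set_rx_mode and the unsync side in its dev->stop. A hedged sketch of a stacked driver using both; vdev, lower_dev and struct vdev_priv are illustrative names, not anything introduced by this patch.

struct vdev_priv {
	struct net_device *lower_dev;	/* real device we are stacked on */
};

/* dev->set_rx_mode: invoked under netif_tx_lock_bh(vdev), so the source
 * side is already locked as the helpers' kerneldoc requires. */
static void vdev_set_rx_mode(struct net_device *vdev)
{
	struct vdev_priv *p = netdev_priv(vdev);

	dev_mc_sync(p->lower_dev, vdev);
	dev_unicast_sync(p->lower_dev, vdev);
}

/* dev->stop: drop every address this device pushed down. */
static int vdev_stop(struct net_device *vdev)
{
	struct vdev_priv *p = netdev_priv(vdev);

	dev_mc_unsync(p->lower_dev, vdev);
	dev_unicast_unsync(p->lower_dev, vdev);
	return 0;
}
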
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index cadbfbf7e7f5..cec582563e0d 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -113,32 +113,15 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
113 * locked by netif_tx_lock_bh. 113 * locked by netif_tx_lock_bh.
114 * 114 *
115 * This function is intended to be called from the dev->set_multicast_list 115 * This function is intended to be called from the dev->set_multicast_list
116 * function of layered software devices. 116 * or dev->set_rx_mode function of layered software devices.
117 */ 117 */
118int dev_mc_sync(struct net_device *to, struct net_device *from) 118int dev_mc_sync(struct net_device *to, struct net_device *from)
119{ 119{
120 struct dev_addr_list *da, *next;
121 int err = 0; 120 int err = 0;
122 121
123 netif_tx_lock_bh(to); 122 netif_tx_lock_bh(to);
124 da = from->mc_list; 123 err = __dev_addr_sync(&to->mc_list, &to->mc_count,
125 while (da != NULL) { 124 &from->mc_list, &from->mc_count);
126 next = da->next;
127 if (!da->da_synced) {
128 err = __dev_addr_add(&to->mc_list, &to->mc_count,
129 da->da_addr, da->da_addrlen, 0);
130 if (err < 0)
131 break;
132 da->da_synced = 1;
133 da->da_users++;
134 } else if (da->da_users == 1) {
135 __dev_addr_delete(&to->mc_list, &to->mc_count,
136 da->da_addr, da->da_addrlen, 0);
137 __dev_addr_delete(&from->mc_list, &from->mc_count,
138 da->da_addr, da->da_addrlen, 0);
139 }
140 da = next;
141 }
142 if (!err) 125 if (!err)
143 __dev_set_rx_mode(to); 126 __dev_set_rx_mode(to);
144 netif_tx_unlock_bh(to); 127 netif_tx_unlock_bh(to);
@@ -160,23 +143,11 @@ EXPORT_SYMBOL(dev_mc_sync);
160 */ 143 */
161void dev_mc_unsync(struct net_device *to, struct net_device *from) 144void dev_mc_unsync(struct net_device *to, struct net_device *from)
162{ 145{
163 struct dev_addr_list *da, *next;
164
165 netif_tx_lock_bh(from); 146 netif_tx_lock_bh(from);
166 netif_tx_lock_bh(to); 147 netif_tx_lock_bh(to);
167 148
168 da = from->mc_list; 149 __dev_addr_unsync(&to->mc_list, &to->mc_count,
169 while (da != NULL) { 150 &from->mc_list, &from->mc_count);
170 next = da->next;
171 if (da->da_synced) {
172 __dev_addr_delete(&to->mc_list, &to->mc_count,
173 da->da_addr, da->da_addrlen, 0);
174 da->da_synced = 0;
175 __dev_addr_delete(&from->mc_list, &from->mc_count,
176 da->da_addr, da->da_addrlen, 0);
177 }
178 da = next;
179 }
180 __dev_set_rx_mode(to); 151 __dev_set_rx_mode(to);
181 152
182 netif_tx_unlock_bh(to); 153 netif_tx_unlock_bh(to);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index eebccdbdbaca..bfcdfaebca5c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -170,8 +170,6 @@
170 170
171#define VERSION "pktgen v2.69: Packet Generator for packet performance testing.\n" 171#define VERSION "pktgen v2.69: Packet Generator for packet performance testing.\n"
172 172
173/* The buckets are exponential in 'width' */
174#define LAT_BUCKETS_MAX 32
175#define IP_NAME_SZ 32 173#define IP_NAME_SZ 32
176#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ 174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
177#define MPLS_STACK_BOTTOM htonl(0x00000100) 175#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -2044,7 +2042,6 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
2044 __u64 now; 2042 __u64 now;
2045 2043
2046 start = now = getCurUs(); 2044 start = now = getCurUs();
2047 printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now));
2048 while (now < spin_until_us) { 2045 while (now < spin_until_us) {
2049 /* TODO: optimize sleeping behavior */ 2046 /* TODO: optimize sleeping behavior */
2050 if (spin_until_us - now > jiffies_to_usecs(1) + 1) 2047 if (spin_until_us - now > jiffies_to_usecs(1) + 1)
diff --git a/net/core/sock.c b/net/core/sock.c
index 1c4b1cd16d65..433715fb141a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -667,6 +667,13 @@ set_rcvbuf:
667 else 667 else
668 clear_bit(SOCK_PASSSEC, &sock->flags); 668 clear_bit(SOCK_PASSSEC, &sock->flags);
669 break; 669 break;
670 case SO_MARK:
671 if (!capable(CAP_NET_ADMIN))
672 ret = -EPERM;
673 else {
674 sk->sk_mark = val;
675 }
676 break;
670 677
671 /* We implement the SO_SNDLOWAT etc to 678 /* We implement the SO_SNDLOWAT etc to
672 not be settable (1003.1g 5.3) */ 679 not be settable (1003.1g 5.3) */
@@ -836,6 +843,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
836 case SO_PEERSEC: 843 case SO_PEERSEC:
837 return security_socket_getpeersec_stream(sock, optval, optlen, len); 844 return security_socket_getpeersec_stream(sock, optval, optlen, len);
838 845
846 case SO_MARK:
847 v.val = sk->sk_mark;
848 break;
849
839 default: 850 default:
840 return -ENOPROTOOPT; 851 return -ENOPROTOOPT;
841 } 852 }
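
The sock.c hunks introduce SO_MARK: setsockopt() stores the value in sk->sk_mark (rejecting unprivileged callers with -EPERM), and getsockopt() reads it back. From userspace it behaves like any other integer SOL_SOCKET option. A small sketch follows; the fallback value 36 is the asm-generic/x86 constant and is an assumption here, since the number is architecture-specific and this patch predates its appearance in distribution headers.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>

#ifndef SO_MARK
#define SO_MARK 36	/* asm-generic value; differs on some architectures */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int mark = 0x42, readback = 0;
	socklen_t len = sizeof(readback);

	if (fd < 0)
		return 1;

	/* Needs CAP_NET_ADMIN, otherwise fails with EPERM (see the hunk above). */
	if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0)
		fprintf(stderr, "SO_MARK: %s\n", strerror(errno));
	else if (getsockopt(fd, SOL_SOCKET, SO_MARK, &readback, &len) == 0)
		printf("sk_mark = %#x\n", readback);

	return 0;
}
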
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 9e38b0d6195c..c982ad88223d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -218,7 +218,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
218 return; 218 return;
219 } 219 }
220 220
221 sk = inet_lookup(&dccp_hashinfo, iph->daddr, dh->dccph_dport, 221 sk = inet_lookup(&init_net, &dccp_hashinfo, iph->daddr, dh->dccph_dport,
222 iph->saddr, dh->dccph_sport, inet_iif(skb)); 222 iph->saddr, dh->dccph_sport, inet_iif(skb));
223 if (sk == NULL) { 223 if (sk == NULL) {
224 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 224 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
@@ -436,7 +436,7 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
436 if (req != NULL) 436 if (req != NULL)
437 return dccp_check_req(sk, skb, req, prev); 437 return dccp_check_req(sk, skb, req, prev);
438 438
439 nsk = inet_lookup_established(&dccp_hashinfo, 439 nsk = inet_lookup_established(&init_net, &dccp_hashinfo,
440 iph->saddr, dh->dccph_sport, 440 iph->saddr, dh->dccph_sport,
441 iph->daddr, dh->dccph_dport, 441 iph->daddr, dh->dccph_dport,
442 inet_iif(skb)); 442 inet_iif(skb));
@@ -817,7 +817,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
817 817
818 /* Step 2: 818 /* Step 2:
819 * Look up flow ID in table and get corresponding socket */ 819 * Look up flow ID in table and get corresponding socket */
820 sk = __inet_lookup(&dccp_hashinfo, 820 sk = __inet_lookup(&init_net, &dccp_hashinfo,
821 iph->saddr, dh->dccph_sport, 821 iph->saddr, dh->dccph_sport,
822 iph->daddr, dh->dccph_dport, inet_iif(skb)); 822 iph->daddr, dh->dccph_dport, inet_iif(skb));
823 /* 823 /*
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index f42b75ce7f5c..ed0a0053a797 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -101,8 +101,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
101 int err; 101 int err;
102 __u64 seq; 102 __u64 seq;
103 103
104 sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport, 104 sk = inet6_lookup(&init_net, &dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
105 &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); 105 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
106 106
107 if (sk == NULL) { 107 if (sk == NULL) {
108 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); 108 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
@@ -366,7 +366,7 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
366 if (req != NULL) 366 if (req != NULL)
367 return dccp_check_req(sk, skb, req, prev); 367 return dccp_check_req(sk, skb, req, prev);
368 368
369 nsk = __inet6_lookup_established(&dccp_hashinfo, 369 nsk = __inet6_lookup_established(&init_net, &dccp_hashinfo,
370 &iph->saddr, dh->dccph_sport, 370 &iph->saddr, dh->dccph_sport,
371 &iph->daddr, ntohs(dh->dccph_dport), 371 &iph->daddr, ntohs(dh->dccph_dport),
372 inet6_iif(skb)); 372 inet6_iif(skb));
@@ -797,7 +797,7 @@ static int dccp_v6_rcv(struct sk_buff *skb)
797 797
798 /* Step 2: 798 /* Step 2:
799 * Look up flow ID in table and get corresponding socket */ 799 * Look up flow ID in table and get corresponding socket */
800 sk = __inet6_lookup(&dccp_hashinfo, &ipv6_hdr(skb)->saddr, 800 sk = __inet6_lookup(&init_net, &dccp_hashinfo, &ipv6_hdr(skb)->saddr,
801 dh->dccph_sport, 801 dh->dccph_sport,
802 &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport), 802 &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport),
803 inet6_iif(skb)); 803 inet6_iif(skb));
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 24e2b7294bf8..19880b086e71 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -343,6 +343,7 @@ config INET_ESP
343 tristate "IP: ESP transformation" 343 tristate "IP: ESP transformation"
344 select XFRM 344 select XFRM
345 select CRYPTO 345 select CRYPTO
346 select CRYPTO_AEAD
346 select CRYPTO_HMAC 347 select CRYPTO_HMAC
347 select CRYPTO_MD5 348 select CRYPTO_MD5
348 select CRYPTO_CBC 349 select CRYPTO_CBC
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index d76803a3dcae..9d4555ec0b59 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -300,7 +300,7 @@ static void ah_destroy(struct xfrm_state *x)
300} 300}
301 301
302 302
303static struct xfrm_type ah_type = 303static const struct xfrm_type ah_type =
304{ 304{
305 .description = "AH4", 305 .description = "AH4",
306 .owner = THIS_MODULE, 306 .owner = THIS_MODULE,
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 5976c598cc4b..8e17f65f4002 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -558,8 +558,9 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
558 */ 558 */
559struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, 559struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
560 struct net_device *dev, __be32 src_ip, 560 struct net_device *dev, __be32 src_ip,
561 unsigned char *dest_hw, unsigned char *src_hw, 561 const unsigned char *dest_hw,
562 unsigned char *target_hw) 562 const unsigned char *src_hw,
563 const unsigned char *target_hw)
563{ 564{
564 struct sk_buff *skb; 565 struct sk_buff *skb;
565 struct arphdr *arp; 566 struct arphdr *arp;
@@ -672,8 +673,8 @@ void arp_xmit(struct sk_buff *skb)
672 */ 673 */
673void arp_send(int type, int ptype, __be32 dest_ip, 674void arp_send(int type, int ptype, __be32 dest_ip,
674 struct net_device *dev, __be32 src_ip, 675 struct net_device *dev, __be32 src_ip,
675 unsigned char *dest_hw, unsigned char *src_hw, 676 const unsigned char *dest_hw, const unsigned char *src_hw,
676 unsigned char *target_hw) 677 const unsigned char *target_hw)
677{ 678{
678 struct sk_buff *skb; 679 struct sk_buff *skb;
679 680
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 21f71bf912d5..f282b26f63eb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -64,7 +64,7 @@
64#include <net/rtnetlink.h> 64#include <net/rtnetlink.h>
65#include <net/net_namespace.h> 65#include <net/net_namespace.h>
66 66
67struct ipv4_devconf ipv4_devconf = { 67static struct ipv4_devconf ipv4_devconf = {
68 .data = { 68 .data = {
69 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1, 69 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1,
70 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1, 70 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1,
@@ -485,46 +485,41 @@ errout:
485 return err; 485 return err;
486} 486}
487 487
488static struct in_ifaddr *rtm_to_ifaddr(struct nlmsghdr *nlh) 488static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
489{ 489{
490 struct nlattr *tb[IFA_MAX+1]; 490 struct nlattr *tb[IFA_MAX+1];
491 struct in_ifaddr *ifa; 491 struct in_ifaddr *ifa;
492 struct ifaddrmsg *ifm; 492 struct ifaddrmsg *ifm;
493 struct net_device *dev; 493 struct net_device *dev;
494 struct in_device *in_dev; 494 struct in_device *in_dev;
495 int err = -EINVAL; 495 int err;
496 496
497 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); 497 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
498 if (err < 0) 498 if (err < 0)
499 goto errout; 499 goto errout;
500 500
501 ifm = nlmsg_data(nlh); 501 ifm = nlmsg_data(nlh);
502 if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL) { 502 err = -EINVAL;
503 err = -EINVAL; 503 if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
504 goto errout; 504 goto errout;
505 }
506 505
507 dev = __dev_get_by_index(&init_net, ifm->ifa_index); 506 dev = __dev_get_by_index(net, ifm->ifa_index);
508 if (dev == NULL) { 507 err = -ENODEV;
509 err = -ENODEV; 508 if (dev == NULL)
510 goto errout; 509 goto errout;
511 }
512 510
513 in_dev = __in_dev_get_rtnl(dev); 511 in_dev = __in_dev_get_rtnl(dev);
514 if (in_dev == NULL) { 512 err = -ENOBUFS;
515 err = -ENOBUFS; 513 if (in_dev == NULL)
516 goto errout; 514 goto errout;
517 }
518 515
519 ifa = inet_alloc_ifa(); 516 ifa = inet_alloc_ifa();
520 if (ifa == NULL) { 517 if (ifa == NULL)
521 /* 518 /*
522 * A potential indev allocation can be left alive, it stays 519 * A potential indev allocation can be left alive, it stays
523 * assigned to its device and is destroy with it. 520 * assigned to its device and is destroy with it.
524 */ 521 */
525 err = -ENOBUFS;
526 goto errout; 522 goto errout;
527 }
528 523
529 ipv4_devconf_setall(in_dev); 524 ipv4_devconf_setall(in_dev);
530 in_dev_hold(in_dev); 525 in_dev_hold(in_dev);
@@ -568,7 +563,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
568 if (net != &init_net) 563 if (net != &init_net)
569 return -EINVAL; 564 return -EINVAL;
570 565
571 ifa = rtm_to_ifaddr(nlh); 566 ifa = rtm_to_ifaddr(net, nlh);
572 if (IS_ERR(ifa)) 567 if (IS_ERR(ifa))
573 return PTR_ERR(ifa); 568 return PTR_ERR(ifa);
574 569
@@ -1182,7 +1177,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1182 1177
1183 s_ip_idx = ip_idx = cb->args[1]; 1178 s_ip_idx = ip_idx = cb->args[1];
1184 idx = 0; 1179 idx = 0;
1185 for_each_netdev(&init_net, dev) { 1180 for_each_netdev(net, dev) {
1186 if (idx < s_idx) 1181 if (idx < s_idx)
1187 goto cont; 1182 goto cont;
1188 if (idx > s_idx) 1183 if (idx > s_idx)
@@ -1216,7 +1211,9 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh,
1216 struct sk_buff *skb; 1211 struct sk_buff *skb;
1217 u32 seq = nlh ? nlh->nlmsg_seq : 0; 1212 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1218 int err = -ENOBUFS; 1213 int err = -ENOBUFS;
1214 struct net *net;
1219 1215
1216 net = ifa->ifa_dev->dev->nd_net;
1220 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); 1217 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1221 if (skb == NULL) 1218 if (skb == NULL)
1222 goto errout; 1219 goto errout;
@@ -1228,10 +1225,10 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh,
1228 kfree_skb(skb); 1225 kfree_skb(skb);
1229 goto errout; 1226 goto errout;
1230 } 1227 }
1231 err = rtnl_notify(skb, &init_net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); 1228 err = rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1232errout: 1229errout:
1233 if (err < 0) 1230 if (err < 0)
1234 rtnl_set_sk_err(&init_net, RTNLGRP_IPV4_IFADDR, err); 1231 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1235} 1232}
1236 1233
1237#ifdef CONFIG_SYSCTL 1234#ifdef CONFIG_SYSCTL
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 28ea5c77ca23..258d17631b4b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,27 +1,118 @@
1#include <crypto/aead.h>
2#include <crypto/authenc.h>
1#include <linux/err.h> 3#include <linux/err.h>
2#include <linux/module.h> 4#include <linux/module.h>
3#include <net/ip.h> 5#include <net/ip.h>
4#include <net/xfrm.h> 6#include <net/xfrm.h>
5#include <net/esp.h> 7#include <net/esp.h>
6#include <linux/scatterlist.h> 8#include <linux/scatterlist.h>
7#include <linux/crypto.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/pfkeyv2.h> 10#include <linux/pfkeyv2.h>
10#include <linux/random.h> 11#include <linux/rtnetlink.h>
12#include <linux/slab.h>
11#include <linux/spinlock.h> 13#include <linux/spinlock.h>
12#include <linux/in6.h> 14#include <linux/in6.h>
13#include <net/icmp.h> 15#include <net/icmp.h>
14#include <net/protocol.h> 16#include <net/protocol.h>
15#include <net/udp.h> 17#include <net/udp.h>
16 18
19struct esp_skb_cb {
20 struct xfrm_skb_cb xfrm;
21 void *tmp;
22};
23
24#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
25
26/*
27 * Allocate an AEAD request structure with extra space for SG and IV.
28 *
29 * For alignment considerations the IV is placed at the front, followed
30 * by the request and finally the SG list.
31 *
32 * TODO: Use spare space in skb for this where possible.
33 */
34static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
35{
36 unsigned int len;
37
38 len = crypto_aead_ivsize(aead);
39 if (len) {
40 len += crypto_aead_alignmask(aead) &
41 ~(crypto_tfm_ctx_alignment() - 1);
42 len = ALIGN(len, crypto_tfm_ctx_alignment());
43 }
44
45 len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
46 len = ALIGN(len, __alignof__(struct scatterlist));
47
48 len += sizeof(struct scatterlist) * nfrags;
49
50 return kmalloc(len, GFP_ATOMIC);
51}
52
53static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
54{
55 return crypto_aead_ivsize(aead) ?
56 PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
57}
58
59static inline struct aead_givcrypt_request *esp_tmp_givreq(
60 struct crypto_aead *aead, u8 *iv)
61{
62 struct aead_givcrypt_request *req;
63
64 req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
65 crypto_tfm_ctx_alignment());
66 aead_givcrypt_set_tfm(req, aead);
67 return req;
68}
69
70static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
71{
72 struct aead_request *req;
73
74 req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
75 crypto_tfm_ctx_alignment());
76 aead_request_set_tfm(req, aead);
77 return req;
78}
79
80static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
81 struct aead_request *req)
82{
83 return (void *)ALIGN((unsigned long)(req + 1) +
84 crypto_aead_reqsize(aead),
85 __alignof__(struct scatterlist));
86}
87
88static inline struct scatterlist *esp_givreq_sg(
89 struct crypto_aead *aead, struct aead_givcrypt_request *req)
90{
91 return (void *)ALIGN((unsigned long)(req + 1) +
92 crypto_aead_reqsize(aead),
93 __alignof__(struct scatterlist));
94}
95
96static void esp_output_done(struct crypto_async_request *base, int err)
97{
98 struct sk_buff *skb = base->data;
99
100 kfree(ESP_SKB_CB(skb)->tmp);
101 xfrm_output_resume(skb, err);
102}
103
17static int esp_output(struct xfrm_state *x, struct sk_buff *skb) 104static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
18{ 105{
19 int err; 106 int err;
20 struct ip_esp_hdr *esph; 107 struct ip_esp_hdr *esph;
21 struct crypto_blkcipher *tfm; 108 struct crypto_aead *aead;
22 struct blkcipher_desc desc; 109 struct aead_givcrypt_request *req;
110 struct scatterlist *sg;
111 struct scatterlist *asg;
23 struct esp_data *esp; 112 struct esp_data *esp;
24 struct sk_buff *trailer; 113 struct sk_buff *trailer;
114 void *tmp;
115 u8 *iv;
25 u8 *tail; 116 u8 *tail;
26 int blksize; 117 int blksize;
27 int clen; 118 int clen;
@@ -36,18 +127,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
36 clen = skb->len; 127 clen = skb->len;
37 128
38 esp = x->data; 129 esp = x->data;
39 alen = esp->auth.icv_trunc_len; 130 aead = esp->aead;
40 tfm = esp->conf.tfm; 131 alen = crypto_aead_authsize(aead);
41 desc.tfm = tfm; 132
42 desc.flags = 0; 133 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
43 blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
44 clen = ALIGN(clen + 2, blksize); 134 clen = ALIGN(clen + 2, blksize);
45 if (esp->conf.padlen) 135 if (esp->padlen)
46 clen = ALIGN(clen, esp->conf.padlen); 136 clen = ALIGN(clen, esp->padlen);
137
138 if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
139 goto error;
140 nfrags = err;
47 141
48 if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) 142 tmp = esp_alloc_tmp(aead, nfrags + 1);
143 if (!tmp)
49 goto error; 144 goto error;
50 145
146 iv = esp_tmp_iv(aead, tmp);
147 req = esp_tmp_givreq(aead, iv);
148 asg = esp_givreq_sg(aead, req);
149 sg = asg + 1;
150
51 /* Fill padding... */ 151 /* Fill padding... */
52 tail = skb_tail_pointer(trailer); 152 tail = skb_tail_pointer(trailer);
53 do { 153 do {
@@ -56,28 +156,34 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
56 tail[i] = i + 1; 156 tail[i] = i + 1;
57 } while (0); 157 } while (0);
58 tail[clen - skb->len - 2] = (clen - skb->len) - 2; 158 tail[clen - skb->len - 2] = (clen - skb->len) - 2;
59 pskb_put(skb, trailer, clen - skb->len); 159 tail[clen - skb->len - 1] = *skb_mac_header(skb);
160 pskb_put(skb, trailer, clen - skb->len + alen);
60 161
61 skb_push(skb, -skb_network_offset(skb)); 162 skb_push(skb, -skb_network_offset(skb));
62 esph = ip_esp_hdr(skb); 163 esph = ip_esp_hdr(skb);
63 *(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
64 *skb_mac_header(skb) = IPPROTO_ESP; 164 *skb_mac_header(skb) = IPPROTO_ESP;
65 165
66 spin_lock_bh(&x->lock);
67
68 /* this is non-NULL only with UDP Encapsulation */ 166 /* this is non-NULL only with UDP Encapsulation */
69 if (x->encap) { 167 if (x->encap) {
70 struct xfrm_encap_tmpl *encap = x->encap; 168 struct xfrm_encap_tmpl *encap = x->encap;
71 struct udphdr *uh; 169 struct udphdr *uh;
72 __be32 *udpdata32; 170 __be32 *udpdata32;
171 unsigned int sport, dport;
172 int encap_type;
173
174 spin_lock_bh(&x->lock);
175 sport = encap->encap_sport;
176 dport = encap->encap_dport;
177 encap_type = encap->encap_type;
178 spin_unlock_bh(&x->lock);
73 179
74 uh = (struct udphdr *)esph; 180 uh = (struct udphdr *)esph;
75 uh->source = encap->encap_sport; 181 uh->source = sport;
76 uh->dest = encap->encap_dport; 182 uh->dest = dport;
77 uh->len = htons(skb->len + alen - skb_transport_offset(skb)); 183 uh->len = htons(skb->len - skb_transport_offset(skb));
78 uh->check = 0; 184 uh->check = 0;
79 185
80 switch (encap->encap_type) { 186 switch (encap_type) {
81 default: 187 default:
82 case UDP_ENCAP_ESPINUDP: 188 case UDP_ENCAP_ESPINUDP:
83 esph = (struct ip_esp_hdr *)(uh + 1); 189 esph = (struct ip_esp_hdr *)(uh + 1);
@@ -95,131 +201,45 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
95 esph->spi = x->id.spi; 201 esph->spi = x->id.spi;
96 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq); 202 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
97 203
98 if (esp->conf.ivlen) { 204 sg_init_table(sg, nfrags);
99 if (unlikely(!esp->conf.ivinitted)) { 205 skb_to_sgvec(skb, sg,
100 get_random_bytes(esp->conf.ivec, esp->conf.ivlen); 206 esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
101 esp->conf.ivinitted = 1; 207 clen + alen);
102 } 208 sg_init_one(asg, esph, sizeof(*esph));
103 crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen); 209
104 } 210 aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
105 211 aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
106 do { 212 aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
107 struct scatterlist *sg = &esp->sgbuf[0]; 213 aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
108 214
109 if (unlikely(nfrags > ESP_NUM_FAST_SG)) { 215 ESP_SKB_CB(skb)->tmp = tmp;
110 sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); 216 err = crypto_aead_givencrypt(req);
111 if (!sg) 217 if (err == -EINPROGRESS)
112 goto unlock; 218 goto error;
113 }
114 sg_init_table(sg, nfrags);
115 skb_to_sgvec(skb, sg,
116 esph->enc_data +
117 esp->conf.ivlen -
118 skb->data, clen);
119 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
120 if (unlikely(sg != &esp->sgbuf[0]))
121 kfree(sg);
122 } while (0);
123
124 if (unlikely(err))
125 goto unlock;
126
127 if (esp->conf.ivlen) {
128 memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
129 crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
130 }
131 219
132 if (esp->auth.icv_full_len) { 220 if (err == -EBUSY)
133 err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, 221 err = NET_XMIT_DROP;
134 sizeof(*esph) + esp->conf.ivlen + clen);
135 memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
136 }
137 222
138unlock: 223 kfree(tmp);
139 spin_unlock_bh(&x->lock);
140 224
141error: 225error:
142 return err; 226 return err;
143} 227}
144 228
145/* 229static int esp_input_done2(struct sk_buff *skb, int err)
146 * Note: detecting truncated vs. non-truncated authentication data is very
147 * expensive, so we only support truncated data, which is the recommended
148 * and common case.
149 */
150static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
151{ 230{
152 struct iphdr *iph; 231 struct iphdr *iph;
153 struct ip_esp_hdr *esph; 232 struct xfrm_state *x = xfrm_input_state(skb);
154 struct esp_data *esp = x->data; 233 struct esp_data *esp = x->data;
155 struct crypto_blkcipher *tfm = esp->conf.tfm; 234 struct crypto_aead *aead = esp->aead;
156 struct blkcipher_desc desc = { .tfm = tfm }; 235 int alen = crypto_aead_authsize(aead);
157 struct sk_buff *trailer; 236 int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
158 int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); 237 int elen = skb->len - hlen;
159 int alen = esp->auth.icv_trunc_len;
160 int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
161 int nfrags;
162 int ihl; 238 int ihl;
163 u8 nexthdr[2]; 239 u8 nexthdr[2];
164 struct scatterlist *sg;
165 int padlen; 240 int padlen;
166 int err = -EINVAL;
167
168 if (!pskb_may_pull(skb, sizeof(*esph)))
169 goto out;
170
171 if (elen <= 0 || (elen & (blksize-1)))
172 goto out;
173
174 if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
175 goto out;
176 nfrags = err;
177
178 skb->ip_summed = CHECKSUM_NONE;
179
180 spin_lock(&x->lock);
181
182 /* If integrity check is required, do this. */
183 if (esp->auth.icv_full_len) {
184 u8 sum[alen];
185 241
186 err = esp_mac_digest(esp, skb, 0, skb->len - alen); 242 kfree(ESP_SKB_CB(skb)->tmp);
187 if (err)
188 goto unlock;
189
190 if (skb_copy_bits(skb, skb->len - alen, sum, alen))
191 BUG();
192
193 if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
194 err = -EBADMSG;
195 goto unlock;
196 }
197 }
198
199 esph = (struct ip_esp_hdr *)skb->data;
200
201 /* Get ivec. This can be wrong, check against another impls. */
202 if (esp->conf.ivlen)
203 crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
204
205 sg = &esp->sgbuf[0];
206
207 if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
208 err = -ENOMEM;
209 sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
210 if (!sg)
211 goto unlock;
212 }
213 sg_init_table(sg, nfrags);
214 skb_to_sgvec(skb, sg,
215 sizeof(*esph) + esp->conf.ivlen,
216 elen);
217 err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
218 if (unlikely(sg != &esp->sgbuf[0]))
219 kfree(sg);
220
221unlock:
222 spin_unlock(&x->lock);
223 243
224 if (unlikely(err)) 244 if (unlikely(err))
225 goto out; 245 goto out;
@@ -229,15 +249,11 @@ unlock:
229 249
230 err = -EINVAL; 250 err = -EINVAL;
231 padlen = nexthdr[0]; 251 padlen = nexthdr[0];
232 if (padlen+2 >= elen) 252 if (padlen + 2 + alen >= elen)
233 goto out; 253 goto out;
234 254
235 /* ... check padding bits here. Silly. :-) */ 255 /* ... check padding bits here. Silly. :-) */
236 256
237 /* RFC4303: Drop dummy packets without any error */
238 if (nexthdr[1] == IPPROTO_NONE)
239 goto out;
240
241 iph = ip_hdr(skb); 257 iph = ip_hdr(skb);
242 ihl = iph->ihl * 4; 258 ihl = iph->ihl * 4;
243 259
@@ -279,10 +295,87 @@ unlock:
279 } 295 }
280 296
281 pskb_trim(skb, skb->len - alen - padlen - 2); 297 pskb_trim(skb, skb->len - alen - padlen - 2);
282 __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen); 298 __skb_pull(skb, hlen);
283 skb_set_transport_header(skb, -ihl); 299 skb_set_transport_header(skb, -ihl);
284 300
285 return nexthdr[1]; 301 err = nexthdr[1];
302
303 /* RFC4303: Drop dummy packets without any error */
304 if (err == IPPROTO_NONE)
305 err = -EINVAL;
306
307out:
308 return err;
309}
310
311static void esp_input_done(struct crypto_async_request *base, int err)
312{
313 struct sk_buff *skb = base->data;
314
315 xfrm_input_resume(skb, esp_input_done2(skb, err));
316}
317
318/*
319 * Note: detecting truncated vs. non-truncated authentication data is very
320 * expensive, so we only support truncated data, which is the recommended
321 * and common case.
322 */
323static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
324{
325 struct ip_esp_hdr *esph;
326 struct esp_data *esp = x->data;
327 struct crypto_aead *aead = esp->aead;
328 struct aead_request *req;
329 struct sk_buff *trailer;
330 int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
331 int nfrags;
332 void *tmp;
333 u8 *iv;
334 struct scatterlist *sg;
335 struct scatterlist *asg;
336 int err = -EINVAL;
337
338 if (!pskb_may_pull(skb, sizeof(*esph)))
339 goto out;
340
341 if (elen <= 0)
342 goto out;
343
344 if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
345 goto out;
346 nfrags = err;
347
348 err = -ENOMEM;
349 tmp = esp_alloc_tmp(aead, nfrags + 1);
350 if (!tmp)
351 goto out;
352
353 ESP_SKB_CB(skb)->tmp = tmp;
354 iv = esp_tmp_iv(aead, tmp);
355 req = esp_tmp_req(aead, iv);
356 asg = esp_req_sg(aead, req);
357 sg = asg + 1;
358
359 skb->ip_summed = CHECKSUM_NONE;
360
361 esph = (struct ip_esp_hdr *)skb->data;
362
363 /* Get ivec. This can be wrong, check against another impls. */
364 iv = esph->enc_data;
365
366 sg_init_table(sg, nfrags);
367 skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
368 sg_init_one(asg, esph, sizeof(*esph));
369
370 aead_request_set_callback(req, 0, esp_input_done, skb);
371 aead_request_set_crypt(req, sg, sg, elen, iv);
372 aead_request_set_assoc(req, asg, sizeof(*esph));
373
374 err = crypto_aead_decrypt(req);
375 if (err == -EINPROGRESS)
376 goto out;
377
378 err = esp_input_done2(skb, err);
286 379
287out: 380out:
288 return err; 381 return err;
@@ -291,11 +384,11 @@ out:
291static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) 384static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
292{ 385{
293 struct esp_data *esp = x->data; 386 struct esp_data *esp = x->data;
294 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); 387 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
295 u32 align = max_t(u32, blksize, esp->conf.padlen); 388 u32 align = max_t(u32, blksize, esp->padlen);
296 u32 rem; 389 u32 rem;
297 390
298 mtu -= x->props.header_len + esp->auth.icv_trunc_len; 391 mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
299 rem = mtu & (align - 1); 392 rem = mtu & (align - 1);
300 mtu &= ~(align - 1); 393 mtu &= ~(align - 1);
301 394
@@ -342,80 +435,143 @@ static void esp_destroy(struct xfrm_state *x)
342 if (!esp) 435 if (!esp)
343 return; 436 return;
344 437
345 crypto_free_blkcipher(esp->conf.tfm); 438 crypto_free_aead(esp->aead);
346 esp->conf.tfm = NULL;
347 kfree(esp->conf.ivec);
348 esp->conf.ivec = NULL;
349 crypto_free_hash(esp->auth.tfm);
350 esp->auth.tfm = NULL;
351 kfree(esp->auth.work_icv);
352 esp->auth.work_icv = NULL;
353 kfree(esp); 439 kfree(esp);
354} 440}
355 441
356static int esp_init_state(struct xfrm_state *x) 442static int esp_init_aead(struct xfrm_state *x)
357{ 443{
358 struct esp_data *esp = NULL; 444 struct esp_data *esp = x->data;
359 struct crypto_blkcipher *tfm; 445 struct crypto_aead *aead;
360 u32 align; 446 int err;
447
448 aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
449 err = PTR_ERR(aead);
450 if (IS_ERR(aead))
451 goto error;
452
453 esp->aead = aead;
454
455 err = crypto_aead_setkey(aead, x->aead->alg_key,
456 (x->aead->alg_key_len + 7) / 8);
457 if (err)
458 goto error;
459
460 err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
461 if (err)
462 goto error;
463
464error:
465 return err;
466}
361 467
468static int esp_init_authenc(struct xfrm_state *x)
469{
470 struct esp_data *esp = x->data;
471 struct crypto_aead *aead;
472 struct crypto_authenc_key_param *param;
473 struct rtattr *rta;
474 char *key;
475 char *p;
476 char authenc_name[CRYPTO_MAX_ALG_NAME];
477 unsigned int keylen;
478 int err;
479
480 err = -EINVAL;
362 if (x->ealg == NULL) 481 if (x->ealg == NULL)
363 goto error; 482 goto error;
364 483
365 esp = kzalloc(sizeof(*esp), GFP_KERNEL); 484 err = -ENAMETOOLONG;
366 if (esp == NULL) 485 if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
367 return -ENOMEM; 486 x->aalg ? x->aalg->alg_name : "digest_null",
487 x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
488 goto error;
489
490 aead = crypto_alloc_aead(authenc_name, 0, 0);
491 err = PTR_ERR(aead);
492 if (IS_ERR(aead))
493 goto error;
494
495 esp->aead = aead;
496
497 keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
498 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
499 err = -ENOMEM;
500 key = kmalloc(keylen, GFP_KERNEL);
501 if (!key)
502 goto error;
503
504 p = key;
505 rta = (void *)p;
506 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
507 rta->rta_len = RTA_LENGTH(sizeof(*param));
508 param = RTA_DATA(rta);
509 p += RTA_SPACE(sizeof(*param));
368 510
369 if (x->aalg) { 511 if (x->aalg) {
370 struct xfrm_algo_desc *aalg_desc; 512 struct xfrm_algo_desc *aalg_desc;
371 struct crypto_hash *hash;
372 513
373 hash = crypto_alloc_hash(x->aalg->alg_name, 0, 514 memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
374 CRYPTO_ALG_ASYNC); 515 p += (x->aalg->alg_key_len + 7) / 8;
375 if (IS_ERR(hash))
376 goto error;
377
378 esp->auth.tfm = hash;
379 if (crypto_hash_setkey(hash, x->aalg->alg_key,
380 (x->aalg->alg_key_len + 7) / 8))
381 goto error;
382 516
383 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 517 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
384 BUG_ON(!aalg_desc); 518 BUG_ON(!aalg_desc);
385 519
520 err = -EINVAL;
386 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 521 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
387 crypto_hash_digestsize(hash)) { 522 crypto_aead_authsize(aead)) {
388 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 523 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
389 x->aalg->alg_name, 524 x->aalg->alg_name,
390 crypto_hash_digestsize(hash), 525 crypto_aead_authsize(aead),
391 aalg_desc->uinfo.auth.icv_fullbits/8); 526 aalg_desc->uinfo.auth.icv_fullbits/8);
392 goto error; 527 goto free_key;
393 } 528 }
394 529
395 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 530 err = crypto_aead_setauthsize(
396 esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 531 aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
397 532 if (err)
398 esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL); 533 goto free_key;
399 if (!esp->auth.work_icv)
400 goto error;
401 } 534 }
402 535
403 tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC); 536 param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
404 if (IS_ERR(tfm)) 537 memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
405 goto error; 538
406 esp->conf.tfm = tfm; 539 err = crypto_aead_setkey(aead, key, keylen);
407 esp->conf.ivlen = crypto_blkcipher_ivsize(tfm); 540
408 esp->conf.padlen = 0; 541free_key:
409 if (esp->conf.ivlen) { 542 kfree(key);
410 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); 543
411 if (unlikely(esp->conf.ivec == NULL)) 544error:
412 goto error; 545 return err;
413 esp->conf.ivinitted = 0; 546}
414 } 547
415 if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key, 548static int esp_init_state(struct xfrm_state *x)
416 (x->ealg->alg_key_len + 7) / 8)) 549{
550 struct esp_data *esp;
551 struct crypto_aead *aead;
552 u32 align;
553 int err;
554
555 esp = kzalloc(sizeof(*esp), GFP_KERNEL);
556 if (esp == NULL)
557 return -ENOMEM;
558
559 x->data = esp;
560
561 if (x->aead)
562 err = esp_init_aead(x);
563 else
564 err = esp_init_authenc(x);
565
566 if (err)
417 goto error; 567 goto error;
418 x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; 568
569 aead = esp->aead;
570
571 esp->padlen = 0;
572
573 x->props.header_len = sizeof(struct ip_esp_hdr) +
574 crypto_aead_ivsize(aead);
419 if (x->props.mode == XFRM_MODE_TUNNEL) 575 if (x->props.mode == XFRM_MODE_TUNNEL)
420 x->props.header_len += sizeof(struct iphdr); 576 x->props.header_len += sizeof(struct iphdr);
421 else if (x->props.mode == XFRM_MODE_BEET) 577 else if (x->props.mode == XFRM_MODE_BEET)
@@ -434,21 +590,17 @@ static int esp_init_state(struct xfrm_state *x)
434 break; 590 break;
435 } 591 }
436 } 592 }
437 x->data = esp; 593
438 align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); 594 align = ALIGN(crypto_aead_blocksize(aead), 4);
439 if (esp->conf.padlen) 595 if (esp->padlen)
440 align = max_t(u32, align, esp->conf.padlen); 596 align = max_t(u32, align, esp->padlen);
441 x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len; 597 x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
442 return 0;
443 598
444error: 599error:
445 x->data = esp; 600 return err;
446 esp_destroy(x);
447 x->data = NULL;
448 return -EINVAL;
449} 601}
450 602
451static struct xfrm_type esp_type = 603static const struct xfrm_type esp_type =
452{ 604{
453 .description = "ESP4", 605 .description = "ESP4",
454 .owner = THIS_MODULE, 606 .owner = THIS_MODULE,
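
Much of the esp4.c conversion above is mechanical (crypto_blkcipher/crypto_hash calls become crypto_aead calls, per-packet state moves into the esp_alloc_tmp() scratch buffer), but the key handling in esp_init_authenc() deserves a closer look: the separate authentication and encryption keys are packed into a single blob that the authenc() template unpacks again, with a leading rtattr telling it how long the encryption key is. The helper below restates just that packing step as a self-contained sketch; pack_authenc_key() is a made-up name, while the structures and macros are the ones the patch itself uses. States configured with a true AEAD algorithm (x->aead) skip all of this and hand the key straight to crypto_aead_setkey() in esp_init_aead().

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative helper: build the authenc() key blob
 *   [ rtattr{enckeylen} | authentication key | encryption key ]
 * and feed it to crypto_aead_setkey(), as esp_init_authenc() does inline.
 */
static int pack_authenc_key(struct crypto_aead *aead,
			    const u8 *akey, unsigned int alen,
			    const u8 *ekey, unsigned int elen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	unsigned int keylen = RTA_SPACE(sizeof(*param)) + alen + elen;
	char *key, *p;
	int err = -ENOMEM;

	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		return err;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(elen);	/* tells authenc() where the enc key starts */
	p += RTA_SPACE(sizeof(*param));

	memcpy(p, akey, alen);			/* authentication key first ... */
	p += alen;
	memcpy(p, ekey, elen);			/* ... encryption key last */

	err = crypto_aead_setkey(aead, key, keylen);
	kfree(key);
	return err;
}
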
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index d28261826bc2..86ff2711fc95 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -808,7 +808,7 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
808 First of all, we scan fib_info list searching 808 First of all, we scan fib_info list searching
809 for stray nexthop entries, then ignite fib_flush. 809 for stray nexthop entries, then ignite fib_flush.
810 */ 810 */
811 if (fib_sync_down(ifa->ifa_local, NULL, 0)) 811 if (fib_sync_down_addr(dev->nd_net, ifa->ifa_local))
812 fib_flush(dev->nd_net); 812 fib_flush(dev->nd_net);
813 } 813 }
814 } 814 }
@@ -898,7 +898,7 @@ static void nl_fib_lookup_exit(struct net *net)
898 898
899static void fib_disable_ip(struct net_device *dev, int force) 899static void fib_disable_ip(struct net_device *dev, int force)
900{ 900{
901 if (fib_sync_down(0, dev, force)) 901 if (fib_sync_down_dev(dev, force))
902 fib_flush(dev->nd_net); 902 fib_flush(dev->nd_net);
903 rt_cache_flush(0); 903 rt_cache_flush(0);
904 arp_ifdown(dev); 904 arp_ifdown(dev);
@@ -975,6 +975,7 @@ static struct notifier_block fib_netdev_notifier = {
975 975
976static int __net_init ip_fib_net_init(struct net *net) 976static int __net_init ip_fib_net_init(struct net *net)
977{ 977{
978 int err;
978 unsigned int i; 979 unsigned int i;
979 980
980 net->ipv4.fib_table_hash = kzalloc( 981 net->ipv4.fib_table_hash = kzalloc(
@@ -985,7 +986,14 @@ static int __net_init ip_fib_net_init(struct net *net)
985 for (i = 0; i < FIB_TABLE_HASHSZ; i++) 986 for (i = 0; i < FIB_TABLE_HASHSZ; i++)
986 INIT_HLIST_HEAD(&net->ipv4.fib_table_hash[i]); 987 INIT_HLIST_HEAD(&net->ipv4.fib_table_hash[i]);
987 988
988 return fib4_rules_init(net); 989 err = fib4_rules_init(net);
990 if (err < 0)
991 goto fail;
992 return 0;
993
994fail:
995 kfree(net->ipv4.fib_table_hash);
996 return err;
989} 997}
990 998
991static void __net_exit ip_fib_net_exit(struct net *net) 999static void __net_exit ip_fib_net_exit(struct net *net)
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index a15b2f1b2721..76b9c684cccd 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -424,19 +424,43 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
424 424
425 if (fa && fa->fa_tos == tos && 425 if (fa && fa->fa_tos == tos &&
426 fa->fa_info->fib_priority == fi->fib_priority) { 426 fa->fa_info->fib_priority == fi->fib_priority) {
427 struct fib_alias *fa_orig; 427 struct fib_alias *fa_first, *fa_match;
428 428
429 err = -EEXIST; 429 err = -EEXIST;
430 if (cfg->fc_nlflags & NLM_F_EXCL) 430 if (cfg->fc_nlflags & NLM_F_EXCL)
431 goto out; 431 goto out;
432 432
433 /* We have 2 goals:
434 * 1. Find exact match for type, scope, fib_info to avoid
435 * duplicate routes
436 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
437 */
438 fa_match = NULL;
439 fa_first = fa;
440 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
441 list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
442 if (fa->fa_tos != tos)
443 break;
444 if (fa->fa_info->fib_priority != fi->fib_priority)
445 break;
446 if (fa->fa_type == cfg->fc_type &&
447 fa->fa_scope == cfg->fc_scope &&
448 fa->fa_info == fi) {
449 fa_match = fa;
450 break;
451 }
452 }
453
433 if (cfg->fc_nlflags & NLM_F_REPLACE) { 454 if (cfg->fc_nlflags & NLM_F_REPLACE) {
434 struct fib_info *fi_drop; 455 struct fib_info *fi_drop;
435 u8 state; 456 u8 state;
436 457
437 if (fi->fib_treeref > 1) 458 fa = fa_first;
459 if (fa_match) {
460 if (fa == fa_match)
461 err = 0;
438 goto out; 462 goto out;
439 463 }
440 write_lock_bh(&fib_hash_lock); 464 write_lock_bh(&fib_hash_lock);
441 fi_drop = fa->fa_info; 465 fi_drop = fa->fa_info;
442 fa->fa_info = fi; 466 fa->fa_info = fi;
@@ -459,20 +483,11 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
459 * uses the same scope, type, and nexthop 483 * uses the same scope, type, and nexthop
460 * information. 484 * information.
461 */ 485 */
462 fa_orig = fa; 486 if (fa_match)
463 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list); 487 goto out;
464 list_for_each_entry_continue(fa, &f->fn_alias, fa_list) { 488
465 if (fa->fa_tos != tos)
466 break;
467 if (fa->fa_info->fib_priority != fi->fib_priority)
468 break;
469 if (fa->fa_type == cfg->fc_type &&
470 fa->fa_scope == cfg->fc_scope &&
471 fa->fa_info == fi)
472 goto out;
473 }
474 if (!(cfg->fc_nlflags & NLM_F_APPEND)) 489 if (!(cfg->fc_nlflags & NLM_F_APPEND))
475 fa = fa_orig; 490 fa = fa_first;
476 } 491 }
477 492
478 err = -ENOENT; 493 err = -ENOENT;
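
Both this file and the fib_trie.c rework below rely on the same small list idiom when the scan has to start at the alias already in hand: list_for_each_entry_continue() resumes after its cursor, so the code first steps the cursor back one node (which may be the list head itself; that is safe because only the ->next pointer is read before the first body iteration). A generic sketch of the idiom, with names invented for illustration; it mirrors how fa_first/fa_match are located in fn_hash_insert() and fn_trie_insert().

#include <linux/list.h>

struct item {
	struct list_head list;
	int key;
};

/* Scan the list starting at 'start' itself, not at the entry after it. */
static struct item *find_from(struct item *start, struct list_head *head,
			      int key)
{
	/* Step back one node; if 'start' is first, this aliases the head,
	 * which list_for_each_entry_continue() never treats as an item
	 * before advancing past it. */
	struct item *it = list_entry(start->list.prev, struct item, list);

	list_for_each_entry_continue(it, head, list) {
		if (it->key == key)
			return it;	/* may well be 'start' itself */
	}
	return NULL;
}
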
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7912866d987..a13c84763d4c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -229,6 +229,8 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
229 head = &fib_info_hash[hash]; 229 head = &fib_info_hash[hash];
230 230
231 hlist_for_each_entry(fi, node, head, fib_hash) { 231 hlist_for_each_entry(fi, node, head, fib_hash) {
232 if (fi->fib_net != nfi->fib_net)
233 continue;
232 if (fi->fib_nhs != nfi->fib_nhs) 234 if (fi->fib_nhs != nfi->fib_nhs)
233 continue; 235 continue;
234 if (nfi->fib_protocol == fi->fib_protocol && 236 if (nfi->fib_protocol == fi->fib_protocol &&
@@ -687,6 +689,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
687 struct fib_info *fi = NULL; 689 struct fib_info *fi = NULL;
688 struct fib_info *ofi; 690 struct fib_info *ofi;
689 int nhs = 1; 691 int nhs = 1;
692 struct net *net = cfg->fc_nlinfo.nl_net;
690 693
691 /* Fast check to catch the most weird cases */ 694 /* Fast check to catch the most weird cases */
692 if (fib_props[cfg->fc_type].scope > cfg->fc_scope) 695 if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
@@ -727,6 +730,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
727 goto failure; 730 goto failure;
728 fib_info_cnt++; 731 fib_info_cnt++;
729 732
733 fi->fib_net = net;
730 fi->fib_protocol = cfg->fc_protocol; 734 fi->fib_protocol = cfg->fc_protocol;
731 fi->fib_flags = cfg->fc_flags; 735 fi->fib_flags = cfg->fc_flags;
732 fi->fib_priority = cfg->fc_priority; 736 fi->fib_priority = cfg->fc_priority;
@@ -798,8 +802,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
798 if (nhs != 1 || nh->nh_gw) 802 if (nhs != 1 || nh->nh_gw)
799 goto err_inval; 803 goto err_inval;
800 nh->nh_scope = RT_SCOPE_NOWHERE; 804 nh->nh_scope = RT_SCOPE_NOWHERE;
801 nh->nh_dev = dev_get_by_index(cfg->fc_nlinfo.nl_net, 805 nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
802 fi->fib_nh->nh_oif);
803 err = -ENODEV; 806 err = -ENODEV;
804 if (nh->nh_dev == NULL) 807 if (nh->nh_dev == NULL)
805 goto failure; 808 goto failure;
@@ -813,8 +816,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
813 if (fi->fib_prefsrc) { 816 if (fi->fib_prefsrc) {
814 if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst || 817 if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
815 fi->fib_prefsrc != cfg->fc_dst) 818 fi->fib_prefsrc != cfg->fc_dst)
816 if (inet_addr_type(cfg->fc_nlinfo.nl_net, 819 if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
817 fi->fib_prefsrc) != RTN_LOCAL)
818 goto err_inval; 820 goto err_inval;
819 } 821 }
820 822
@@ -1031,70 +1033,74 @@ nla_put_failure:
1031 referring to it. 1033 referring to it.
1032 - device went down -> we must shutdown all nexthops going via it. 1034 - device went down -> we must shutdown all nexthops going via it.
1033 */ 1035 */
1034 1036int fib_sync_down_addr(struct net *net, __be32 local)
1035int fib_sync_down(__be32 local, struct net_device *dev, int force)
1036{ 1037{
1037 int ret = 0; 1038 int ret = 0;
1038 int scope = RT_SCOPE_NOWHERE; 1039 unsigned int hash = fib_laddr_hashfn(local);
1039 1040 struct hlist_head *head = &fib_info_laddrhash[hash];
1040 if (force) 1041 struct hlist_node *node;
1041 scope = -1; 1042 struct fib_info *fi;
1042 1043
1043 if (local && fib_info_laddrhash) { 1044 if (fib_info_laddrhash == NULL || local == 0)
1044 unsigned int hash = fib_laddr_hashfn(local); 1045 return 0;
1045 struct hlist_head *head = &fib_info_laddrhash[hash];
1046 struct hlist_node *node;
1047 struct fib_info *fi;
1048 1046
1049 hlist_for_each_entry(fi, node, head, fib_lhash) { 1047 hlist_for_each_entry(fi, node, head, fib_lhash) {
1050 if (fi->fib_prefsrc == local) { 1048 if (fi->fib_net != net)
1051 fi->fib_flags |= RTNH_F_DEAD; 1049 continue;
1052 ret++; 1050 if (fi->fib_prefsrc == local) {
1053 } 1051 fi->fib_flags |= RTNH_F_DEAD;
1052 ret++;
1054 } 1053 }
1055 } 1054 }
1055 return ret;
1056}
1056 1057
1057 if (dev) { 1058int fib_sync_down_dev(struct net_device *dev, int force)
1058 struct fib_info *prev_fi = NULL; 1059{
1059 unsigned int hash = fib_devindex_hashfn(dev->ifindex); 1060 int ret = 0;
1060 struct hlist_head *head = &fib_info_devhash[hash]; 1061 int scope = RT_SCOPE_NOWHERE;
1061 struct hlist_node *node; 1062 struct fib_info *prev_fi = NULL;
1062 struct fib_nh *nh; 1063 unsigned int hash = fib_devindex_hashfn(dev->ifindex);
1064 struct hlist_head *head = &fib_info_devhash[hash];
1065 struct hlist_node *node;
1066 struct fib_nh *nh;
1063 1067
1064 hlist_for_each_entry(nh, node, head, nh_hash) { 1068 if (force)
1065 struct fib_info *fi = nh->nh_parent; 1069 scope = -1;
1066 int dead;
1067 1070
1068 BUG_ON(!fi->fib_nhs); 1071 hlist_for_each_entry(nh, node, head, nh_hash) {
1069 if (nh->nh_dev != dev || fi == prev_fi) 1072 struct fib_info *fi = nh->nh_parent;
1070 continue; 1073 int dead;
1071 prev_fi = fi; 1074
1072 dead = 0; 1075 BUG_ON(!fi->fib_nhs);
1073 change_nexthops(fi) { 1076 if (nh->nh_dev != dev || fi == prev_fi)
1074 if (nh->nh_flags&RTNH_F_DEAD) 1077 continue;
1075 dead++; 1078 prev_fi = fi;
1076 else if (nh->nh_dev == dev && 1079 dead = 0;
1077 nh->nh_scope != scope) { 1080 change_nexthops(fi) {
1078 nh->nh_flags |= RTNH_F_DEAD; 1081 if (nh->nh_flags&RTNH_F_DEAD)
1082 dead++;
1083 else if (nh->nh_dev == dev &&
1084 nh->nh_scope != scope) {
1085 nh->nh_flags |= RTNH_F_DEAD;
1079#ifdef CONFIG_IP_ROUTE_MULTIPATH 1086#ifdef CONFIG_IP_ROUTE_MULTIPATH
1080 spin_lock_bh(&fib_multipath_lock); 1087 spin_lock_bh(&fib_multipath_lock);
1081 fi->fib_power -= nh->nh_power; 1088 fi->fib_power -= nh->nh_power;
1082 nh->nh_power = 0; 1089 nh->nh_power = 0;
1083 spin_unlock_bh(&fib_multipath_lock); 1090 spin_unlock_bh(&fib_multipath_lock);
1084#endif 1091#endif
1085 dead++; 1092 dead++;
1086 } 1093 }
1087#ifdef CONFIG_IP_ROUTE_MULTIPATH 1094#ifdef CONFIG_IP_ROUTE_MULTIPATH
1088 if (force > 1 && nh->nh_dev == dev) { 1095 if (force > 1 && nh->nh_dev == dev) {
1089 dead = fi->fib_nhs; 1096 dead = fi->fib_nhs;
1090 break; 1097 break;
1091 }
1092#endif
1093 } endfor_nexthops(fi)
1094 if (dead == fi->fib_nhs) {
1095 fi->fib_flags |= RTNH_F_DEAD;
1096 ret++;
1097 } 1098 }
1099#endif
1100 } endfor_nexthops(fi)
1101 if (dead == fi->fib_nhs) {
1102 fi->fib_flags |= RTNH_F_DEAD;
1103 ret++;
1098 } 1104 }
1099 } 1105 }
1100 1106
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index f2f47033f31f..35851c96bdfb 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1205,20 +1205,45 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1205 * and we need to allocate a new one of those as well. 1205 * and we need to allocate a new one of those as well.
1206 */ 1206 */
1207 1207
1208 if (fa && fa->fa_info->fib_priority == fi->fib_priority) { 1208 if (fa && fa->fa_tos == tos &&
1209 struct fib_alias *fa_orig; 1209 fa->fa_info->fib_priority == fi->fib_priority) {
1210 struct fib_alias *fa_first, *fa_match;
1210 1211
1211 err = -EEXIST; 1212 err = -EEXIST;
1212 if (cfg->fc_nlflags & NLM_F_EXCL) 1213 if (cfg->fc_nlflags & NLM_F_EXCL)
1213 goto out; 1214 goto out;
1214 1215
1216 /* We have 2 goals:
1217 * 1. Find exact match for type, scope, fib_info to avoid
1218 * duplicate routes
1219 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1220 */
1221 fa_match = NULL;
1222 fa_first = fa;
1223 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1224 list_for_each_entry_continue(fa, fa_head, fa_list) {
1225 if (fa->fa_tos != tos)
1226 break;
1227 if (fa->fa_info->fib_priority != fi->fib_priority)
1228 break;
1229 if (fa->fa_type == cfg->fc_type &&
1230 fa->fa_scope == cfg->fc_scope &&
1231 fa->fa_info == fi) {
1232 fa_match = fa;
1233 break;
1234 }
1235 }
1236
1215 if (cfg->fc_nlflags & NLM_F_REPLACE) { 1237 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1216 struct fib_info *fi_drop; 1238 struct fib_info *fi_drop;
1217 u8 state; 1239 u8 state;
1218 1240
1219 if (fi->fib_treeref > 1) 1241 fa = fa_first;
1242 if (fa_match) {
1243 if (fa == fa_match)
1244 err = 0;
1220 goto out; 1245 goto out;
1221 1246 }
1222 err = -ENOBUFS; 1247 err = -ENOBUFS;
1223 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); 1248 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1224 if (new_fa == NULL) 1249 if (new_fa == NULL)
@@ -1230,7 +1255,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1230 new_fa->fa_type = cfg->fc_type; 1255 new_fa->fa_type = cfg->fc_type;
1231 new_fa->fa_scope = cfg->fc_scope; 1256 new_fa->fa_scope = cfg->fc_scope;
1232 state = fa->fa_state; 1257 state = fa->fa_state;
1233 new_fa->fa_state &= ~FA_S_ACCESSED; 1258 new_fa->fa_state = state & ~FA_S_ACCESSED;
1234 1259
1235 list_replace_rcu(&fa->fa_list, &new_fa->fa_list); 1260 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1236 alias_free_mem_rcu(fa); 1261 alias_free_mem_rcu(fa);
@@ -1247,20 +1272,11 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1247 * uses the same scope, type, and nexthop 1272 * uses the same scope, type, and nexthop
1248 * information. 1273 * information.
1249 */ 1274 */
1250 fa_orig = fa; 1275 if (fa_match)
1251 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) { 1276 goto out;
1252 if (fa->fa_tos != tos)
1253 break;
1254 if (fa->fa_info->fib_priority != fi->fib_priority)
1255 break;
1256 if (fa->fa_type == cfg->fc_type &&
1257 fa->fa_scope == cfg->fc_scope &&
1258 fa->fa_info == fi)
1259 goto out;
1260 }
1261 1277
1262 if (!(cfg->fc_nlflags & NLM_F_APPEND)) 1278 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1263 fa = fa_orig; 1279 fa = fa_first;
1264 } 1280 }
1265 err = -ENOENT; 1281 err = -ENOENT;
1266 if (!(cfg->fc_nlflags & NLM_F_CREATE)) 1282 if (!(cfg->fc_nlflags & NLM_F_CREATE))
@@ -1600,9 +1616,8 @@ static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1600 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t); 1616 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1601 1617
1602 fa_to_delete = NULL; 1618 fa_to_delete = NULL;
1603 fa_head = fa->fa_list.prev; 1619 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1604 1620 list_for_each_entry_continue(fa, fa_head, fa_list) {
1605 list_for_each_entry(fa, fa_head, fa_list) {
1606 struct fib_info *fi = fa->fa_info; 1621 struct fib_info *fi = fa->fa_info;
1607 1622
1608 if (fa->fa_tos != tos) 1623 if (fa->fa_tos != tos)
@@ -1743,6 +1758,19 @@ static struct leaf *trie_nextleaf(struct leaf *l)
1743 return leaf_walk_rcu(p, c); 1758 return leaf_walk_rcu(p, c);
1744} 1759}
1745 1760
1761static struct leaf *trie_leafindex(struct trie *t, int index)
1762{
1763 struct leaf *l = trie_firstleaf(t);
1764
1765 while (index-- > 0) {
1766 l = trie_nextleaf(l);
1767 if (!l)
1768 break;
1769 }
1770 return l;
1771}
1772
1773
1746/* 1774/*
1747 * Caller must hold RTNL. 1775 * Caller must hold RTNL.
1748 */ 1776 */
@@ -1848,7 +1876,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1848 struct fib_alias *fa; 1876 struct fib_alias *fa;
1849 __be32 xkey = htonl(key); 1877 __be32 xkey = htonl(key);
1850 1878
1851 s_i = cb->args[4]; 1879 s_i = cb->args[5];
1852 i = 0; 1880 i = 0;
1853 1881
1854 /* rcu_read_lock is hold by caller */ 1882 /* rcu_read_lock is hold by caller */
@@ -1869,12 +1897,12 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1869 plen, 1897 plen,
1870 fa->fa_tos, 1898 fa->fa_tos,
1871 fa->fa_info, NLM_F_MULTI) < 0) { 1899 fa->fa_info, NLM_F_MULTI) < 0) {
1872 cb->args[4] = i; 1900 cb->args[5] = i;
1873 return -1; 1901 return -1;
1874 } 1902 }
1875 i++; 1903 i++;
1876 } 1904 }
1877 cb->args[4] = i; 1905 cb->args[5] = i;
1878 return skb->len; 1906 return skb->len;
1879} 1907}
1880 1908
@@ -1885,7 +1913,7 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1885 struct hlist_node *node; 1913 struct hlist_node *node;
1886 int i, s_i; 1914 int i, s_i;
1887 1915
1888 s_i = cb->args[3]; 1916 s_i = cb->args[4];
1889 i = 0; 1917 i = 0;
1890 1918
1891 /* rcu_read_lock is hold by caller */ 1919 /* rcu_read_lock is hold by caller */
@@ -1896,19 +1924,19 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1896 } 1924 }
1897 1925
1898 if (i > s_i) 1926 if (i > s_i)
1899 cb->args[4] = 0; 1927 cb->args[5] = 0;
1900 1928
1901 if (list_empty(&li->falh)) 1929 if (list_empty(&li->falh))
1902 continue; 1930 continue;
1903 1931
1904 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) { 1932 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
1905 cb->args[3] = i; 1933 cb->args[4] = i;
1906 return -1; 1934 return -1;
1907 } 1935 }
1908 i++; 1936 i++;
1909 } 1937 }
1910 1938
1911 cb->args[3] = i; 1939 cb->args[4] = i;
1912 return skb->len; 1940 return skb->len;
1913} 1941}
1914 1942
@@ -1918,35 +1946,37 @@ static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb,
1918 struct leaf *l; 1946 struct leaf *l;
1919 struct trie *t = (struct trie *) tb->tb_data; 1947 struct trie *t = (struct trie *) tb->tb_data;
1920 t_key key = cb->args[2]; 1948 t_key key = cb->args[2];
1949 int count = cb->args[3];
1921 1950
1922 rcu_read_lock(); 1951 rcu_read_lock();
1923 /* Dump starting at last key. 1952 /* Dump starting at last key.
1924 * Note: 0.0.0.0/0 (ie default) is first key. 1953 * Note: 0.0.0.0/0 (ie default) is first key.
1925 */ 1954 */
1926 if (!key) 1955 if (count == 0)
1927 l = trie_firstleaf(t); 1956 l = trie_firstleaf(t);
1928 else { 1957 else {
1958 /* Normally, continue from last key, but if that is missing
1959 * fallback to using slow rescan
1960 */
1929 l = fib_find_node(t, key); 1961 l = fib_find_node(t, key);
1930 if (!l) { 1962 if (!l)
1931 /* The table changed during the dump, rather than 1963 l = trie_leafindex(t, count);
1932 * giving partial data, just make application retry.
1933 */
1934 rcu_read_unlock();
1935 return -EBUSY;
1936 }
1937 } 1964 }
1938 1965
1939 while (l) { 1966 while (l) {
1940 cb->args[2] = l->key; 1967 cb->args[2] = l->key;
1941 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { 1968 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
1969 cb->args[3] = count;
1942 rcu_read_unlock(); 1970 rcu_read_unlock();
1943 return -1; 1971 return -1;
1944 } 1972 }
1945 1973
1974 ++count;
1946 l = trie_nextleaf(l); 1975 l = trie_nextleaf(l);
1947 memset(&cb->args[3], 0, 1976 memset(&cb->args[4], 0,
1948 sizeof(cb->args) - 3*sizeof(cb->args[0])); 1977 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1949 } 1978 }
1979 cb->args[3] = count;
1950 rcu_read_unlock(); 1980 rcu_read_unlock();
1951 1981
1952 return skb->len; 1982 return skb->len;
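
The fn_trie_dump() change above drops the old -EBUSY bail-out: the callback now remembers both the last dumped leaf key (cb->args[2]) and a running count of fully dumped leaves (cb->args[3]), and if the keyed leaf has vanished between dump calls, trie_leafindex() simply skips that many leaves from the start. A minimal standalone C sketch of the same resume-by-key-with-count-fallback idea, using a toy singly linked list rather than the kernel trie or netlink callback state:

#include <stdio.h>

struct leaf { unsigned int key; struct leaf *next; };

static struct leaf *find_by_key(struct leaf *head, unsigned int key)
{
    for (; head; head = head->next)
        if (head->key == key)
            return head;
    return NULL;
}

static struct leaf *skip_n(struct leaf *head, int n)
{
    while (head && n-- > 0)
        head = head->next;
    return head;
}

/* Resume a dump: prefer the remembered key, fall back to the count. */
static struct leaf *resume(struct leaf *head, unsigned int last_key, int count)
{
    struct leaf *l;

    if (count == 0)
        return head;
    l = find_by_key(head, last_key);
    return l ? l : skip_n(head, count);
}

int main(void)
{
    struct leaf c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
    struct leaf *l = resume(&a, 20, 1);     /* last key still present */

    printf("resumed at %u\n", l ? l->key : 0);
    l = resume(&a, 99, 2);                  /* key vanished: skip 2 leaves */
    printf("fallback resumed at %u\n", l ? l->key : 0);
    return 0;
}

The fallback trades a little rescanning work for never returning a partial dump error to userspace when the table mutates mid-dump.
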
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7801cceb2d1b..de5a41de191a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -87,6 +87,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo,
87 struct hlist_node *node; 87 struct hlist_node *node;
88 struct inet_bind_bucket *tb; 88 struct inet_bind_bucket *tb;
89 int ret; 89 int ret;
90 struct net *net = sk->sk_net;
90 91
91 local_bh_disable(); 92 local_bh_disable();
92 if (!snum) { 93 if (!snum) {
@@ -100,7 +101,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo,
100 head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; 101 head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
101 spin_lock(&head->lock); 102 spin_lock(&head->lock);
102 inet_bind_bucket_for_each(tb, node, &head->chain) 103 inet_bind_bucket_for_each(tb, node, &head->chain)
103 if (tb->port == rover) 104 if (tb->ib_net == net && tb->port == rover)
104 goto next; 105 goto next;
105 break; 106 break;
106 next: 107 next:
@@ -127,7 +128,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo,
127 head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; 128 head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
128 spin_lock(&head->lock); 129 spin_lock(&head->lock);
129 inet_bind_bucket_for_each(tb, node, &head->chain) 130 inet_bind_bucket_for_each(tb, node, &head->chain)
130 if (tb->port == snum) 131 if (tb->ib_net == net && tb->port == snum)
131 goto tb_found; 132 goto tb_found;
132 } 133 }
133 tb = NULL; 134 tb = NULL;
@@ -147,7 +148,8 @@ tb_found:
147 } 148 }
148tb_not_found: 149tb_not_found:
149 ret = 1; 150 ret = 1;
150 if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL) 151 if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
152 net, head, snum)) == NULL)
151 goto fail_unlock; 153 goto fail_unlock;
152 if (hlist_empty(&tb->owners)) { 154 if (hlist_empty(&tb->owners)) {
153 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) 155 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
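
The inet_csk_get_port() hunk threads the socket's network namespace into the bind-bucket scan and into inet_bind_bucket_create(), so a bucket only matches when both its ib_net and its port agree; identical port numbers in different namespaces no longer collide. A small userspace sketch of that "match on (namespace, port) instead of port alone" pattern, with simplified stand-in structures rather than the kernel's hash buckets:

#include <stdio.h>
#include <stddef.h>

struct net { int id; };             /* stand-in for the kernel's struct net */

struct bind_bucket {
    const struct net *ib_net;       /* owning namespace */
    unsigned short port;
    struct bind_bucket *next;
};

static struct bind_bucket *find_bucket(struct bind_bucket *chain,
                                       const struct net *net,
                                       unsigned short port)
{
    for (; chain; chain = chain->next)
        if (chain->ib_net == net && chain->port == port)
            return chain;
    return NULL;
}

int main(void)
{
    struct net ns1 = { 1 }, ns2 = { 2 };
    struct bind_bucket b2 = { &ns2, 8080, NULL };
    struct bind_bucket b1 = { &ns1, 80, &b2 };

    /* Port 80 is taken in ns1 but still free from ns2's point of view. */
    printf("ns1:80 -> %s\n", find_bucket(&b1, &ns1, 80) ? "in use" : "free");
    printf("ns2:80 -> %s\n", find_bucket(&b1, &ns2, 80) ? "in use" : "free");
    return 0;
}
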
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 605ed2cd7972..da97695e7096 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -259,20 +259,22 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
259 const struct inet_diag_handler *handler; 259 const struct inet_diag_handler *handler;
260 260
261 handler = inet_diag_lock_handler(nlh->nlmsg_type); 261 handler = inet_diag_lock_handler(nlh->nlmsg_type);
262 if (!handler) 262 if (IS_ERR(handler)) {
263 return -ENOENT; 263 err = PTR_ERR(handler);
264 goto unlock;
265 }
264 266
265 hashinfo = handler->idiag_hashinfo; 267 hashinfo = handler->idiag_hashinfo;
266 err = -EINVAL; 268 err = -EINVAL;
267 269
268 if (req->idiag_family == AF_INET) { 270 if (req->idiag_family == AF_INET) {
269 sk = inet_lookup(hashinfo, req->id.idiag_dst[0], 271 sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
270 req->id.idiag_dport, req->id.idiag_src[0], 272 req->id.idiag_dport, req->id.idiag_src[0],
271 req->id.idiag_sport, req->id.idiag_if); 273 req->id.idiag_sport, req->id.idiag_if);
272 } 274 }
273#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 275#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
274 else if (req->idiag_family == AF_INET6) { 276 else if (req->idiag_family == AF_INET6) {
275 sk = inet6_lookup(hashinfo, 277 sk = inet6_lookup(&init_net, hashinfo,
276 (struct in6_addr *)req->id.idiag_dst, 278 (struct in6_addr *)req->id.idiag_dst,
277 req->id.idiag_dport, 279 req->id.idiag_dport,
278 (struct in6_addr *)req->id.idiag_src, 280 (struct in6_addr *)req->id.idiag_src,
@@ -708,8 +710,8 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
708 struct inet_hashinfo *hashinfo; 710 struct inet_hashinfo *hashinfo;
709 711
710 handler = inet_diag_lock_handler(cb->nlh->nlmsg_type); 712 handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
711 if (!handler) 713 if (IS_ERR(handler))
712 goto no_handler; 714 goto unlock;
713 715
714 hashinfo = handler->idiag_hashinfo; 716 hashinfo = handler->idiag_hashinfo;
715 717
@@ -838,7 +840,6 @@ done:
838 cb->args[2] = num; 840 cb->args[2] = num;
839unlock: 841unlock:
840 inet_diag_unlock_handler(handler); 842 inet_diag_unlock_handler(handler);
841no_handler:
842 return skb->len; 843 return skb->len;
843} 844}
844 845
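
In the inet_diag.c hunks, inet_diag_lock_handler() failures are now reported through the ERR_PTR convention rather than a bare NULL, so both callers test IS_ERR(), convert with PTR_ERR() where an errno is wanted, and fall through to the shared unlock label. A standalone sketch of that pointer-encoded-errno idiom; the ERR_PTR/IS_ERR/PTR_ERR helpers below are a userspace re-implementation for illustration, not the kernel headers:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)      { return (void *)(intptr_t)err; }
static inline long  PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int   IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct handler { const char *name; };

static struct handler tcp_handler = { "tcp_diag" };

/* Look up a handler; failure is encoded in the returned pointer. */
static struct handler *lock_handler(int type)
{
    if (type != 1)
        return ERR_PTR(-ENOENT);
    return &tcp_handler;
}

int main(void)
{
    struct handler *h = lock_handler(2);

    if (IS_ERR(h))
        printf("lookup failed: %ld\n", PTR_ERR(h));

    h = lock_handler(1);
    if (!IS_ERR(h))
        printf("got handler %s\n", h->name);
    return 0;
}

Encoding the errno in the pointer keeps the single return value while still letting the unlock path run unconditionally.
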
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 619c63c6948a..48d45008f749 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -28,12 +28,14 @@
28 * The bindhash mutex for snum's hash chain must be held here. 28 * The bindhash mutex for snum's hash chain must be held here.
29 */ 29 */
30struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, 30struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
31 struct net *net,
31 struct inet_bind_hashbucket *head, 32 struct inet_bind_hashbucket *head,
32 const unsigned short snum) 33 const unsigned short snum)
33{ 34{
34 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); 35 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
35 36
36 if (tb != NULL) { 37 if (tb != NULL) {
38 tb->ib_net = net;
37 tb->port = snum; 39 tb->port = snum;
38 tb->fastreuse = 0; 40 tb->fastreuse = 0;
39 INIT_HLIST_HEAD(&tb->owners); 41 INIT_HLIST_HEAD(&tb->owners);
@@ -125,7 +127,8 @@ EXPORT_SYMBOL(inet_listen_wlock);
125 * remote address for the connection. So always assume those are both 127 * remote address for the connection. So always assume those are both
126 * wildcarded during the search since they can never be otherwise. 128 * wildcarded during the search since they can never be otherwise.
127 */ 129 */
128static struct sock *inet_lookup_listener_slow(const struct hlist_head *head, 130static struct sock *inet_lookup_listener_slow(struct net *net,
131 const struct hlist_head *head,
129 const __be32 daddr, 132 const __be32 daddr,
130 const unsigned short hnum, 133 const unsigned short hnum,
131 const int dif) 134 const int dif)
@@ -137,7 +140,8 @@ static struct sock *inet_lookup_listener_slow(const struct hlist_head *head,
137 sk_for_each(sk, node, head) { 140 sk_for_each(sk, node, head) {
138 const struct inet_sock *inet = inet_sk(sk); 141 const struct inet_sock *inet = inet_sk(sk);
139 142
140 if (inet->num == hnum && !ipv6_only_sock(sk)) { 143 if (sk->sk_net == net && inet->num == hnum &&
144 !ipv6_only_sock(sk)) {
141 const __be32 rcv_saddr = inet->rcv_saddr; 145 const __be32 rcv_saddr = inet->rcv_saddr;
142 int score = sk->sk_family == PF_INET ? 1 : 0; 146 int score = sk->sk_family == PF_INET ? 1 : 0;
143 147
@@ -163,7 +167,8 @@ static struct sock *inet_lookup_listener_slow(const struct hlist_head *head,
163} 167}
164 168
165/* Optimize the common listener case. */ 169/* Optimize the common listener case. */
166struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo, 170struct sock *__inet_lookup_listener(struct net *net,
171 struct inet_hashinfo *hashinfo,
167 const __be32 daddr, const unsigned short hnum, 172 const __be32 daddr, const unsigned short hnum,
168 const int dif) 173 const int dif)
169{ 174{
@@ -178,9 +183,9 @@ struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
178 if (inet->num == hnum && !sk->sk_node.next && 183 if (inet->num == hnum && !sk->sk_node.next &&
179 (!inet->rcv_saddr || inet->rcv_saddr == daddr) && 184 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
180 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) && 185 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
181 !sk->sk_bound_dev_if) 186 !sk->sk_bound_dev_if && sk->sk_net == net)
182 goto sherry_cache; 187 goto sherry_cache;
183 sk = inet_lookup_listener_slow(head, daddr, hnum, dif); 188 sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
184 } 189 }
185 if (sk) { 190 if (sk) {
186sherry_cache: 191sherry_cache:
@@ -191,7 +196,8 @@ sherry_cache:
191} 196}
192EXPORT_SYMBOL_GPL(__inet_lookup_listener); 197EXPORT_SYMBOL_GPL(__inet_lookup_listener);
193 198
194struct sock * __inet_lookup_established(struct inet_hashinfo *hashinfo, 199struct sock * __inet_lookup_established(struct net *net,
200 struct inet_hashinfo *hashinfo,
195 const __be32 saddr, const __be16 sport, 201 const __be32 saddr, const __be16 sport,
196 const __be32 daddr, const u16 hnum, 202 const __be32 daddr, const u16 hnum,
197 const int dif) 203 const int dif)
@@ -210,13 +216,15 @@ struct sock * __inet_lookup_established(struct inet_hashinfo *hashinfo,
210 prefetch(head->chain.first); 216 prefetch(head->chain.first);
211 read_lock(lock); 217 read_lock(lock);
212 sk_for_each(sk, node, &head->chain) { 218 sk_for_each(sk, node, &head->chain) {
213 if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) 219 if (INET_MATCH(sk, net, hash, acookie,
220 saddr, daddr, ports, dif))
214 goto hit; /* You sunk my battleship! */ 221 goto hit; /* You sunk my battleship! */
215 } 222 }
216 223
217 /* Must check for a TIME_WAIT'er before going to listener hash. */ 224 /* Must check for a TIME_WAIT'er before going to listener hash. */
218 sk_for_each(sk, node, &head->twchain) { 225 sk_for_each(sk, node, &head->twchain) {
219 if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) 226 if (INET_TW_MATCH(sk, net, hash, acookie,
227 saddr, daddr, ports, dif))
220 goto hit; 228 goto hit;
221 } 229 }
222 sk = NULL; 230 sk = NULL;
@@ -247,6 +255,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
247 struct sock *sk2; 255 struct sock *sk2;
248 const struct hlist_node *node; 256 const struct hlist_node *node;
249 struct inet_timewait_sock *tw; 257 struct inet_timewait_sock *tw;
258 struct net *net = sk->sk_net;
250 259
251 prefetch(head->chain.first); 260 prefetch(head->chain.first);
252 write_lock(lock); 261 write_lock(lock);
@@ -255,7 +264,8 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
255 sk_for_each(sk2, node, &head->twchain) { 264 sk_for_each(sk2, node, &head->twchain) {
256 tw = inet_twsk(sk2); 265 tw = inet_twsk(sk2);
257 266
258 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) { 267 if (INET_TW_MATCH(sk2, net, hash, acookie,
268 saddr, daddr, ports, dif)) {
259 if (twsk_unique(sk, sk2, twp)) 269 if (twsk_unique(sk, sk2, twp))
260 goto unique; 270 goto unique;
261 else 271 else
@@ -266,7 +276,8 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
266 276
267 /* And established part... */ 277 /* And established part... */
268 sk_for_each(sk2, node, &head->chain) { 278 sk_for_each(sk2, node, &head->chain) {
269 if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) 279 if (INET_MATCH(sk2, net, hash, acookie,
280 saddr, daddr, ports, dif))
270 goto not_unique; 281 goto not_unique;
271 } 282 }
272 283
@@ -348,17 +359,18 @@ void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
348} 359}
349EXPORT_SYMBOL_GPL(__inet_hash); 360EXPORT_SYMBOL_GPL(__inet_hash);
350 361
351/* 362int __inet_hash_connect(struct inet_timewait_death_row *death_row,
352 * Bind a port for a connect operation and hash it. 363 struct sock *sk,
353 */ 364 int (*check_established)(struct inet_timewait_death_row *,
354int inet_hash_connect(struct inet_timewait_death_row *death_row, 365 struct sock *, __u16, struct inet_timewait_sock **),
355 struct sock *sk) 366 void (*hash)(struct inet_hashinfo *, struct sock *))
356{ 367{
357 struct inet_hashinfo *hinfo = death_row->hashinfo; 368 struct inet_hashinfo *hinfo = death_row->hashinfo;
358 const unsigned short snum = inet_sk(sk)->num; 369 const unsigned short snum = inet_sk(sk)->num;
359 struct inet_bind_hashbucket *head; 370 struct inet_bind_hashbucket *head;
360 struct inet_bind_bucket *tb; 371 struct inet_bind_bucket *tb;
361 int ret; 372 int ret;
373 struct net *net = sk->sk_net;
362 374
363 if (!snum) { 375 if (!snum) {
364 int i, remaining, low, high, port; 376 int i, remaining, low, high, port;
@@ -381,19 +393,19 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
381 * unique enough. 393 * unique enough.
382 */ 394 */
383 inet_bind_bucket_for_each(tb, node, &head->chain) { 395 inet_bind_bucket_for_each(tb, node, &head->chain) {
384 if (tb->port == port) { 396 if (tb->ib_net == net && tb->port == port) {
385 BUG_TRAP(!hlist_empty(&tb->owners)); 397 BUG_TRAP(!hlist_empty(&tb->owners));
386 if (tb->fastreuse >= 0) 398 if (tb->fastreuse >= 0)
387 goto next_port; 399 goto next_port;
388 if (!__inet_check_established(death_row, 400 if (!check_established(death_row, sk,
389 sk, port, 401 port, &tw))
390 &tw))
391 goto ok; 402 goto ok;
392 goto next_port; 403 goto next_port;
393 } 404 }
394 } 405 }
395 406
396 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port); 407 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
408 net, head, port);
397 if (!tb) { 409 if (!tb) {
398 spin_unlock(&head->lock); 410 spin_unlock(&head->lock);
399 break; 411 break;
@@ -415,7 +427,7 @@ ok:
415 inet_bind_hash(sk, tb, port); 427 inet_bind_hash(sk, tb, port);
416 if (sk_unhashed(sk)) { 428 if (sk_unhashed(sk)) {
417 inet_sk(sk)->sport = htons(port); 429 inet_sk(sk)->sport = htons(port);
418 __inet_hash_nolisten(hinfo, sk); 430 hash(hinfo, sk);
419 } 431 }
420 spin_unlock(&head->lock); 432 spin_unlock(&head->lock);
421 433
@@ -432,17 +444,28 @@ ok:
432 tb = inet_csk(sk)->icsk_bind_hash; 444 tb = inet_csk(sk)->icsk_bind_hash;
433 spin_lock_bh(&head->lock); 445 spin_lock_bh(&head->lock);
434 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 446 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
435 __inet_hash_nolisten(hinfo, sk); 447 hash(hinfo, sk);
436 spin_unlock_bh(&head->lock); 448 spin_unlock_bh(&head->lock);
437 return 0; 449 return 0;
438 } else { 450 } else {
439 spin_unlock(&head->lock); 451 spin_unlock(&head->lock);
440 /* No definite answer... Walk to established hash table */ 452 /* No definite answer... Walk to established hash table */
441 ret = __inet_check_established(death_row, sk, snum, NULL); 453 ret = check_established(death_row, sk, snum, NULL);
442out: 454out:
443 local_bh_enable(); 455 local_bh_enable();
444 return ret; 456 return ret;
445 } 457 }
446} 458}
459EXPORT_SYMBOL_GPL(__inet_hash_connect);
460
461/*
462 * Bind a port for a connect operation and hash it.
463 */
464int inet_hash_connect(struct inet_timewait_death_row *death_row,
465 struct sock *sk)
466{
467 return __inet_hash_connect(death_row, sk,
468 __inet_check_established, __inet_hash_nolisten);
469}
447 470
448EXPORT_SYMBOL_GPL(inet_hash_connect); 471EXPORT_SYMBOL_GPL(inet_hash_connect);
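
The inet_hashtables.c hunk splits the old inet_hash_connect() into a generic __inet_hash_connect() that receives the conflict check and the hash insertion as function pointers; the TCP wrapper passes __inet_check_established and __inet_hash_nolisten, and the exported symbol lets other callers reuse the ephemeral-port search with their own callbacks. A compact sketch of that "template algorithm plus injected callbacks" shape, with invented toy callbacks standing in for the real ones:

#include <stdio.h>

/* Generic port picker: walks candidate ports and lets the caller decide
 * whether a port conflicts and how to record the final choice. */
static int pick_port(int low, int high,
                     int (*in_conflict)(int port, void *ctx),
                     void (*commit)(int port, void *ctx),
                     void *ctx)
{
    int port;

    for (port = low; port <= high; port++) {
        if (in_conflict(port, ctx))
            continue;
        commit(port, ctx);
        return port;
    }
    return -1;
}

/* Toy stand-ins for the conflict check and the hash insertion. */
static int even_ports_taken(int port, void *ctx)
{
    (void)ctx;
    return (port % 2) == 0;
}

static void remember_port(int port, void *ctx)
{
    *(int *)ctx = port;
}

int main(void)
{
    int chosen = 0;

    if (pick_port(4000, 4010, even_ports_taken, remember_port, &chosen) < 0)
        printf("no free port\n");
    else
        printf("bound to %d\n", chosen);
    return 0;
}
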
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 18070ca65771..341779e685d9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -168,6 +168,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
168 } 168 }
169 169
170 skb->priority = sk->sk_priority; 170 skb->priority = sk->sk_priority;
171 skb->mark = sk->sk_mark;
171 172
172 /* Send it out. */ 173 /* Send it out. */
173 return ip_local_out(skb); 174 return ip_local_out(skb);
@@ -385,6 +386,7 @@ packet_routed:
385 (skb_shinfo(skb)->gso_segs ?: 1) - 1); 386 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
386 387
387 skb->priority = sk->sk_priority; 388 skb->priority = sk->sk_priority;
389 skb->mark = sk->sk_mark;
388 390
389 return ip_local_out(skb); 391 return ip_local_out(skb);
390 392
@@ -476,6 +478,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
476 if (skb_shinfo(skb)->frag_list) { 478 if (skb_shinfo(skb)->frag_list) {
477 struct sk_buff *frag; 479 struct sk_buff *frag;
478 int first_len = skb_pagelen(skb); 480 int first_len = skb_pagelen(skb);
481 int truesizes = 0;
479 482
480 if (first_len - hlen > mtu || 483 if (first_len - hlen > mtu ||
481 ((first_len - hlen) & 7) || 484 ((first_len - hlen) & 7) ||
@@ -499,7 +502,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
499 sock_hold(skb->sk); 502 sock_hold(skb->sk);
500 frag->sk = skb->sk; 503 frag->sk = skb->sk;
501 frag->destructor = sock_wfree; 504 frag->destructor = sock_wfree;
502 skb->truesize -= frag->truesize; 505 truesizes += frag->truesize;
503 } 506 }
504 } 507 }
505 508
@@ -510,6 +513,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
510 frag = skb_shinfo(skb)->frag_list; 513 frag = skb_shinfo(skb)->frag_list;
511 skb_shinfo(skb)->frag_list = NULL; 514 skb_shinfo(skb)->frag_list = NULL;
512 skb->data_len = first_len - skb_headlen(skb); 515 skb->data_len = first_len - skb_headlen(skb);
516 skb->truesize -= truesizes;
513 skb->len = first_len; 517 skb->len = first_len;
514 iph->tot_len = htons(first_len); 518 iph->tot_len = htons(first_len);
515 iph->frag_off = htons(IP_MF); 519 iph->frag_off = htons(IP_MF);
@@ -1284,6 +1288,7 @@ int ip_push_pending_frames(struct sock *sk)
1284 iph->daddr = rt->rt_dst; 1288 iph->daddr = rt->rt_dst;
1285 1289
1286 skb->priority = sk->sk_priority; 1290 skb->priority = sk->sk_priority;
1291 skb->mark = sk->sk_mark;
1287 skb->dst = dst_clone(&rt->u.dst); 1292 skb->dst = dst_clone(&rt->u.dst);
1288 1293
1289 if (iph->protocol == IPPROTO_ICMP) 1294 if (iph->protocol == IPPROTO_ICMP)
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index f4af99ad8fdb..ae1f45fc23b9 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -74,6 +74,7 @@ out:
74 74
75static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) 75static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
76{ 76{
77 int nexthdr;
77 int err = -ENOMEM; 78 int err = -ENOMEM;
78 struct ip_comp_hdr *ipch; 79 struct ip_comp_hdr *ipch;
79 80
@@ -84,13 +85,15 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
84 85
85 /* Remove ipcomp header and decompress original payload */ 86 /* Remove ipcomp header and decompress original payload */
86 ipch = (void *)skb->data; 87 ipch = (void *)skb->data;
88 nexthdr = ipch->nexthdr;
89
87 skb->transport_header = skb->network_header + sizeof(*ipch); 90 skb->transport_header = skb->network_header + sizeof(*ipch);
88 __skb_pull(skb, sizeof(*ipch)); 91 __skb_pull(skb, sizeof(*ipch));
89 err = ipcomp_decompress(x, skb); 92 err = ipcomp_decompress(x, skb);
90 if (err) 93 if (err)
91 goto out; 94 goto out;
92 95
93 err = ipch->nexthdr; 96 err = nexthdr;
94 97
95out: 98out:
96 return err; 99 return err;
@@ -434,7 +437,7 @@ error:
434 goto out; 437 goto out;
435} 438}
436 439
437static struct xfrm_type ipcomp_type = { 440static const struct xfrm_type ipcomp_type = {
438 .description = "IPCOMP4", 441 .description = "IPCOMP4",
439 .owner = THIS_MODULE, 442 .owner = THIS_MODULE,
440 .proto = IPPROTO_COMP, 443 .proto = IPPROTO_COMP,
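
The ipcomp_input() fix copies ipch->nexthdr into a local before ipcomp_decompress() runs, since decompression can resize the skb data and leave the old header pointer unsafe to dereference; only the saved copy is returned afterwards. A tiny sketch of the general rule, save what you need from a buffer before an operation that may reallocate it, with realloc standing in for the skb manipulation:

#include <stdio.h>
#include <stdlib.h>

struct comp_hdr { unsigned char nexthdr; unsigned char flags; };

/* May move the buffer, invalidating any pointers into the old one. */
static unsigned char *decompress(unsigned char *buf, size_t *len)
{
    unsigned char *nbuf = realloc(buf, *len * 4);

    if (nbuf)
        *len *= 4;
    return nbuf ? nbuf : buf;
}

int main(void)
{
    size_t len = sizeof(struct comp_hdr) + 8;
    unsigned char *pkt = calloc(1, len);
    struct comp_hdr *hdr = (struct comp_hdr *)pkt;
    int nexthdr;

    hdr->nexthdr = 6;               /* e.g. TCP */

    nexthdr = hdr->nexthdr;         /* copy BEFORE the buffer may move */
    pkt = decompress(pkt, &len);    /* 'hdr' must not be used past this point */

    printf("next header: %d\n", nexthdr);
    free(pkt);
    return 0;
}
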
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b4a810c28ac8..a7591ce344d2 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -22,6 +22,7 @@
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <net/compat.h> 24#include <net/compat.h>
25#include <net/sock.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26 27
27#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
@@ -850,7 +851,7 @@ static int compat_table_info(const struct xt_table_info *info,
850} 851}
851#endif 852#endif
852 853
853static int get_info(void __user *user, int *len, int compat) 854static int get_info(struct net *net, void __user *user, int *len, int compat)
854{ 855{
855 char name[ARPT_TABLE_MAXNAMELEN]; 856 char name[ARPT_TABLE_MAXNAMELEN];
856 struct arpt_table *t; 857 struct arpt_table *t;
@@ -870,7 +871,7 @@ static int get_info(void __user *user, int *len, int compat)
870 if (compat) 871 if (compat)
871 xt_compat_lock(NF_ARP); 872 xt_compat_lock(NF_ARP);
872#endif 873#endif
873 t = try_then_request_module(xt_find_table_lock(NF_ARP, name), 874 t = try_then_request_module(xt_find_table_lock(net, NF_ARP, name),
874 "arptable_%s", name); 875 "arptable_%s", name);
875 if (t && !IS_ERR(t)) { 876 if (t && !IS_ERR(t)) {
876 struct arpt_getinfo info; 877 struct arpt_getinfo info;
@@ -908,7 +909,8 @@ static int get_info(void __user *user, int *len, int compat)
908 return ret; 909 return ret;
909} 910}
910 911
911static int get_entries(struct arpt_get_entries __user *uptr, int *len) 912static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
913 int *len)
912{ 914{
913 int ret; 915 int ret;
914 struct arpt_get_entries get; 916 struct arpt_get_entries get;
@@ -926,7 +928,7 @@ static int get_entries(struct arpt_get_entries __user *uptr, int *len)
926 return -EINVAL; 928 return -EINVAL;
927 } 929 }
928 930
929 t = xt_find_table_lock(NF_ARP, get.name); 931 t = xt_find_table_lock(net, NF_ARP, get.name);
930 if (t && !IS_ERR(t)) { 932 if (t && !IS_ERR(t)) {
931 struct xt_table_info *private = t->private; 933 struct xt_table_info *private = t->private;
932 duprintf("t->private->number = %u\n", 934 duprintf("t->private->number = %u\n",
@@ -947,7 +949,8 @@ static int get_entries(struct arpt_get_entries __user *uptr, int *len)
947 return ret; 949 return ret;
948} 950}
949 951
950static int __do_replace(const char *name, unsigned int valid_hooks, 952static int __do_replace(struct net *net, const char *name,
953 unsigned int valid_hooks,
951 struct xt_table_info *newinfo, 954 struct xt_table_info *newinfo,
952 unsigned int num_counters, 955 unsigned int num_counters,
953 void __user *counters_ptr) 956 void __user *counters_ptr)
@@ -966,7 +969,7 @@ static int __do_replace(const char *name, unsigned int valid_hooks,
966 goto out; 969 goto out;
967 } 970 }
968 971
969 t = try_then_request_module(xt_find_table_lock(NF_ARP, name), 972 t = try_then_request_module(xt_find_table_lock(net, NF_ARP, name),
970 "arptable_%s", name); 973 "arptable_%s", name);
971 if (!t || IS_ERR(t)) { 974 if (!t || IS_ERR(t)) {
972 ret = t ? PTR_ERR(t) : -ENOENT; 975 ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1019,7 +1022,7 @@ static int __do_replace(const char *name, unsigned int valid_hooks,
1019 return ret; 1022 return ret;
1020} 1023}
1021 1024
1022static int do_replace(void __user *user, unsigned int len) 1025static int do_replace(struct net *net, void __user *user, unsigned int len)
1023{ 1026{
1024 int ret; 1027 int ret;
1025 struct arpt_replace tmp; 1028 struct arpt_replace tmp;
@@ -1053,7 +1056,7 @@ static int do_replace(void __user *user, unsigned int len)
1053 1056
1054 duprintf("arp_tables: Translated table\n"); 1057 duprintf("arp_tables: Translated table\n");
1055 1058
1056 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo, 1059 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1057 tmp.num_counters, tmp.counters); 1060 tmp.num_counters, tmp.counters);
1058 if (ret) 1061 if (ret)
1059 goto free_newinfo_untrans; 1062 goto free_newinfo_untrans;
@@ -1080,7 +1083,8 @@ static inline int add_counter_to_entry(struct arpt_entry *e,
1080 return 0; 1083 return 0;
1081} 1084}
1082 1085
1083static int do_add_counters(void __user *user, unsigned int len, int compat) 1086static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1087 int compat)
1084{ 1088{
1085 unsigned int i; 1089 unsigned int i;
1086 struct xt_counters_info tmp; 1090 struct xt_counters_info tmp;
@@ -1132,7 +1136,7 @@ static int do_add_counters(void __user *user, unsigned int len, int compat)
1132 goto free; 1136 goto free;
1133 } 1137 }
1134 1138
1135 t = xt_find_table_lock(NF_ARP, name); 1139 t = xt_find_table_lock(net, NF_ARP, name);
1136 if (!t || IS_ERR(t)) { 1140 if (!t || IS_ERR(t)) {
1137 ret = t ? PTR_ERR(t) : -ENOENT; 1141 ret = t ? PTR_ERR(t) : -ENOENT;
1138 goto free; 1142 goto free;
@@ -1435,7 +1439,8 @@ struct compat_arpt_replace {
1435 struct compat_arpt_entry entries[0]; 1439 struct compat_arpt_entry entries[0];
1436}; 1440};
1437 1441
1438static int compat_do_replace(void __user *user, unsigned int len) 1442static int compat_do_replace(struct net *net, void __user *user,
1443 unsigned int len)
1439{ 1444{
1440 int ret; 1445 int ret;
1441 struct compat_arpt_replace tmp; 1446 struct compat_arpt_replace tmp;
@@ -1471,7 +1476,7 @@ static int compat_do_replace(void __user *user, unsigned int len)
1471 1476
1472 duprintf("compat_do_replace: Translated table\n"); 1477 duprintf("compat_do_replace: Translated table\n");
1473 1478
1474 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo, 1479 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1475 tmp.num_counters, compat_ptr(tmp.counters)); 1480 tmp.num_counters, compat_ptr(tmp.counters));
1476 if (ret) 1481 if (ret)
1477 goto free_newinfo_untrans; 1482 goto free_newinfo_untrans;
@@ -1494,11 +1499,11 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1494 1499
1495 switch (cmd) { 1500 switch (cmd) {
1496 case ARPT_SO_SET_REPLACE: 1501 case ARPT_SO_SET_REPLACE:
1497 ret = compat_do_replace(user, len); 1502 ret = compat_do_replace(sk->sk_net, user, len);
1498 break; 1503 break;
1499 1504
1500 case ARPT_SO_SET_ADD_COUNTERS: 1505 case ARPT_SO_SET_ADD_COUNTERS:
1501 ret = do_add_counters(user, len, 1); 1506 ret = do_add_counters(sk->sk_net, user, len, 1);
1502 break; 1507 break;
1503 1508
1504 default: 1509 default:
@@ -1584,7 +1589,8 @@ struct compat_arpt_get_entries {
1584 struct compat_arpt_entry entrytable[0]; 1589 struct compat_arpt_entry entrytable[0];
1585}; 1590};
1586 1591
1587static int compat_get_entries(struct compat_arpt_get_entries __user *uptr, 1592static int compat_get_entries(struct net *net,
1593 struct compat_arpt_get_entries __user *uptr,
1588 int *len) 1594 int *len)
1589{ 1595{
1590 int ret; 1596 int ret;
@@ -1604,7 +1610,7 @@ static int compat_get_entries(struct compat_arpt_get_entries __user *uptr,
1604 } 1610 }
1605 1611
1606 xt_compat_lock(NF_ARP); 1612 xt_compat_lock(NF_ARP);
1607 t = xt_find_table_lock(NF_ARP, get.name); 1613 t = xt_find_table_lock(net, NF_ARP, get.name);
1608 if (t && !IS_ERR(t)) { 1614 if (t && !IS_ERR(t)) {
1609 struct xt_table_info *private = t->private; 1615 struct xt_table_info *private = t->private;
1610 struct xt_table_info info; 1616 struct xt_table_info info;
@@ -1641,10 +1647,10 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
1641 1647
1642 switch (cmd) { 1648 switch (cmd) {
1643 case ARPT_SO_GET_INFO: 1649 case ARPT_SO_GET_INFO:
1644 ret = get_info(user, len, 1); 1650 ret = get_info(sk->sk_net, user, len, 1);
1645 break; 1651 break;
1646 case ARPT_SO_GET_ENTRIES: 1652 case ARPT_SO_GET_ENTRIES:
1647 ret = compat_get_entries(user, len); 1653 ret = compat_get_entries(sk->sk_net, user, len);
1648 break; 1654 break;
1649 default: 1655 default:
1650 ret = do_arpt_get_ctl(sk, cmd, user, len); 1656 ret = do_arpt_get_ctl(sk, cmd, user, len);
@@ -1662,11 +1668,11 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned
1662 1668
1663 switch (cmd) { 1669 switch (cmd) {
1664 case ARPT_SO_SET_REPLACE: 1670 case ARPT_SO_SET_REPLACE:
1665 ret = do_replace(user, len); 1671 ret = do_replace(sk->sk_net, user, len);
1666 break; 1672 break;
1667 1673
1668 case ARPT_SO_SET_ADD_COUNTERS: 1674 case ARPT_SO_SET_ADD_COUNTERS:
1669 ret = do_add_counters(user, len, 0); 1675 ret = do_add_counters(sk->sk_net, user, len, 0);
1670 break; 1676 break;
1671 1677
1672 default: 1678 default:
@@ -1686,11 +1692,11 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
1686 1692
1687 switch (cmd) { 1693 switch (cmd) {
1688 case ARPT_SO_GET_INFO: 1694 case ARPT_SO_GET_INFO:
1689 ret = get_info(user, len, 0); 1695 ret = get_info(sk->sk_net, user, len, 0);
1690 break; 1696 break;
1691 1697
1692 case ARPT_SO_GET_ENTRIES: 1698 case ARPT_SO_GET_ENTRIES:
1693 ret = get_entries(user, len); 1699 ret = get_entries(sk->sk_net, user, len);
1694 break; 1700 break;
1695 1701
1696 case ARPT_SO_GET_REVISION_TARGET: { 1702 case ARPT_SO_GET_REVISION_TARGET: {
@@ -1719,19 +1725,21 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
1719 return ret; 1725 return ret;
1720} 1726}
1721 1727
1722int arpt_register_table(struct arpt_table *table, 1728struct arpt_table *arpt_register_table(struct net *net,
1723 const struct arpt_replace *repl) 1729 struct arpt_table *table,
1730 const struct arpt_replace *repl)
1724{ 1731{
1725 int ret; 1732 int ret;
1726 struct xt_table_info *newinfo; 1733 struct xt_table_info *newinfo;
1727 struct xt_table_info bootstrap 1734 struct xt_table_info bootstrap
1728 = { 0, 0, 0, { 0 }, { 0 }, { } }; 1735 = { 0, 0, 0, { 0 }, { 0 }, { } };
1729 void *loc_cpu_entry; 1736 void *loc_cpu_entry;
1737 struct xt_table *new_table;
1730 1738
1731 newinfo = xt_alloc_table_info(repl->size); 1739 newinfo = xt_alloc_table_info(repl->size);
1732 if (!newinfo) { 1740 if (!newinfo) {
1733 ret = -ENOMEM; 1741 ret = -ENOMEM;
1734 return ret; 1742 goto out;
1735 } 1743 }
1736 1744
1737 /* choose the copy on our node/cpu */ 1745 /* choose the copy on our node/cpu */
@@ -1745,24 +1753,27 @@ int arpt_register_table(struct arpt_table *table,
1745 repl->underflow); 1753 repl->underflow);
1746 1754
1747 duprintf("arpt_register_table: translate table gives %d\n", ret); 1755 duprintf("arpt_register_table: translate table gives %d\n", ret);
1748 if (ret != 0) { 1756 if (ret != 0)
1749 xt_free_table_info(newinfo); 1757 goto out_free;
1750 return ret;
1751 }
1752 1758
1753 ret = xt_register_table(table, &bootstrap, newinfo); 1759 new_table = xt_register_table(net, table, &bootstrap, newinfo);
1754 if (ret != 0) { 1760 if (IS_ERR(new_table)) {
1755 xt_free_table_info(newinfo); 1761 ret = PTR_ERR(new_table);
1756 return ret; 1762 goto out_free;
1757 } 1763 }
1764 return new_table;
1758 1765
1759 return 0; 1766out_free:
1767 xt_free_table_info(newinfo);
1768out:
1769 return ERR_PTR(ret);
1760} 1770}
1761 1771
1762void arpt_unregister_table(struct arpt_table *table) 1772void arpt_unregister_table(struct arpt_table *table)
1763{ 1773{
1764 struct xt_table_info *private; 1774 struct xt_table_info *private;
1765 void *loc_cpu_entry; 1775 void *loc_cpu_entry;
1776 struct module *table_owner = table->me;
1766 1777
1767 private = xt_unregister_table(table); 1778 private = xt_unregister_table(table);
1768 1779
@@ -1770,6 +1781,8 @@ void arpt_unregister_table(struct arpt_table *table)
1770 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1781 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1771 ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, 1782 ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
1772 cleanup_entry, NULL); 1783 cleanup_entry, NULL);
1784 if (private->number > private->initial_entries)
1785 module_put(table_owner);
1773 xt_free_table_info(private); 1786 xt_free_table_info(private);
1774} 1787}
1775 1788
@@ -1809,11 +1822,26 @@ static struct nf_sockopt_ops arpt_sockopts = {
1809 .owner = THIS_MODULE, 1822 .owner = THIS_MODULE,
1810}; 1823};
1811 1824
1825static int __net_init arp_tables_net_init(struct net *net)
1826{
1827 return xt_proto_init(net, NF_ARP);
1828}
1829
1830static void __net_exit arp_tables_net_exit(struct net *net)
1831{
1832 xt_proto_fini(net, NF_ARP);
1833}
1834
1835static struct pernet_operations arp_tables_net_ops = {
1836 .init = arp_tables_net_init,
1837 .exit = arp_tables_net_exit,
1838};
1839
1812static int __init arp_tables_init(void) 1840static int __init arp_tables_init(void)
1813{ 1841{
1814 int ret; 1842 int ret;
1815 1843
1816 ret = xt_proto_init(NF_ARP); 1844 ret = register_pernet_subsys(&arp_tables_net_ops);
1817 if (ret < 0) 1845 if (ret < 0)
1818 goto err1; 1846 goto err1;
1819 1847
@@ -1838,7 +1866,7 @@ err4:
1838err3: 1866err3:
1839 xt_unregister_target(&arpt_standard_target); 1867 xt_unregister_target(&arpt_standard_target);
1840err2: 1868err2:
1841 xt_proto_fini(NF_ARP); 1869 unregister_pernet_subsys(&arp_tables_net_ops);
1842err1: 1870err1:
1843 return ret; 1871 return ret;
1844} 1872}
@@ -1848,7 +1876,7 @@ static void __exit arp_tables_fini(void)
1848 nf_unregister_sockopt(&arpt_sockopts); 1876 nf_unregister_sockopt(&arpt_sockopts);
1849 xt_unregister_target(&arpt_error_target); 1877 xt_unregister_target(&arpt_error_target);
1850 xt_unregister_target(&arpt_standard_target); 1878 xt_unregister_target(&arpt_standard_target);
1851 xt_proto_fini(NF_ARP); 1879 unregister_pernet_subsys(&arp_tables_net_ops);
1852} 1880}
1853 1881
1854EXPORT_SYMBOL(arpt_register_table); 1882EXPORT_SYMBOL(arpt_register_table);
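
arp_tables_init() no longer calls xt_proto_init() once at module load; it registers arp_tables_net_ops so the xt state is set up and torn down for every network namespace as it is created or destroyed. A userspace model of that registry-of-per-namespace-callbacks pattern; the names and types below are illustrative, not the kernel's netns API:

#include <stdio.h>

struct net { int id; };

struct pernet_ops {
    int  (*init)(struct net *net);
    void (*exit)(struct net *net);
};

/* A single registered subsystem, to keep the sketch short. */
static const struct pernet_ops *registered;

static int register_pernet(const struct pernet_ops *ops)   { registered = ops; return 0; }
static void unregister_pernet(const struct pernet_ops *ops) { if (registered == ops) registered = NULL; }

/* Namespace lifecycle drives the callbacks. */
static int create_net(struct net *net)
{
    return registered && registered->init ? registered->init(net) : 0;
}

static void destroy_net(struct net *net)
{
    if (registered && registered->exit)
        registered->exit(net);
}

static int tables_init(struct net *net)   { printf("init tables for net %d\n", net->id); return 0; }
static void tables_exit(struct net *net)  { printf("free tables for net %d\n", net->id); }

static const struct pernet_ops tables_ops = { tables_init, tables_exit };

int main(void)
{
    struct net ns0 = { 0 }, ns1 = { 1 };

    register_pernet(&tables_ops);
    create_net(&ns0);               /* each new namespace gets its own tables */
    create_net(&ns1);
    destroy_net(&ns1);
    destroy_net(&ns0);
    unregister_pernet(&tables_ops);
    return 0;
}
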
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 7201511d54d2..4e9c496a30c2 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -20,7 +20,7 @@ static struct
20 struct arpt_replace repl; 20 struct arpt_replace repl;
21 struct arpt_standard entries[3]; 21 struct arpt_standard entries[3];
22 struct arpt_error term; 22 struct arpt_error term;
23} initial_table __initdata = { 23} initial_table __net_initdata = {
24 .repl = { 24 .repl = {
25 .name = "filter", 25 .name = "filter",
26 .valid_hooks = FILTER_VALID_HOOKS, 26 .valid_hooks = FILTER_VALID_HOOKS,
@@ -61,7 +61,7 @@ static unsigned int arpt_hook(unsigned int hook,
61 const struct net_device *out, 61 const struct net_device *out,
62 int (*okfn)(struct sk_buff *)) 62 int (*okfn)(struct sk_buff *))
63{ 63{
64 return arpt_do_table(skb, hook, in, out, &packet_filter); 64 return arpt_do_table(skb, hook, in, out, init_net.ipv4.arptable_filter);
65} 65}
66 66
67static struct nf_hook_ops arpt_ops[] __read_mostly = { 67static struct nf_hook_ops arpt_ops[] __read_mostly = {
@@ -85,12 +85,31 @@ static struct nf_hook_ops arpt_ops[] __read_mostly = {
85 }, 85 },
86}; 86};
87 87
88static int __net_init arptable_filter_net_init(struct net *net)
89{
90 /* Register table */
91 net->ipv4.arptable_filter =
92 arpt_register_table(net, &packet_filter, &initial_table.repl);
93 if (IS_ERR(net->ipv4.arptable_filter))
94 return PTR_ERR(net->ipv4.arptable_filter);
95 return 0;
96}
97
98static void __net_exit arptable_filter_net_exit(struct net *net)
99{
100 arpt_unregister_table(net->ipv4.arptable_filter);
101}
102
103static struct pernet_operations arptable_filter_net_ops = {
104 .init = arptable_filter_net_init,
105 .exit = arptable_filter_net_exit,
106};
107
88static int __init arptable_filter_init(void) 108static int __init arptable_filter_init(void)
89{ 109{
90 int ret; 110 int ret;
91 111
92 /* Register table */ 112 ret = register_pernet_subsys(&arptable_filter_net_ops);
93 ret = arpt_register_table(&packet_filter, &initial_table.repl);
94 if (ret < 0) 113 if (ret < 0)
95 return ret; 114 return ret;
96 115
@@ -100,14 +119,14 @@ static int __init arptable_filter_init(void)
100 return ret; 119 return ret;
101 120
102cleanup_table: 121cleanup_table:
103 arpt_unregister_table(&packet_filter); 122 unregister_pernet_subsys(&arptable_filter_net_ops);
104 return ret; 123 return ret;
105} 124}
106 125
107static void __exit arptable_filter_fini(void) 126static void __exit arptable_filter_fini(void)
108{ 127{
109 nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); 128 nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops));
110 arpt_unregister_table(&packet_filter); 129 unregister_pernet_subsys(&arptable_filter_net_ops);
111} 130}
112 131
113module_init(arptable_filter_init); 132module_init(arptable_filter_init);
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 5109839da222..6bda1102851b 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -512,6 +512,7 @@ static struct notifier_block ipq_nl_notifier = {
512 .notifier_call = ipq_rcv_nl_event, 512 .notifier_call = ipq_rcv_nl_event,
513}; 513};
514 514
515#ifdef CONFIG_SYSCTL
515static struct ctl_table_header *ipq_sysctl_header; 516static struct ctl_table_header *ipq_sysctl_header;
516 517
517static ctl_table ipq_table[] = { 518static ctl_table ipq_table[] = {
@@ -525,7 +526,9 @@ static ctl_table ipq_table[] = {
525 }, 526 },
526 { .ctl_name = 0 } 527 { .ctl_name = 0 }
527}; 528};
529#endif
528 530
531#ifdef CONFIG_PROC_FS
529static int ip_queue_show(struct seq_file *m, void *v) 532static int ip_queue_show(struct seq_file *m, void *v)
530{ 533{
531 read_lock_bh(&queue_lock); 534 read_lock_bh(&queue_lock);
@@ -562,6 +565,7 @@ static const struct file_operations ip_queue_proc_fops = {
562 .release = single_release, 565 .release = single_release,
563 .owner = THIS_MODULE, 566 .owner = THIS_MODULE,
564}; 567};
568#endif
565 569
566static const struct nf_queue_handler nfqh = { 570static const struct nf_queue_handler nfqh = {
567 .name = "ip_queue", 571 .name = "ip_queue",
@@ -571,7 +575,7 @@ static const struct nf_queue_handler nfqh = {
571static int __init ip_queue_init(void) 575static int __init ip_queue_init(void)
572{ 576{
573 int status = -ENOMEM; 577 int status = -ENOMEM;
574 struct proc_dir_entry *proc; 578 struct proc_dir_entry *proc __maybe_unused;
575 579
576 netlink_register_notifier(&ipq_nl_notifier); 580 netlink_register_notifier(&ipq_nl_notifier);
577 ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0, 581 ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
@@ -581,6 +585,7 @@ static int __init ip_queue_init(void)
581 goto cleanup_netlink_notifier; 585 goto cleanup_netlink_notifier;
582 } 586 }
583 587
588#ifdef CONFIG_PROC_FS
584 proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net); 589 proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
585 if (proc) { 590 if (proc) {
586 proc->owner = THIS_MODULE; 591 proc->owner = THIS_MODULE;
@@ -589,10 +594,11 @@ static int __init ip_queue_init(void)
589 printk(KERN_ERR "ip_queue: failed to create proc entry\n"); 594 printk(KERN_ERR "ip_queue: failed to create proc entry\n");
590 goto cleanup_ipqnl; 595 goto cleanup_ipqnl;
591 } 596 }
592 597#endif
593 register_netdevice_notifier(&ipq_dev_notifier); 598 register_netdevice_notifier(&ipq_dev_notifier);
599#ifdef CONFIG_SYSCTL
594 ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table); 600 ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
595 601#endif
596 status = nf_register_queue_handler(PF_INET, &nfqh); 602 status = nf_register_queue_handler(PF_INET, &nfqh);
597 if (status < 0) { 603 if (status < 0) {
598 printk(KERN_ERR "ip_queue: failed to register queue handler\n"); 604 printk(KERN_ERR "ip_queue: failed to register queue handler\n");
@@ -601,10 +607,12 @@ static int __init ip_queue_init(void)
601 return status; 607 return status;
602 608
603cleanup_sysctl: 609cleanup_sysctl:
610#ifdef CONFIG_SYSCTL
604 unregister_sysctl_table(ipq_sysctl_header); 611 unregister_sysctl_table(ipq_sysctl_header);
612#endif
605 unregister_netdevice_notifier(&ipq_dev_notifier); 613 unregister_netdevice_notifier(&ipq_dev_notifier);
606 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 614 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
607cleanup_ipqnl: 615cleanup_ipqnl: __maybe_unused
608 netlink_kernel_release(ipqnl); 616 netlink_kernel_release(ipqnl);
609 mutex_lock(&ipqnl_mutex); 617 mutex_lock(&ipqnl_mutex);
610 mutex_unlock(&ipqnl_mutex); 618 mutex_unlock(&ipqnl_mutex);
@@ -620,7 +628,9 @@ static void __exit ip_queue_fini(void)
620 synchronize_net(); 628 synchronize_net();
621 ipq_flush(NULL, 0); 629 ipq_flush(NULL, 0);
622 630
631#ifdef CONFIG_SYSCTL
623 unregister_sysctl_table(ipq_sysctl_header); 632 unregister_sysctl_table(ipq_sysctl_header);
633#endif
624 unregister_netdevice_notifier(&ipq_dev_notifier); 634 unregister_netdevice_notifier(&ipq_dev_notifier);
625 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 635 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
626 636
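
ip_queue now wraps its sysctl and procfs pieces in #ifdef CONFIG_SYSCTL / CONFIG_PROC_FS so the module still builds when either subsystem is configured out; the registration, the matching cleanup in the init error path, and the exit path are all guarded the same way. A minimal sketch of keeping an optional registration and its teardown under one compile-time switch; FEATURE_STATS is a made-up flag, not a kernel config option:

#include <stdio.h>

#define FEATURE_STATS 1             /* remove to build without the feature */

#ifdef FEATURE_STATS
static int stats_handle = -1;

static int register_stats(void)    { stats_handle = 42; return 0; }
static void unregister_stats(void) { stats_handle = -1; }
#endif

static int module_init_fn(void)
{
#ifdef FEATURE_STATS
    if (register_stats() < 0)
        return -1;
#endif
    printf("core registered\n");
    return 0;
}

static void module_exit_fn(void)
{
#ifdef FEATURE_STATS
    unregister_stats();             /* guarded exactly like the registration */
#endif
    printf("core unregistered\n");
}

int main(void)
{
    if (module_init_fn() == 0)
        module_exit_fn();
    return 0;
}
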
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 982b7f986291..600737f122d2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -291,7 +291,7 @@ static void trace_packet(struct sk_buff *skb,
291 unsigned int hook, 291 unsigned int hook,
292 const struct net_device *in, 292 const struct net_device *in,
293 const struct net_device *out, 293 const struct net_device *out,
294 char *tablename, 294 const char *tablename,
295 struct xt_table_info *private, 295 struct xt_table_info *private,
296 struct ipt_entry *e) 296 struct ipt_entry *e)
297{ 297{
@@ -1092,7 +1092,7 @@ static int compat_table_info(const struct xt_table_info *info,
1092} 1092}
1093#endif 1093#endif
1094 1094
1095static int get_info(void __user *user, int *len, int compat) 1095static int get_info(struct net *net, void __user *user, int *len, int compat)
1096{ 1096{
1097 char name[IPT_TABLE_MAXNAMELEN]; 1097 char name[IPT_TABLE_MAXNAMELEN];
1098 struct xt_table *t; 1098 struct xt_table *t;
@@ -1112,7 +1112,7 @@ static int get_info(void __user *user, int *len, int compat)
1112 if (compat) 1112 if (compat)
1113 xt_compat_lock(AF_INET); 1113 xt_compat_lock(AF_INET);
1114#endif 1114#endif
1115 t = try_then_request_module(xt_find_table_lock(AF_INET, name), 1115 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1116 "iptable_%s", name); 1116 "iptable_%s", name);
1117 if (t && !IS_ERR(t)) { 1117 if (t && !IS_ERR(t)) {
1118 struct ipt_getinfo info; 1118 struct ipt_getinfo info;
@@ -1152,7 +1152,7 @@ static int get_info(void __user *user, int *len, int compat)
1152} 1152}
1153 1153
1154static int 1154static int
1155get_entries(struct ipt_get_entries __user *uptr, int *len) 1155get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
1156{ 1156{
1157 int ret; 1157 int ret;
1158 struct ipt_get_entries get; 1158 struct ipt_get_entries get;
@@ -1170,7 +1170,7 @@ get_entries(struct ipt_get_entries __user *uptr, int *len)
1170 return -EINVAL; 1170 return -EINVAL;
1171 } 1171 }
1172 1172
1173 t = xt_find_table_lock(AF_INET, get.name); 1173 t = xt_find_table_lock(net, AF_INET, get.name);
1174 if (t && !IS_ERR(t)) { 1174 if (t && !IS_ERR(t)) {
1175 struct xt_table_info *private = t->private; 1175 struct xt_table_info *private = t->private;
1176 duprintf("t->private->number = %u\n", private->number); 1176 duprintf("t->private->number = %u\n", private->number);
@@ -1191,7 +1191,7 @@ get_entries(struct ipt_get_entries __user *uptr, int *len)
1191} 1191}
1192 1192
1193static int 1193static int
1194__do_replace(const char *name, unsigned int valid_hooks, 1194__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1195 struct xt_table_info *newinfo, unsigned int num_counters, 1195 struct xt_table_info *newinfo, unsigned int num_counters,
1196 void __user *counters_ptr) 1196 void __user *counters_ptr)
1197{ 1197{
@@ -1208,7 +1208,7 @@ __do_replace(const char *name, unsigned int valid_hooks,
1208 goto out; 1208 goto out;
1209 } 1209 }
1210 1210
1211 t = try_then_request_module(xt_find_table_lock(AF_INET, name), 1211 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1212 "iptable_%s", name); 1212 "iptable_%s", name);
1213 if (!t || IS_ERR(t)) { 1213 if (!t || IS_ERR(t)) {
1214 ret = t ? PTR_ERR(t) : -ENOENT; 1214 ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1261,7 +1261,7 @@ __do_replace(const char *name, unsigned int valid_hooks,
1261} 1261}
1262 1262
1263static int 1263static int
1264do_replace(void __user *user, unsigned int len) 1264do_replace(struct net *net, void __user *user, unsigned int len)
1265{ 1265{
1266 int ret; 1266 int ret;
1267 struct ipt_replace tmp; 1267 struct ipt_replace tmp;
@@ -1295,7 +1295,7 @@ do_replace(void __user *user, unsigned int len)
1295 1295
1296 duprintf("ip_tables: Translated table\n"); 1296 duprintf("ip_tables: Translated table\n");
1297 1297
1298 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo, 1298 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1299 tmp.num_counters, tmp.counters); 1299 tmp.num_counters, tmp.counters);
1300 if (ret) 1300 if (ret)
1301 goto free_newinfo_untrans; 1301 goto free_newinfo_untrans;
@@ -1331,7 +1331,7 @@ add_counter_to_entry(struct ipt_entry *e,
1331} 1331}
1332 1332
1333static int 1333static int
1334do_add_counters(void __user *user, unsigned int len, int compat) 1334do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
1335{ 1335{
1336 unsigned int i; 1336 unsigned int i;
1337 struct xt_counters_info tmp; 1337 struct xt_counters_info tmp;
@@ -1383,7 +1383,7 @@ do_add_counters(void __user *user, unsigned int len, int compat)
1383 goto free; 1383 goto free;
1384 } 1384 }
1385 1385
1386 t = xt_find_table_lock(AF_INET, name); 1386 t = xt_find_table_lock(net, AF_INET, name);
1387 if (!t || IS_ERR(t)) { 1387 if (!t || IS_ERR(t)) {
1388 ret = t ? PTR_ERR(t) : -ENOENT; 1388 ret = t ? PTR_ERR(t) : -ENOENT;
1389 goto free; 1389 goto free;
@@ -1429,7 +1429,7 @@ struct compat_ipt_replace {
1429 1429
1430static int 1430static int
1431compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, 1431compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1432 compat_uint_t *size, struct xt_counters *counters, 1432 unsigned int *size, struct xt_counters *counters,
1433 unsigned int *i) 1433 unsigned int *i)
1434{ 1434{
1435 struct ipt_entry_target *t; 1435 struct ipt_entry_target *t;
@@ -1476,7 +1476,7 @@ compat_find_calc_match(struct ipt_entry_match *m,
1476 const char *name, 1476 const char *name,
1477 const struct ipt_ip *ip, 1477 const struct ipt_ip *ip,
1478 unsigned int hookmask, 1478 unsigned int hookmask,
1479 int *size, int *i) 1479 int *size, unsigned int *i)
1480{ 1480{
1481 struct xt_match *match; 1481 struct xt_match *match;
1482 1482
@@ -1534,7 +1534,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1534 struct ipt_entry_target *t; 1534 struct ipt_entry_target *t;
1535 struct xt_target *target; 1535 struct xt_target *target;
1536 unsigned int entry_offset; 1536 unsigned int entry_offset;
1537 int ret, off, h, j; 1537 unsigned int j;
1538 int ret, off, h;
1538 1539
1539 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1540 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1540 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 1541 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
@@ -1647,7 +1648,8 @@ static int
1647compat_check_entry(struct ipt_entry *e, const char *name, 1648compat_check_entry(struct ipt_entry *e, const char *name,
1648 unsigned int *i) 1649 unsigned int *i)
1649{ 1650{
1650 int j, ret; 1651 unsigned int j;
1652 int ret;
1651 1653
1652 j = 0; 1654 j = 0;
1653 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, 1655 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip,
@@ -1789,7 +1791,7 @@ out_unlock:
1789} 1791}
1790 1792
1791static int 1793static int
1792compat_do_replace(void __user *user, unsigned int len) 1794compat_do_replace(struct net *net, void __user *user, unsigned int len)
1793{ 1795{
1794 int ret; 1796 int ret;
1795 struct compat_ipt_replace tmp; 1797 struct compat_ipt_replace tmp;
@@ -1826,7 +1828,7 @@ compat_do_replace(void __user *user, unsigned int len)
1826 1828
1827 duprintf("compat_do_replace: Translated table\n"); 1829 duprintf("compat_do_replace: Translated table\n");
1828 1830
1829 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo, 1831 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1830 tmp.num_counters, compat_ptr(tmp.counters)); 1832 tmp.num_counters, compat_ptr(tmp.counters));
1831 if (ret) 1833 if (ret)
1832 goto free_newinfo_untrans; 1834 goto free_newinfo_untrans;
@@ -1850,11 +1852,11 @@ compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1850 1852
1851 switch (cmd) { 1853 switch (cmd) {
1852 case IPT_SO_SET_REPLACE: 1854 case IPT_SO_SET_REPLACE:
1853 ret = compat_do_replace(user, len); 1855 ret = compat_do_replace(sk->sk_net, user, len);
1854 break; 1856 break;
1855 1857
1856 case IPT_SO_SET_ADD_COUNTERS: 1858 case IPT_SO_SET_ADD_COUNTERS:
1857 ret = do_add_counters(user, len, 1); 1859 ret = do_add_counters(sk->sk_net, user, len, 1);
1858 break; 1860 break;
1859 1861
1860 default: 1862 default:
@@ -1903,7 +1905,8 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1903} 1905}
1904 1906
1905static int 1907static int
1906compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len) 1908compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1909 int *len)
1907{ 1910{
1908 int ret; 1911 int ret;
1909 struct compat_ipt_get_entries get; 1912 struct compat_ipt_get_entries get;
@@ -1924,7 +1927,7 @@ compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1924 } 1927 }
1925 1928
1926 xt_compat_lock(AF_INET); 1929 xt_compat_lock(AF_INET);
1927 t = xt_find_table_lock(AF_INET, get.name); 1930 t = xt_find_table_lock(net, AF_INET, get.name);
1928 if (t && !IS_ERR(t)) { 1931 if (t && !IS_ERR(t)) {
1929 struct xt_table_info *private = t->private; 1932 struct xt_table_info *private = t->private;
1930 struct xt_table_info info; 1933 struct xt_table_info info;
@@ -1960,10 +1963,10 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1960 1963
1961 switch (cmd) { 1964 switch (cmd) {
1962 case IPT_SO_GET_INFO: 1965 case IPT_SO_GET_INFO:
1963 ret = get_info(user, len, 1); 1966 ret = get_info(sk->sk_net, user, len, 1);
1964 break; 1967 break;
1965 case IPT_SO_GET_ENTRIES: 1968 case IPT_SO_GET_ENTRIES:
1966 ret = compat_get_entries(user, len); 1969 ret = compat_get_entries(sk->sk_net, user, len);
1967 break; 1970 break;
1968 default: 1971 default:
1969 ret = do_ipt_get_ctl(sk, cmd, user, len); 1972 ret = do_ipt_get_ctl(sk, cmd, user, len);
@@ -1982,11 +1985,11 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1982 1985
1983 switch (cmd) { 1986 switch (cmd) {
1984 case IPT_SO_SET_REPLACE: 1987 case IPT_SO_SET_REPLACE:
1985 ret = do_replace(user, len); 1988 ret = do_replace(sk->sk_net, user, len);
1986 break; 1989 break;
1987 1990
1988 case IPT_SO_SET_ADD_COUNTERS: 1991 case IPT_SO_SET_ADD_COUNTERS:
1989 ret = do_add_counters(user, len, 0); 1992 ret = do_add_counters(sk->sk_net, user, len, 0);
1990 break; 1993 break;
1991 1994
1992 default: 1995 default:
@@ -2007,11 +2010,11 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2007 2010
2008 switch (cmd) { 2011 switch (cmd) {
2009 case IPT_SO_GET_INFO: 2012 case IPT_SO_GET_INFO:
2010 ret = get_info(user, len, 0); 2013 ret = get_info(sk->sk_net, user, len, 0);
2011 break; 2014 break;
2012 2015
2013 case IPT_SO_GET_ENTRIES: 2016 case IPT_SO_GET_ENTRIES:
2014 ret = get_entries(user, len); 2017 ret = get_entries(sk->sk_net, user, len);
2015 break; 2018 break;
2016 2019
2017 case IPT_SO_GET_REVISION_MATCH: 2020 case IPT_SO_GET_REVISION_MATCH:
@@ -2048,17 +2051,21 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2048 return ret; 2051 return ret;
2049} 2052}
2050 2053
2051int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl) 2054struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
2055 const struct ipt_replace *repl)
2052{ 2056{
2053 int ret; 2057 int ret;
2054 struct xt_table_info *newinfo; 2058 struct xt_table_info *newinfo;
2055 struct xt_table_info bootstrap 2059 struct xt_table_info bootstrap
2056 = { 0, 0, 0, { 0 }, { 0 }, { } }; 2060 = { 0, 0, 0, { 0 }, { 0 }, { } };
2057 void *loc_cpu_entry; 2061 void *loc_cpu_entry;
2062 struct xt_table *new_table;
2058 2063
2059 newinfo = xt_alloc_table_info(repl->size); 2064 newinfo = xt_alloc_table_info(repl->size);
2060 if (!newinfo) 2065 if (!newinfo) {
2061 return -ENOMEM; 2066 ret = -ENOMEM;
2067 goto out;
2068 }
2062 2069
2063 /* choose the copy on our node/cpu, but dont care about preemption */ 2070 /* choose the copy on our node/cpu, but dont care about preemption */
2064 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2071 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
@@ -2069,30 +2076,36 @@ int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2069 repl->num_entries, 2076 repl->num_entries,
2070 repl->hook_entry, 2077 repl->hook_entry,
2071 repl->underflow); 2078 repl->underflow);
2072 if (ret != 0) { 2079 if (ret != 0)
2073 xt_free_table_info(newinfo); 2080 goto out_free;
2074 return ret;
2075 }
2076 2081
2077 ret = xt_register_table(table, &bootstrap, newinfo); 2082 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2078 if (ret != 0) { 2083 if (IS_ERR(new_table)) {
2079 xt_free_table_info(newinfo); 2084 ret = PTR_ERR(new_table);
2080 return ret; 2085 goto out_free;
2081 } 2086 }
2082 2087
2083 return 0; 2088 return new_table;
2089
2090out_free:
2091 xt_free_table_info(newinfo);
2092out:
2093 return ERR_PTR(ret);
2084} 2094}
2085 2095
2086void ipt_unregister_table(struct xt_table *table) 2096void ipt_unregister_table(struct xt_table *table)
2087{ 2097{
2088 struct xt_table_info *private; 2098 struct xt_table_info *private;
2089 void *loc_cpu_entry; 2099 void *loc_cpu_entry;
2100 struct module *table_owner = table->me;
2090 2101
2091 private = xt_unregister_table(table); 2102 private = xt_unregister_table(table);
2092 2103
2093 /* Decrease module usage counts and free resources */ 2104 /* Decrease module usage counts and free resources */
2094 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2105 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2095 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2106 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2107 if (private->number > private->initial_entries)
2108 module_put(table_owner);
2096 xt_free_table_info(private); 2109 xt_free_table_info(private);
2097} 2110}
2098 2111
@@ -2200,11 +2213,26 @@ static struct xt_match icmp_matchstruct __read_mostly = {
2200 .family = AF_INET, 2213 .family = AF_INET,
2201}; 2214};
2202 2215
2216static int __net_init ip_tables_net_init(struct net *net)
2217{
2218 return xt_proto_init(net, AF_INET);
2219}
2220
2221static void __net_exit ip_tables_net_exit(struct net *net)
2222{
2223 xt_proto_fini(net, AF_INET);
2224}
2225
2226static struct pernet_operations ip_tables_net_ops = {
2227 .init = ip_tables_net_init,
2228 .exit = ip_tables_net_exit,
2229};
2230
2203static int __init ip_tables_init(void) 2231static int __init ip_tables_init(void)
2204{ 2232{
2205 int ret; 2233 int ret;
2206 2234
2207 ret = xt_proto_init(AF_INET); 2235 ret = register_pernet_subsys(&ip_tables_net_ops);
2208 if (ret < 0) 2236 if (ret < 0)
2209 goto err1; 2237 goto err1;
2210 2238
@@ -2234,7 +2262,7 @@ err4:
2234err3: 2262err3:
2235 xt_unregister_target(&ipt_standard_target); 2263 xt_unregister_target(&ipt_standard_target);
2236err2: 2264err2:
2237 xt_proto_fini(AF_INET); 2265 unregister_pernet_subsys(&ip_tables_net_ops);
2238err1: 2266err1:
2239 return ret; 2267 return ret;
2240} 2268}
@@ -2247,7 +2275,7 @@ static void __exit ip_tables_fini(void)
2247 xt_unregister_target(&ipt_error_target); 2275 xt_unregister_target(&ipt_error_target);
2248 xt_unregister_target(&ipt_standard_target); 2276 xt_unregister_target(&ipt_standard_target);
2249 2277
2250 xt_proto_fini(AF_INET); 2278 unregister_pernet_subsys(&ip_tables_net_ops);
2251} 2279}
2252 2280
2253EXPORT_SYMBOL(ipt_register_table); 2281EXPORT_SYMBOL(ipt_register_table);
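ipt_register_table() now hands back the registered table itself and reports failure through the ERR_PTR encoding instead of an int, which is what lets the per-net callers further down store the result straight into struct net and test it with IS_ERR(). A minimal sketch of that convention, outside the patch (alloc_or_err() and use_it() are invented names, not kernel functions):

#include <linux/err.h>
#include <linux/slab.h>

static void *alloc_or_err(size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer value */
	return p;
}

static int use_it(void)
{
	void *p = alloc_or_err(32);

	if (IS_ERR(p))
		return PTR_ERR(p);		/* decode back to a negative errno */
	kfree(p);
	return 0;
}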
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 1b31f7d14d46..c6cf84c77611 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -76,13 +76,6 @@ clusterip_config_put(struct clusterip_config *c)
76 kfree(c); 76 kfree(c);
77} 77}
78 78
79/* increase the count of entries(rules) using/referencing this config */
80static inline void
81clusterip_config_entry_get(struct clusterip_config *c)
82{
83 atomic_inc(&c->entries);
84}
85
86/* decrease the count of entries using/referencing this config. If last 79/* decrease the count of entries using/referencing this config. If last
87 * entry(rule) is removed, remove the config from lists, but don't free it 80 * entry(rule) is removed, remove the config from lists, but don't free it
88 * yet, since proc-files could still be holding references */ 81 * yet, since proc-files could still be holding references */
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index e3154a99c08a..68cbe3ca01ce 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -212,11 +212,11 @@ recent_mt(const struct sk_buff *skb, const struct net_device *in,
212 recent_entry_remove(t, e); 212 recent_entry_remove(t, e);
213 ret = !ret; 213 ret = !ret;
214 } else if (info->check_set & (IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) { 214 } else if (info->check_set & (IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) {
215 unsigned long t = jiffies - info->seconds * HZ; 215 unsigned long time = jiffies - info->seconds * HZ;
216 unsigned int i, hits = 0; 216 unsigned int i, hits = 0;
217 217
218 for (i = 0; i < e->nstamps; i++) { 218 for (i = 0; i < e->nstamps; i++) {
219 if (info->seconds && time_after(t, e->stamps[i])) 219 if (info->seconds && time_after(time, e->stamps[i]))
220 continue; 220 continue;
221 if (++hits >= info->hit_count) { 221 if (++hits >= info->hit_count) {
222 ret = !ret; 222 ret = !ret;
@@ -320,6 +320,7 @@ struct recent_iter_state {
320}; 320};
321 321
322static void *recent_seq_start(struct seq_file *seq, loff_t *pos) 322static void *recent_seq_start(struct seq_file *seq, loff_t *pos)
323 __acquires(recent_lock)
323{ 324{
324 struct recent_iter_state *st = seq->private; 325 struct recent_iter_state *st = seq->private;
325 const struct recent_table *t = st->table; 326 const struct recent_table *t = st->table;
@@ -352,6 +353,7 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
352} 353}
353 354
354static void recent_seq_stop(struct seq_file *s, void *v) 355static void recent_seq_stop(struct seq_file *s, void *v)
356 __releases(recent_lock)
355{ 357{
356 spin_unlock_bh(&recent_lock); 358 spin_unlock_bh(&recent_lock);
357} 359}
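The __acquires()/__releases() markers added to the seq_file start/stop handlers are sparse annotations: they tell the static checker that a lock deliberately taken in ->start() and released only in ->stop() is not a context imbalance. A stripped-down sketch of the pattern (my_lock and the handler names are placeholders):

#include <linux/seq_file.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(my_lock)
{
	spin_lock_bh(&my_lock);		/* held across the whole seq_file walk */
	return NULL;			/* real code would return the first record */
}

static void my_seq_stop(struct seq_file *seq, void *v)
	__releases(my_lock)
{
	spin_unlock_bh(&my_lock);
}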
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 29bb4f9fbda0..69f3d7e6e96f 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -28,7 +28,7 @@ static struct
28 struct ipt_replace repl; 28 struct ipt_replace repl;
29 struct ipt_standard entries[3]; 29 struct ipt_standard entries[3];
30 struct ipt_error term; 30 struct ipt_error term;
31} initial_table __initdata = { 31} initial_table __net_initdata = {
32 .repl = { 32 .repl = {
33 .name = "filter", 33 .name = "filter",
34 .valid_hooks = FILTER_VALID_HOOKS, 34 .valid_hooks = FILTER_VALID_HOOKS,
@@ -69,7 +69,7 @@ ipt_hook(unsigned int hook,
69 const struct net_device *out, 69 const struct net_device *out,
70 int (*okfn)(struct sk_buff *)) 70 int (*okfn)(struct sk_buff *))
71{ 71{
72 return ipt_do_table(skb, hook, in, out, &packet_filter); 72 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter);
73} 73}
74 74
75static unsigned int 75static unsigned int
@@ -88,7 +88,7 @@ ipt_local_out_hook(unsigned int hook,
88 return NF_ACCEPT; 88 return NF_ACCEPT;
89 } 89 }
90 90
91 return ipt_do_table(skb, hook, in, out, &packet_filter); 91 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter);
92} 92}
93 93
94static struct nf_hook_ops ipt_ops[] __read_mostly = { 94static struct nf_hook_ops ipt_ops[] __read_mostly = {
@@ -119,6 +119,26 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
119static int forward = NF_ACCEPT; 119static int forward = NF_ACCEPT;
120module_param(forward, bool, 0000); 120module_param(forward, bool, 0000);
121 121
122static int __net_init iptable_filter_net_init(struct net *net)
123{
124 /* Register table */
125 net->ipv4.iptable_filter =
126 ipt_register_table(net, &packet_filter, &initial_table.repl);
127 if (IS_ERR(net->ipv4.iptable_filter))
128 return PTR_ERR(net->ipv4.iptable_filter);
129 return 0;
130}
131
132static void __net_exit iptable_filter_net_exit(struct net *net)
133{
134 ipt_unregister_table(net->ipv4.iptable_filter);
135}
136
137static struct pernet_operations iptable_filter_net_ops = {
138 .init = iptable_filter_net_init,
139 .exit = iptable_filter_net_exit,
140};
141
122static int __init iptable_filter_init(void) 142static int __init iptable_filter_init(void)
123{ 143{
124 int ret; 144 int ret;
@@ -131,8 +151,7 @@ static int __init iptable_filter_init(void)
131 /* Entry 1 is the FORWARD hook */ 151 /* Entry 1 is the FORWARD hook */
132 initial_table.entries[1].target.verdict = -forward - 1; 152 initial_table.entries[1].target.verdict = -forward - 1;
133 153
134 /* Register table */ 154 ret = register_pernet_subsys(&iptable_filter_net_ops);
135 ret = ipt_register_table(&packet_filter, &initial_table.repl);
136 if (ret < 0) 155 if (ret < 0)
137 return ret; 156 return ret;
138 157
@@ -144,14 +163,14 @@ static int __init iptable_filter_init(void)
144 return ret; 163 return ret;
145 164
146 cleanup_table: 165 cleanup_table:
147 ipt_unregister_table(&packet_filter); 166 unregister_pernet_subsys(&iptable_filter_net_ops);
148 return ret; 167 return ret;
149} 168}
150 169
151static void __exit iptable_filter_fini(void) 170static void __exit iptable_filter_fini(void)
152{ 171{
153 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 172 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
154 ipt_unregister_table(&packet_filter); 173 unregister_pernet_subsys(&iptable_filter_net_ops);
155} 174}
156 175
157module_init(iptable_filter_init); 176module_init(iptable_filter_init);
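The iptable_filter conversion above is the generic pernet_operations pattern: register_pernet_subsys() runs the .init hook for the initial namespace and for every namespace created afterwards, and .exit when a namespace is torn down, so the table pointer can live in struct net instead of a global. A bare-bones sketch of the same skeleton, with invented my_net_* names, roughly what the mangle and raw tables repeat below:

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init my_net_init(struct net *net)
{
	/* set up per-namespace state here */
	return 0;
}

static void __net_exit my_net_exit(struct net *net)
{
	/* undo my_net_init() for this namespace */
}

static struct pernet_operations my_net_ops = {
	.init = my_net_init,
	.exit = my_net_exit,
};

static int __init my_module_init(void)
{
	return register_pernet_subsys(&my_net_ops);
}

static void __exit my_module_exit(void)
{
	unregister_pernet_subsys(&my_net_ops);
}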
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 5c4be202430c..c55a210853a7 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -33,7 +33,7 @@ static struct
33 struct ipt_replace repl; 33 struct ipt_replace repl;
34 struct ipt_standard entries[5]; 34 struct ipt_standard entries[5];
35 struct ipt_error term; 35 struct ipt_error term;
36} initial_table __initdata = { 36} initial_table __net_initdata = {
37 .repl = { 37 .repl = {
38 .name = "mangle", 38 .name = "mangle",
39 .valid_hooks = MANGLE_VALID_HOOKS, 39 .valid_hooks = MANGLE_VALID_HOOKS,
@@ -80,7 +80,7 @@ ipt_route_hook(unsigned int hook,
80 const struct net_device *out, 80 const struct net_device *out,
81 int (*okfn)(struct sk_buff *)) 81 int (*okfn)(struct sk_buff *))
82{ 82{
83 return ipt_do_table(skb, hook, in, out, &packet_mangler); 83 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle);
84} 84}
85 85
86static unsigned int 86static unsigned int
@@ -112,7 +112,7 @@ ipt_local_hook(unsigned int hook,
112 daddr = iph->daddr; 112 daddr = iph->daddr;
113 tos = iph->tos; 113 tos = iph->tos;
114 114
115 ret = ipt_do_table(skb, hook, in, out, &packet_mangler); 115 ret = ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle);
116 /* Reroute for ANY change. */ 116 /* Reroute for ANY change. */
117 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { 117 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
118 iph = ip_hdr(skb); 118 iph = ip_hdr(skb);
@@ -166,12 +166,31 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
166 }, 166 },
167}; 167};
168 168
169static int __net_init iptable_mangle_net_init(struct net *net)
170{
171 /* Register table */
172 net->ipv4.iptable_mangle =
173 ipt_register_table(net, &packet_mangler, &initial_table.repl);
174 if (IS_ERR(net->ipv4.iptable_mangle))
175 return PTR_ERR(net->ipv4.iptable_mangle);
176 return 0;
177}
178
179static void __net_exit iptable_mangle_net_exit(struct net *net)
180{
181 ipt_unregister_table(net->ipv4.iptable_mangle);
182}
183
184static struct pernet_operations iptable_mangle_net_ops = {
185 .init = iptable_mangle_net_init,
186 .exit = iptable_mangle_net_exit,
187};
188
169static int __init iptable_mangle_init(void) 189static int __init iptable_mangle_init(void)
170{ 190{
171 int ret; 191 int ret;
172 192
173 /* Register table */ 193 ret = register_pernet_subsys(&iptable_mangle_net_ops);
174 ret = ipt_register_table(&packet_mangler, &initial_table.repl);
175 if (ret < 0) 194 if (ret < 0)
176 return ret; 195 return ret;
177 196
@@ -183,14 +202,14 @@ static int __init iptable_mangle_init(void)
183 return ret; 202 return ret;
184 203
185 cleanup_table: 204 cleanup_table:
186 ipt_unregister_table(&packet_mangler); 205 unregister_pernet_subsys(&iptable_mangle_net_ops);
187 return ret; 206 return ret;
188} 207}
189 208
190static void __exit iptable_mangle_fini(void) 209static void __exit iptable_mangle_fini(void)
191{ 210{
192 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 211 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
193 ipt_unregister_table(&packet_mangler); 212 unregister_pernet_subsys(&iptable_mangle_net_ops);
194} 213}
195 214
196module_init(iptable_mangle_init); 215module_init(iptable_mangle_init);
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index dc34aa274533..e41fe8ca4e1c 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -14,7 +14,7 @@ static struct
14 struct ipt_replace repl; 14 struct ipt_replace repl;
15 struct ipt_standard entries[2]; 15 struct ipt_standard entries[2];
16 struct ipt_error term; 16 struct ipt_error term;
17} initial_table __initdata = { 17} initial_table __net_initdata = {
18 .repl = { 18 .repl = {
19 .name = "raw", 19 .name = "raw",
20 .valid_hooks = RAW_VALID_HOOKS, 20 .valid_hooks = RAW_VALID_HOOKS,
@@ -52,7 +52,7 @@ ipt_hook(unsigned int hook,
52 const struct net_device *out, 52 const struct net_device *out,
53 int (*okfn)(struct sk_buff *)) 53 int (*okfn)(struct sk_buff *))
54{ 54{
55 return ipt_do_table(skb, hook, in, out, &packet_raw); 55 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw);
56} 56}
57 57
58static unsigned int 58static unsigned int
@@ -70,7 +70,7 @@ ipt_local_hook(unsigned int hook,
70 "packet.\n"); 70 "packet.\n");
71 return NF_ACCEPT; 71 return NF_ACCEPT;
72 } 72 }
73 return ipt_do_table(skb, hook, in, out, &packet_raw); 73 return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw);
74} 74}
75 75
76/* 'raw' is the very first table. */ 76/* 'raw' is the very first table. */
@@ -91,12 +91,31 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
91 }, 91 },
92}; 92};
93 93
94static int __net_init iptable_raw_net_init(struct net *net)
95{
96 /* Register table */
97 net->ipv4.iptable_raw =
98 ipt_register_table(net, &packet_raw, &initial_table.repl);
99 if (IS_ERR(net->ipv4.iptable_raw))
100 return PTR_ERR(net->ipv4.iptable_raw);
101 return 0;
102}
103
104static void __net_exit iptable_raw_net_exit(struct net *net)
105{
106 ipt_unregister_table(net->ipv4.iptable_raw);
107}
108
109static struct pernet_operations iptable_raw_net_ops = {
110 .init = iptable_raw_net_init,
111 .exit = iptable_raw_net_exit,
112};
113
94static int __init iptable_raw_init(void) 114static int __init iptable_raw_init(void)
95{ 115{
96 int ret; 116 int ret;
97 117
98 /* Register table */ 118 ret = register_pernet_subsys(&iptable_raw_net_ops);
99 ret = ipt_register_table(&packet_raw, &initial_table.repl);
100 if (ret < 0) 119 if (ret < 0)
101 return ret; 120 return ret;
102 121
@@ -108,14 +127,14 @@ static int __init iptable_raw_init(void)
108 return ret; 127 return ret;
109 128
110 cleanup_table: 129 cleanup_table:
111 ipt_unregister_table(&packet_raw); 130 unregister_pernet_subsys(&iptable_raw_net_ops);
112 return ret; 131 return ret;
113} 132}
114 133
115static void __exit iptable_raw_fini(void) 134static void __exit iptable_raw_fini(void)
116{ 135{
117 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 136 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
118 ipt_unregister_table(&packet_raw); 137 unregister_pernet_subsys(&iptable_raw_net_ops);
119} 138}
120 139
121module_init(iptable_raw_init); 140module_init(iptable_raw_init);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index ac3d61d8026e..a65b845c5f15 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -27,7 +27,8 @@
27static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 27static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
28 struct nf_conntrack_tuple *tuple) 28 struct nf_conntrack_tuple *tuple)
29{ 29{
30 __be32 _addrs[2], *ap; 30 const __be32 *ap;
31 __be32 _addrs[2];
31 ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr), 32 ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
32 sizeof(u_int32_t) * 2, _addrs); 33 sizeof(u_int32_t) * 2, _addrs);
33 if (ap == NULL) 34 if (ap == NULL)
@@ -76,7 +77,8 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
76static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, 77static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
77 unsigned int *dataoff, u_int8_t *protonum) 78 unsigned int *dataoff, u_int8_t *protonum)
78{ 79{
79 struct iphdr _iph, *iph; 80 const struct iphdr *iph;
81 struct iphdr _iph;
80 82
81 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); 83 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
82 if (iph == NULL) 84 if (iph == NULL)
@@ -111,8 +113,8 @@ static unsigned int ipv4_conntrack_help(unsigned int hooknum,
111{ 113{
112 struct nf_conn *ct; 114 struct nf_conn *ct;
113 enum ip_conntrack_info ctinfo; 115 enum ip_conntrack_info ctinfo;
114 struct nf_conn_help *help; 116 const struct nf_conn_help *help;
115 struct nf_conntrack_helper *helper; 117 const struct nf_conntrack_helper *helper;
116 118
117 /* This is where we call the helper: as the packet goes out. */ 119 /* This is where we call the helper: as the packet goes out. */
118 ct = nf_ct_get(skb, &ctinfo); 120 ct = nf_ct_get(skb, &ctinfo);
@@ -299,8 +301,8 @@ static ctl_table ip_ct_sysctl_table[] = {
299static int 301static int
300getorigdst(struct sock *sk, int optval, void __user *user, int *len) 302getorigdst(struct sock *sk, int optval, void __user *user, int *len)
301{ 303{
302 struct inet_sock *inet = inet_sk(sk); 304 const struct inet_sock *inet = inet_sk(sk);
303 struct nf_conntrack_tuple_hash *h; 305 const struct nf_conntrack_tuple_hash *h;
304 struct nf_conntrack_tuple tuple; 306 struct nf_conntrack_tuple tuple;
305 307
306 NF_CT_TUPLE_U_BLANK(&tuple); 308 NF_CT_TUPLE_U_BLANK(&tuple);
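The const pointer plus on-stack struct pairs introduced here (iph/_iph, and the same shape for ICMP and TCP below) are the usual skb_header_pointer() idiom: the helper returns a pointer directly into the skb when the requested bytes are linear, and otherwise copies them into the caller-supplied stack buffer, so the result must be treated as read-only. A minimal sketch with an invented helper (peek_tcp_dport() is not a kernel function):

#include <linux/skbuff.h>
#include <linux/tcp.h>

static int peek_tcp_dport(const struct sk_buff *skb, unsigned int thoff,
			  __be16 *dport)
{
	const struct tcphdr *th;
	struct tcphdr _th;	/* backing storage used when the skb is non-linear */

	th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
	if (th == NULL)
		return -EINVAL;	/* packet too short for a TCP header */

	*dport = th->dest;	/* read-only: may point into _th */
	return 0;
}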
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 543c02b74c96..089252e82c01 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -39,12 +39,14 @@ struct ct_iter_state {
39static struct hlist_node *ct_get_first(struct seq_file *seq) 39static struct hlist_node *ct_get_first(struct seq_file *seq)
40{ 40{
41 struct ct_iter_state *st = seq->private; 41 struct ct_iter_state *st = seq->private;
42 struct hlist_node *n;
42 43
43 for (st->bucket = 0; 44 for (st->bucket = 0;
44 st->bucket < nf_conntrack_htable_size; 45 st->bucket < nf_conntrack_htable_size;
45 st->bucket++) { 46 st->bucket++) {
46 if (!hlist_empty(&nf_conntrack_hash[st->bucket])) 47 n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
47 return nf_conntrack_hash[st->bucket].first; 48 if (n)
49 return n;
48 } 50 }
49 return NULL; 51 return NULL;
50} 52}
@@ -54,11 +56,11 @@ static struct hlist_node *ct_get_next(struct seq_file *seq,
54{ 56{
55 struct ct_iter_state *st = seq->private; 57 struct ct_iter_state *st = seq->private;
56 58
57 head = head->next; 59 head = rcu_dereference(head->next);
58 while (head == NULL) { 60 while (head == NULL) {
59 if (++st->bucket >= nf_conntrack_htable_size) 61 if (++st->bucket >= nf_conntrack_htable_size)
60 return NULL; 62 return NULL;
61 head = nf_conntrack_hash[st->bucket].first; 63 head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
62 } 64 }
63 return head; 65 return head;
64} 66}
@@ -74,8 +76,9 @@ static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
74} 76}
75 77
76static void *ct_seq_start(struct seq_file *seq, loff_t *pos) 78static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
79 __acquires(RCU)
77{ 80{
78 read_lock_bh(&nf_conntrack_lock); 81 rcu_read_lock();
79 return ct_get_idx(seq, *pos); 82 return ct_get_idx(seq, *pos);
80} 83}
81 84
@@ -86,16 +89,17 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
86} 89}
87 90
88static void ct_seq_stop(struct seq_file *s, void *v) 91static void ct_seq_stop(struct seq_file *s, void *v)
92 __releases(RCU)
89{ 93{
90 read_unlock_bh(&nf_conntrack_lock); 94 rcu_read_unlock();
91} 95}
92 96
93static int ct_seq_show(struct seq_file *s, void *v) 97static int ct_seq_show(struct seq_file *s, void *v)
94{ 98{
95 const struct nf_conntrack_tuple_hash *hash = v; 99 const struct nf_conntrack_tuple_hash *hash = v;
96 const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); 100 const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
97 struct nf_conntrack_l3proto *l3proto; 101 const struct nf_conntrack_l3proto *l3proto;
98 struct nf_conntrack_l4proto *l4proto; 102 const struct nf_conntrack_l4proto *l4proto;
99 103
100 NF_CT_ASSERT(ct); 104 NF_CT_ASSERT(ct);
101 105
@@ -191,10 +195,12 @@ struct ct_expect_iter_state {
191static struct hlist_node *ct_expect_get_first(struct seq_file *seq) 195static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
192{ 196{
193 struct ct_expect_iter_state *st = seq->private; 197 struct ct_expect_iter_state *st = seq->private;
198 struct hlist_node *n;
194 199
195 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { 200 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
196 if (!hlist_empty(&nf_ct_expect_hash[st->bucket])) 201 n = rcu_dereference(nf_ct_expect_hash[st->bucket].first);
197 return nf_ct_expect_hash[st->bucket].first; 202 if (n)
203 return n;
198 } 204 }
199 return NULL; 205 return NULL;
200} 206}
@@ -204,11 +210,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
204{ 210{
205 struct ct_expect_iter_state *st = seq->private; 211 struct ct_expect_iter_state *st = seq->private;
206 212
207 head = head->next; 213 head = rcu_dereference(head->next);
208 while (head == NULL) { 214 while (head == NULL) {
209 if (++st->bucket >= nf_ct_expect_hsize) 215 if (++st->bucket >= nf_ct_expect_hsize)
210 return NULL; 216 return NULL;
211 head = nf_ct_expect_hash[st->bucket].first; 217 head = rcu_dereference(nf_ct_expect_hash[st->bucket].first);
212 } 218 }
213 return head; 219 return head;
214} 220}
@@ -224,8 +230,9 @@ static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
224} 230}
225 231
226static void *exp_seq_start(struct seq_file *seq, loff_t *pos) 232static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
233 __acquires(RCU)
227{ 234{
228 read_lock_bh(&nf_conntrack_lock); 235 rcu_read_lock();
229 return ct_expect_get_idx(seq, *pos); 236 return ct_expect_get_idx(seq, *pos);
230} 237}
231 238
@@ -236,14 +243,15 @@ static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
236} 243}
237 244
238static void exp_seq_stop(struct seq_file *seq, void *v) 245static void exp_seq_stop(struct seq_file *seq, void *v)
246 __releases(RCU)
239{ 247{
240 read_unlock_bh(&nf_conntrack_lock); 248 rcu_read_unlock();
241} 249}
242 250
243static int exp_seq_show(struct seq_file *s, void *v) 251static int exp_seq_show(struct seq_file *s, void *v)
244{ 252{
245 struct nf_conntrack_expect *exp; 253 struct nf_conntrack_expect *exp;
246 struct hlist_node *n = v; 254 const struct hlist_node *n = v;
247 255
248 exp = hlist_entry(n, struct nf_conntrack_expect, hnode); 256 exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
249 257
@@ -324,7 +332,7 @@ static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
324static int ct_cpu_seq_show(struct seq_file *seq, void *v) 332static int ct_cpu_seq_show(struct seq_file *seq, void *v)
325{ 333{
326 unsigned int nr_conntracks = atomic_read(&nf_conntrack_count); 334 unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
327 struct ip_conntrack_stat *st = v; 335 const struct ip_conntrack_stat *st = v;
328 336
329 if (v == SEQ_START_TOKEN) { 337 if (v == SEQ_START_TOKEN) {
330 seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n"); 338 seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
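The compat /proc code above switches its readers from nf_conntrack_lock to RCU: the whole seq_file walk is bracketed by rcu_read_lock()/rcu_read_unlock() and every hash-chain pointer is loaded through rcu_dereference(). A condensed sketch of that reader pattern over an hlist bucket array (my_hash and my_first are invented; the hlist RCU helpers of this kernel era live in linux/list.h):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>

static struct hlist_head my_hash[64];		/* stand-in for the real hash table */

static struct hlist_node *my_first(void)
{
	struct hlist_node *n;
	unsigned int b;

	/* the caller is expected to hold rcu_read_lock() for the whole walk */
	for (b = 0; b < ARRAY_SIZE(my_hash); b++) {
		n = rcu_dereference(my_hash[b].first);
		if (n)
			return n;
	}
	return NULL;
}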
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 4004a04c5510..6873fddb3529 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -26,7 +26,8 @@ static int icmp_pkt_to_tuple(const struct sk_buff *skb,
26 unsigned int dataoff, 26 unsigned int dataoff,
27 struct nf_conntrack_tuple *tuple) 27 struct nf_conntrack_tuple *tuple)
28{ 28{
29 struct icmphdr _hdr, *hp; 29 const struct icmphdr *hp;
30 struct icmphdr _hdr;
30 31
31 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 32 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
32 if (hp == NULL) 33 if (hp == NULL)
@@ -100,7 +101,7 @@ static int icmp_packet(struct nf_conn *ct,
100} 101}
101 102
102/* Called when a new connection for this protocol found. */ 103/* Called when a new connection for this protocol found. */
103static int icmp_new(struct nf_conn *conntrack, 104static int icmp_new(struct nf_conn *ct,
104 const struct sk_buff *skb, unsigned int dataoff) 105 const struct sk_buff *skb, unsigned int dataoff)
105{ 106{
106 static const u_int8_t valid_new[] = { 107 static const u_int8_t valid_new[] = {
@@ -110,15 +111,15 @@ static int icmp_new(struct nf_conn *conntrack,
110 [ICMP_ADDRESS] = 1 111 [ICMP_ADDRESS] = 1
111 }; 112 };
112 113
113 if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) 114 if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new)
114 || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) { 115 || !valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) {
115 /* Can't create a new ICMP `conn' with this. */ 116 /* Can't create a new ICMP `conn' with this. */
116 pr_debug("icmp: can't create new conn with type %u\n", 117 pr_debug("icmp: can't create new conn with type %u\n",
117 conntrack->tuplehash[0].tuple.dst.u.icmp.type); 118 ct->tuplehash[0].tuple.dst.u.icmp.type);
118 NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple); 119 NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple);
119 return 0; 120 return 0;
120 } 121 }
121 atomic_set(&conntrack->proto.icmp.count, 0); 122 atomic_set(&ct->proto.icmp.count, 0);
122 return 1; 123 return 1;
123} 124}
124 125
@@ -129,8 +130,8 @@ icmp_error_message(struct sk_buff *skb,
129 unsigned int hooknum) 130 unsigned int hooknum)
130{ 131{
131 struct nf_conntrack_tuple innertuple, origtuple; 132 struct nf_conntrack_tuple innertuple, origtuple;
132 struct nf_conntrack_l4proto *innerproto; 133 const struct nf_conntrack_l4proto *innerproto;
133 struct nf_conntrack_tuple_hash *h; 134 const struct nf_conntrack_tuple_hash *h;
134 135
135 NF_CT_ASSERT(skb->nfct == NULL); 136 NF_CT_ASSERT(skb->nfct == NULL);
136 137
@@ -176,7 +177,8 @@ static int
176icmp_error(struct sk_buff *skb, unsigned int dataoff, 177icmp_error(struct sk_buff *skb, unsigned int dataoff,
177 enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum) 178 enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum)
178{ 179{
179 struct icmphdr _ih, *icmph; 180 const struct icmphdr *icmph;
181 struct icmphdr _ih;
180 182
181 /* Not enough header? */ 183 /* Not enough header? */
182 icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih); 184 icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index e53ae1ef8f5e..dd07362d2b8f 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -31,7 +31,7 @@
31#include <net/netfilter/nf_conntrack_l3proto.h> 31#include <net/netfilter/nf_conntrack_l3proto.h>
32#include <net/netfilter/nf_conntrack_l4proto.h> 32#include <net/netfilter/nf_conntrack_l4proto.h>
33 33
34static DEFINE_RWLOCK(nf_nat_lock); 34static DEFINE_SPINLOCK(nf_nat_lock);
35 35
36static struct nf_conntrack_l3proto *l3proto __read_mostly; 36static struct nf_conntrack_l3proto *l3proto __read_mostly;
37 37
@@ -154,8 +154,8 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple,
154 struct nf_conn *ct; 154 struct nf_conn *ct;
155 struct hlist_node *n; 155 struct hlist_node *n;
156 156
157 read_lock_bh(&nf_nat_lock); 157 rcu_read_lock();
158 hlist_for_each_entry(nat, n, &bysource[h], bysource) { 158 hlist_for_each_entry_rcu(nat, n, &bysource[h], bysource) {
159 ct = nat->ct; 159 ct = nat->ct;
160 if (same_src(ct, tuple)) { 160 if (same_src(ct, tuple)) {
161 /* Copy source part from reply tuple. */ 161 /* Copy source part from reply tuple. */
@@ -164,12 +164,12 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple,
164 result->dst = tuple->dst; 164 result->dst = tuple->dst;
165 165
166 if (in_range(result, range)) { 166 if (in_range(result, range)) {
167 read_unlock_bh(&nf_nat_lock); 167 rcu_read_unlock();
168 return 1; 168 return 1;
169 } 169 }
170 } 170 }
171 } 171 }
172 read_unlock_bh(&nf_nat_lock); 172 rcu_read_unlock();
173 return 0; 173 return 0;
174} 174}
175 175
@@ -330,12 +330,12 @@ nf_nat_setup_info(struct nf_conn *ct,
330 unsigned int srchash; 330 unsigned int srchash;
331 331
332 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 332 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
333 write_lock_bh(&nf_nat_lock); 333 spin_lock_bh(&nf_nat_lock);
334	/* nf_conntrack_alter_reply might re-allocate extension area */ 334	/* nf_conntrack_alter_reply might re-allocate extension area */
335 nat = nfct_nat(ct); 335 nat = nfct_nat(ct);
336 nat->ct = ct; 336 nat->ct = ct;
337 hlist_add_head(&nat->bysource, &bysource[srchash]); 337 hlist_add_head_rcu(&nat->bysource, &bysource[srchash]);
338 write_unlock_bh(&nf_nat_lock); 338 spin_unlock_bh(&nf_nat_lock);
339 } 339 }
340 340
341 /* It's done. */ 341 /* It's done. */
@@ -521,14 +521,14 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
521{ 521{
522 int ret = 0; 522 int ret = 0;
523 523
524 write_lock_bh(&nf_nat_lock); 524 spin_lock_bh(&nf_nat_lock);
525 if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) { 525 if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
526 ret = -EBUSY; 526 ret = -EBUSY;
527 goto out; 527 goto out;
528 } 528 }
529 rcu_assign_pointer(nf_nat_protos[proto->protonum], proto); 529 rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
530 out: 530 out:
531 write_unlock_bh(&nf_nat_lock); 531 spin_unlock_bh(&nf_nat_lock);
532 return ret; 532 return ret;
533} 533}
534EXPORT_SYMBOL(nf_nat_protocol_register); 534EXPORT_SYMBOL(nf_nat_protocol_register);
@@ -536,10 +536,10 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
536/* No one stores the protocol anywhere; simply delete it. */ 536/* No one stores the protocol anywhere; simply delete it. */
537void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto) 537void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
538{ 538{
539 write_lock_bh(&nf_nat_lock); 539 spin_lock_bh(&nf_nat_lock);
540 rcu_assign_pointer(nf_nat_protos[proto->protonum], 540 rcu_assign_pointer(nf_nat_protos[proto->protonum],
541 &nf_nat_unknown_protocol); 541 &nf_nat_unknown_protocol);
542 write_unlock_bh(&nf_nat_lock); 542 spin_unlock_bh(&nf_nat_lock);
543 synchronize_rcu(); 543 synchronize_rcu();
544} 544}
545EXPORT_SYMBOL(nf_nat_protocol_unregister); 545EXPORT_SYMBOL(nf_nat_protocol_unregister);
@@ -594,10 +594,10 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
594 594
595 NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK); 595 NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
596 596
597 write_lock_bh(&nf_nat_lock); 597 spin_lock_bh(&nf_nat_lock);
598 hlist_del(&nat->bysource); 598 hlist_del_rcu(&nat->bysource);
599 nat->ct = NULL; 599 nat->ct = NULL;
600 write_unlock_bh(&nf_nat_lock); 600 spin_unlock_bh(&nf_nat_lock);
601} 601}
602 602
603static void nf_nat_move_storage(struct nf_conn *conntrack, void *old) 603static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
@@ -609,10 +609,10 @@ static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
609 if (!ct || !(ct->status & IPS_NAT_DONE_MASK)) 609 if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
610 return; 610 return;
611 611
612 write_lock_bh(&nf_nat_lock); 612 spin_lock_bh(&nf_nat_lock);
613 hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource); 613 hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
614 new_nat->ct = ct; 614 new_nat->ct = ct;
615 write_unlock_bh(&nf_nat_lock); 615 spin_unlock_bh(&nf_nat_lock);
616} 616}
617 617
618static struct nf_ct_ext_type nat_extend __read_mostly = { 618static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -646,17 +646,13 @@ static int __init nf_nat_init(void)
646 } 646 }
647 647
648 /* Sew in builtin protocols. */ 648 /* Sew in builtin protocols. */
649 write_lock_bh(&nf_nat_lock); 649 spin_lock_bh(&nf_nat_lock);
650 for (i = 0; i < MAX_IP_NAT_PROTO; i++) 650 for (i = 0; i < MAX_IP_NAT_PROTO; i++)
651 rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol); 651 rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
652 rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp); 652 rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
653 rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp); 653 rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
654 rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp); 654 rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
655 write_unlock_bh(&nf_nat_lock); 655 spin_unlock_bh(&nf_nat_lock);
656
657 for (i = 0; i < nf_nat_htable_size; i++) {
658 INIT_HLIST_HEAD(&bysource[i]);
659 }
660 656
661 /* Initialize fake conntrack so that NAT will skip it */ 657 /* Initialize fake conntrack so that NAT will skip it */
662 nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; 658 nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
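nf_nat_lock can become a plain spinlock here because only updaters need it any more: writers take the lock and use the RCU list mutators (hlist_add_head_rcu/hlist_del_rcu), while lookups such as find_appropriate_src() traverse the chain under rcu_read_lock() alone. A condensed sketch of that writer/reader split (my_lock, my_list and struct my_entry are invented; note the four-argument hlist_for_each_entry_rcu of this kernel era):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* serializes writers only */
static HLIST_HEAD(my_list);

struct my_entry {
	struct hlist_node node;
	int key;
};

static void my_add(struct my_entry *e)
{
	spin_lock_bh(&my_lock);
	hlist_add_head_rcu(&e->node, &my_list);
	spin_unlock_bh(&my_lock);
}

static int my_find(int key)
{
	struct my_entry *e;
	struct hlist_node *n;
	int found = 0;

	rcu_read_lock();		/* readers never touch my_lock */
	hlist_for_each_entry_rcu(e, n, &my_list, node) {
		if (e->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}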
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index a121989fdad7..ee47bf28c825 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -32,7 +32,8 @@ static int set_addr(struct sk_buff *skb,
32 __be32 ip; 32 __be32 ip;
33 __be16 port; 33 __be16 port;
34 } __attribute__ ((__packed__)) buf; 34 } __attribute__ ((__packed__)) buf;
35 struct tcphdr _tcph, *th; 35 const struct tcphdr *th;
36 struct tcphdr _tcph;
36 37
37 buf.ip = ip; 38 buf.ip = ip;
38 buf.port = port; 39 buf.port = port;
@@ -99,7 +100,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
99 unsigned char **data, 100 unsigned char **data,
100 TransportAddress *taddr, int count) 101 TransportAddress *taddr, int count)
101{ 102{
102 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 103 const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
103 int dir = CTINFO2DIR(ctinfo); 104 int dir = CTINFO2DIR(ctinfo);
104 int i; 105 int i;
105 __be16 port; 106 __be16 port;
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 4c0232842e75..ca57f47bbd25 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -44,8 +44,7 @@ adjust_tcp_sequence(u32 seq,
44 struct nf_nat_seq *this_way, *other_way; 44 struct nf_nat_seq *this_way, *other_way;
45 struct nf_conn_nat *nat = nfct_nat(ct); 45 struct nf_conn_nat *nat = nfct_nat(ct);
46 46
47 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n", 47 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n", seq, seq);
48 ntohl(seq), seq);
49 48
50 dir = CTINFO2DIR(ctinfo); 49 dir = CTINFO2DIR(ctinfo);
51 50
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index e63b944a2ebb..3a1e6d6afc0a 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -40,11 +40,11 @@ MODULE_ALIAS("ip_nat_pptp");
40static void pptp_nat_expected(struct nf_conn *ct, 40static void pptp_nat_expected(struct nf_conn *ct,
41 struct nf_conntrack_expect *exp) 41 struct nf_conntrack_expect *exp)
42{ 42{
43 struct nf_conn *master = ct->master; 43 const struct nf_conn *master = ct->master;
44 struct nf_conntrack_expect *other_exp; 44 struct nf_conntrack_expect *other_exp;
45 struct nf_conntrack_tuple t; 45 struct nf_conntrack_tuple t;
46 struct nf_ct_pptp_master *ct_pptp_info; 46 const struct nf_ct_pptp_master *ct_pptp_info;
47 struct nf_nat_pptp *nat_pptp_info; 47 const struct nf_nat_pptp *nat_pptp_info;
48 struct nf_nat_range range; 48 struct nf_nat_range range;
49 49
50 ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; 50 ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
@@ -186,7 +186,7 @@ static void
186pptp_exp_gre(struct nf_conntrack_expect *expect_orig, 186pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
187 struct nf_conntrack_expect *expect_reply) 187 struct nf_conntrack_expect *expect_reply)
188{ 188{
189 struct nf_conn *ct = expect_orig->master; 189 const struct nf_conn *ct = expect_orig->master;
190 struct nf_ct_pptp_master *ct_pptp_info; 190 struct nf_ct_pptp_master *ct_pptp_info;
191 struct nf_nat_pptp *nat_pptp_info; 191 struct nf_nat_pptp *nat_pptp_info;
192 192
@@ -217,7 +217,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
217 struct PptpControlHeader *ctlh, 217 struct PptpControlHeader *ctlh,
218 union pptp_ctrl_union *pptpReq) 218 union pptp_ctrl_union *pptpReq)
219{ 219{
220 struct nf_nat_pptp *nat_pptp_info; 220 const struct nf_nat_pptp *nat_pptp_info;
221 u_int16_t msg; 221 u_int16_t msg;
222 __be16 new_pcid; 222 __be16 new_pcid;
223 unsigned int pcid_off; 223 unsigned int pcid_off;
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index 9fa272e73113..a1e4da16da2e 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -59,7 +59,7 @@ static int
59gre_unique_tuple(struct nf_conntrack_tuple *tuple, 59gre_unique_tuple(struct nf_conntrack_tuple *tuple,
60 const struct nf_nat_range *range, 60 const struct nf_nat_range *range,
61 enum nf_nat_manip_type maniptype, 61 enum nf_nat_manip_type maniptype,
62 const struct nf_conn *conntrack) 62 const struct nf_conn *ct)
63{ 63{
64 static u_int16_t key; 64 static u_int16_t key;
65 __be16 *keyptr; 65 __be16 *keyptr;
@@ -67,7 +67,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
67 67
68 /* If there is no master conntrack we are not PPTP, 68 /* If there is no master conntrack we are not PPTP,
69 do not change tuples */ 69 do not change tuples */
70 if (!conntrack->master) 70 if (!ct->master)
71 return 0; 71 return 0;
72 72
73 if (maniptype == IP_NAT_MANIP_SRC) 73 if (maniptype == IP_NAT_MANIP_SRC)
@@ -76,7 +76,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
76 keyptr = &tuple->dst.u.gre.key; 76 keyptr = &tuple->dst.u.gre.key;
77 77
78 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { 78 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
79 pr_debug("%p: NATing GRE PPTP\n", conntrack); 79 pr_debug("%p: NATing GRE PPTP\n", ct);
80 min = 1; 80 min = 1;
81 range_size = 0xffff; 81 range_size = 0xffff;
82 } else { 82 } else {
@@ -88,11 +88,11 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
88 88
89 for (i = 0; i < range_size; i++, key++) { 89 for (i = 0; i < range_size; i++, key++) {
90 *keyptr = htons(min + key % range_size); 90 *keyptr = htons(min + key % range_size);
91 if (!nf_nat_used_tuple(tuple, conntrack)) 91 if (!nf_nat_used_tuple(tuple, ct))
92 return 1; 92 return 1;
93 } 93 }
94 94
95 pr_debug("%p: no NAT mapping\n", conntrack); 95 pr_debug("%p: no NAT mapping\n", ct);
96 return 0; 96 return 0;
97} 97}
98 98
@@ -104,7 +104,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
104{ 104{
105 struct gre_hdr *greh; 105 struct gre_hdr *greh;
106 struct gre_hdr_pptp *pgreh; 106 struct gre_hdr_pptp *pgreh;
107 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); 107 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
108 unsigned int hdroff = iphdroff + iph->ihl * 4; 108 unsigned int hdroff = iphdroff + iph->ihl * 4;
109 109
110 /* pgreh includes two optional 32bit fields which are not required 110 /* pgreh includes two optional 32bit fields which are not required
@@ -148,12 +148,12 @@ static const struct nf_nat_protocol gre = {
148#endif 148#endif
149}; 149};
150 150
151int __init nf_nat_proto_gre_init(void) 151static int __init nf_nat_proto_gre_init(void)
152{ 152{
153 return nf_nat_protocol_register(&gre); 153 return nf_nat_protocol_register(&gre);
154} 154}
155 155
156void __exit nf_nat_proto_gre_fini(void) 156static void __exit nf_nat_proto_gre_fini(void)
157{ 157{
158 nf_nat_protocol_unregister(&gre); 158 nf_nat_protocol_unregister(&gre);
159} 159}
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index a0e44c953cb6..03a02969aa57 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -57,7 +57,7 @@ icmp_manip_pkt(struct sk_buff *skb,
57 const struct nf_conntrack_tuple *tuple, 57 const struct nf_conntrack_tuple *tuple,
58 enum nf_nat_manip_type maniptype) 58 enum nf_nat_manip_type maniptype)
59{ 59{
60 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); 60 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
61 struct icmphdr *hdr; 61 struct icmphdr *hdr;
62 unsigned int hdroff = iphdroff + iph->ihl*4; 62 unsigned int hdroff = iphdroff + iph->ihl*4;
63 63
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index da23e9fbe679..ffd5d1589eca 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -93,7 +93,7 @@ tcp_manip_pkt(struct sk_buff *skb,
93 const struct nf_conntrack_tuple *tuple, 93 const struct nf_conntrack_tuple *tuple,
94 enum nf_nat_manip_type maniptype) 94 enum nf_nat_manip_type maniptype)
95{ 95{
96 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); 96 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
97 struct tcphdr *hdr; 97 struct tcphdr *hdr;
98 unsigned int hdroff = iphdroff + iph->ihl*4; 98 unsigned int hdroff = iphdroff + iph->ihl*4;
99 __be32 oldip, newip; 99 __be32 oldip, newip;
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index 10df4db078af..4b8f49910ff2 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -91,7 +91,7 @@ udp_manip_pkt(struct sk_buff *skb,
91 const struct nf_conntrack_tuple *tuple, 91 const struct nf_conntrack_tuple *tuple,
92 enum nf_nat_manip_type maniptype) 92 enum nf_nat_manip_type maniptype)
93{ 93{
94 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); 94 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
95 struct udphdr *hdr; 95 struct udphdr *hdr;
96 unsigned int hdroff = iphdroff + iph->ihl*4; 96 unsigned int hdroff = iphdroff + iph->ihl*4;
97 __be32 oldip, newip; 97 __be32 oldip, newip;
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 519182269e76..f8fda57ba20b 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -58,13 +58,14 @@ static struct
58 .term = IPT_ERROR_INIT, /* ERROR */ 58 .term = IPT_ERROR_INIT, /* ERROR */
59}; 59};
60 60
61static struct xt_table nat_table = { 61static struct xt_table __nat_table = {
62 .name = "nat", 62 .name = "nat",
63 .valid_hooks = NAT_VALID_HOOKS, 63 .valid_hooks = NAT_VALID_HOOKS,
64 .lock = RW_LOCK_UNLOCKED, 64 .lock = RW_LOCK_UNLOCKED,
65 .me = THIS_MODULE, 65 .me = THIS_MODULE,
66 .af = AF_INET, 66 .af = AF_INET,
67}; 67};
68static struct xt_table *nat_table;
68 69
69/* Source NAT */ 70/* Source NAT */
70static unsigned int ipt_snat_target(struct sk_buff *skb, 71static unsigned int ipt_snat_target(struct sk_buff *skb,
@@ -214,7 +215,7 @@ int nf_nat_rule_find(struct sk_buff *skb,
214{ 215{
215 int ret; 216 int ret;
216 217
217 ret = ipt_do_table(skb, hooknum, in, out, &nat_table); 218 ret = ipt_do_table(skb, hooknum, in, out, nat_table);
218 219
219 if (ret == NF_ACCEPT) { 220 if (ret == NF_ACCEPT) {
220 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) 221 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
@@ -248,9 +249,10 @@ int __init nf_nat_rule_init(void)
248{ 249{
249 int ret; 250 int ret;
250 251
251 ret = ipt_register_table(&nat_table, &nat_initial_table.repl); 252 nat_table = ipt_register_table(&init_net, &__nat_table,
252 if (ret != 0) 253 &nat_initial_table.repl);
253 return ret; 254 if (IS_ERR(nat_table))
255 return PTR_ERR(nat_table);
254 ret = xt_register_target(&ipt_snat_reg); 256 ret = xt_register_target(&ipt_snat_reg);
255 if (ret != 0) 257 if (ret != 0)
256 goto unregister_table; 258 goto unregister_table;
@@ -264,7 +266,7 @@ int __init nf_nat_rule_init(void)
264 unregister_snat: 266 unregister_snat:
265 xt_unregister_target(&ipt_snat_reg); 267 xt_unregister_target(&ipt_snat_reg);
266 unregister_table: 268 unregister_table:
267 ipt_unregister_table(&nat_table); 269 ipt_unregister_table(nat_table);
268 270
269 return ret; 271 return ret;
270} 272}
@@ -273,5 +275,5 @@ void nf_nat_rule_cleanup(void)
273{ 275{
274 xt_unregister_target(&ipt_dnat_reg); 276 xt_unregister_target(&ipt_dnat_reg);
275 xt_unregister_target(&ipt_snat_reg); 277 xt_unregister_target(&ipt_snat_reg);
276 ipt_unregister_table(&nat_table); 278 ipt_unregister_table(nat_table);
277} 279}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 606a170bf4ca..b4c8d4968bb2 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -35,9 +35,9 @@ struct addr_map {
35 } addr[IP_CT_DIR_MAX]; 35 } addr[IP_CT_DIR_MAX];
36}; 36};
37 37
38static void addr_map_init(struct nf_conn *ct, struct addr_map *map) 38static void addr_map_init(const struct nf_conn *ct, struct addr_map *map)
39{ 39{
40 struct nf_conntrack_tuple *t; 40 const struct nf_conntrack_tuple *t;
41 enum ip_conntrack_dir dir; 41 enum ip_conntrack_dir dir;
42 unsigned int n; 42 unsigned int n;
43 43
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 07f2a49926d4..540ce6ae887c 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -260,7 +260,7 @@ static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
260{ 260{
261 unsigned char ch; 261 unsigned char ch;
262 262
263 if (eoc == 0) { 263 if (eoc == NULL) {
264 if (!asn1_octet_decode(ctx, &ch)) 264 if (!asn1_octet_decode(ctx, &ch))
265 return 0; 265 return 0;
266 266
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 1360a94766dd..b096e81500ae 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -24,7 +24,7 @@ static unsigned int help(struct sk_buff *skb,
24 enum ip_conntrack_info ctinfo, 24 enum ip_conntrack_info ctinfo,
25 struct nf_conntrack_expect *exp) 25 struct nf_conntrack_expect *exp)
26{ 26{
27 struct nf_conn *ct = exp->master; 27 const struct nf_conn *ct = exp->master;
28 28
29 exp->saved_proto.udp.port 29 exp->saved_proto.udp.port
30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; 30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 85c08696abbe..a3002fe65b7f 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -352,6 +352,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
352 skb_reserve(skb, hh_len); 352 skb_reserve(skb, hh_len);
353 353
354 skb->priority = sk->sk_priority; 354 skb->priority = sk->sk_priority;
355 skb->mark = sk->sk_mark;
355 skb->dst = dst_clone(&rt->u.dst); 356 skb->dst = dst_clone(&rt->u.dst);
356 357
357 skb_reset_network_header(skb); 358 skb_reset_network_header(skb);
@@ -544,6 +545,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
544 545
545 { 546 {
546 struct flowi fl = { .oif = ipc.oif, 547 struct flowi fl = { .oif = ipc.oif,
548 .mark = sk->sk_mark,
547 .nl_u = { .ip4_u = 549 .nl_u = { .ip4_u =
548 { .daddr = daddr, 550 { .daddr = daddr,
549 .saddr = saddr, 551 .saddr = saddr,
@@ -860,8 +862,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
860 struct hlist_node *node; 862 struct hlist_node *node;
861 863
862 sk_for_each(sk, node, &state->h->ht[state->bucket]) 864 sk_for_each(sk, node, &state->h->ht[state->bucket])
863 if (sk->sk_net == state->p.net && 865 if (sk->sk_net == state->p.net)
864 sk->sk_family == state->family)
865 goto found; 866 goto found;
866 } 867 }
867 sk = NULL; 868 sk = NULL;
@@ -877,8 +878,7 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
877 sk = sk_next(sk); 878 sk = sk_next(sk);
878try_again: 879try_again:
879 ; 880 ;
880 } while (sk && sk->sk_net != state->p.net && 881 } while (sk && sk->sk_net != state->p.net);
881 sk->sk_family != state->family);
882 882
883 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { 883 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
884 sk = sk_head(&state->h->ht[state->bucket]); 884 sk = sk_head(&state->h->ht[state->bucket]);
@@ -927,7 +927,7 @@ void raw_seq_stop(struct seq_file *seq, void *v)
927} 927}
928EXPORT_SYMBOL_GPL(raw_seq_stop); 928EXPORT_SYMBOL_GPL(raw_seq_stop);
929 929
930static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i) 930static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
931{ 931{
932 struct inet_sock *inet = inet_sk(sp); 932 struct inet_sock *inet = inet_sk(sp);
933 __be32 dest = inet->daddr, 933 __be32 dest = inet->daddr,
@@ -935,33 +935,23 @@ static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
935 __u16 destp = 0, 935 __u16 destp = 0,
936 srcp = inet->num; 936 srcp = inet->num;
937 937
938 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" 938 seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
939 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d", 939 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d",
940 i, src, srcp, dest, destp, sp->sk_state, 940 i, src, srcp, dest, destp, sp->sk_state,
941 atomic_read(&sp->sk_wmem_alloc), 941 atomic_read(&sp->sk_wmem_alloc),
942 atomic_read(&sp->sk_rmem_alloc), 942 atomic_read(&sp->sk_rmem_alloc),
943 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 943 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
944 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); 944 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
945 return tmpbuf;
946} 945}
947 946
948#define TMPSZ 128
949
950static int raw_seq_show(struct seq_file *seq, void *v) 947static int raw_seq_show(struct seq_file *seq, void *v)
951{ 948{
952 char tmpbuf[TMPSZ+1];
953
954 if (v == SEQ_START_TOKEN) 949 if (v == SEQ_START_TOKEN)
955 seq_printf(seq, "%-*s\n", TMPSZ-1, 950 seq_printf(seq, " sl local_address rem_address st tx_queue "
956 " sl local_address rem_address st tx_queue " 951 "rx_queue tr tm->when retrnsmt uid timeout "
957 "rx_queue tr tm->when retrnsmt uid timeout " 952 "inode drops\n");
958 "inode drops"); 953 else
959 else { 954 raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
960 struct raw_iter_state *state = raw_seq_private(seq);
961
962 seq_printf(seq, "%-*s\n", TMPSZ-1,
963 get_raw_sock(v, tmpbuf, state->bucket));
964 }
965 return 0; 955 return 0;
966} 956}
967 957
@@ -972,27 +962,25 @@ static const struct seq_operations raw_seq_ops = {
972 .show = raw_seq_show, 962 .show = raw_seq_show,
973}; 963};
974 964
975int raw_seq_open(struct inode *ino, struct file *file, struct raw_hashinfo *h, 965int raw_seq_open(struct inode *ino, struct file *file,
976 unsigned short family) 966 struct raw_hashinfo *h, const struct seq_operations *ops)
977{ 967{
978 int err; 968 int err;
979 struct raw_iter_state *i; 969 struct raw_iter_state *i;
980 970
981 err = seq_open_net(ino, file, &raw_seq_ops, 971 err = seq_open_net(ino, file, ops, sizeof(struct raw_iter_state));
982 sizeof(struct raw_iter_state));
983 if (err < 0) 972 if (err < 0)
984 return err; 973 return err;
985 974
986 i = raw_seq_private((struct seq_file *)file->private_data); 975 i = raw_seq_private((struct seq_file *)file->private_data);
987 i->h = h; 976 i->h = h;
988 i->family = family;
989 return 0; 977 return 0;
990} 978}
991EXPORT_SYMBOL_GPL(raw_seq_open); 979EXPORT_SYMBOL_GPL(raw_seq_open);
992 980
993static int raw_v4_seq_open(struct inode *inode, struct file *file) 981static int raw_v4_seq_open(struct inode *inode, struct file *file)
994{ 982{
995 return raw_seq_open(inode, file, &raw_v4_hashinfo, PF_INET); 983 return raw_seq_open(inode, file, &raw_v4_hashinfo, &raw_seq_ops);
996} 984}
997 985
998static const struct file_operations raw_seq_fops = { 986static const struct file_operations raw_seq_fops = {
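The raw.c hunks also start copying sk->sk_mark into each outgoing skb and into the flow key, so a mark set on the socket now reaches both netfilter mark matching and fwmark policy routing for raw sockets. sk_mark is normally set from user space with the SO_MARK socket option (CAP_NET_ADMIN required); a minimal sketch, assuming headers recent enough to define SO_MARK:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);	/* raw sockets need root */
	unsigned int mark = 0x2a;				/* arbitrary example mark */

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_MARK,
				 &mark, sizeof(mark)) < 0)
		perror("raw socket / SO_MARK");
	return 0;
}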
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 896c768e41a2..8842ecb9be48 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -117,8 +117,6 @@
117 117
118#define RT_GC_TIMEOUT (300*HZ) 118#define RT_GC_TIMEOUT (300*HZ)
119 119
120static int ip_rt_min_delay = 2 * HZ;
121static int ip_rt_max_delay = 10 * HZ;
122static int ip_rt_max_size; 120static int ip_rt_max_size;
123static int ip_rt_gc_timeout = RT_GC_TIMEOUT; 121static int ip_rt_gc_timeout = RT_GC_TIMEOUT;
124static int ip_rt_gc_interval = 60 * HZ; 122static int ip_rt_gc_interval = 60 * HZ;
@@ -133,12 +131,9 @@ static int ip_rt_mtu_expires = 10 * 60 * HZ;
133static int ip_rt_min_pmtu = 512 + 20 + 20; 131static int ip_rt_min_pmtu = 512 + 20 + 20;
134static int ip_rt_min_advmss = 256; 132static int ip_rt_min_advmss = 256;
135static int ip_rt_secret_interval = 10 * 60 * HZ; 133static int ip_rt_secret_interval = 10 * 60 * HZ;
136static int ip_rt_flush_expected;
137static unsigned long rt_deadline;
138 134
139#define RTprint(a...) printk(KERN_DEBUG a) 135#define RTprint(a...) printk(KERN_DEBUG a)
140 136
141static struct timer_list rt_flush_timer;
142static void rt_worker_func(struct work_struct *work); 137static void rt_worker_func(struct work_struct *work);
143static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); 138static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
144static struct timer_list rt_secret_timer; 139static struct timer_list rt_secret_timer;
@@ -169,6 +164,7 @@ static struct dst_ops ipv4_dst_ops = {
169 .update_pmtu = ip_rt_update_pmtu, 164 .update_pmtu = ip_rt_update_pmtu,
170 .local_out = ip_local_out, 165 .local_out = ip_local_out,
171 .entry_size = sizeof(struct rtable), 166 .entry_size = sizeof(struct rtable),
167 .entries = ATOMIC_INIT(0),
172}; 168};
173 169
174#define ECN_OR_COST(class) TC_PRIO_##class 170#define ECN_OR_COST(class) TC_PRIO_##class
@@ -259,19 +255,16 @@ static inline void rt_hash_lock_init(void)
259static struct rt_hash_bucket *rt_hash_table; 255static struct rt_hash_bucket *rt_hash_table;
260static unsigned rt_hash_mask; 256static unsigned rt_hash_mask;
261static unsigned int rt_hash_log; 257static unsigned int rt_hash_log;
262static unsigned int rt_hash_rnd; 258static atomic_t rt_genid;
263 259
264static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 260static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
265#define RT_CACHE_STAT_INC(field) \ 261#define RT_CACHE_STAT_INC(field) \
266 (__raw_get_cpu_var(rt_cache_stat).field++) 262 (__raw_get_cpu_var(rt_cache_stat).field++)
267 263
268static int rt_intern_hash(unsigned hash, struct rtable *rth,
269 struct rtable **res);
270
271static unsigned int rt_hash_code(u32 daddr, u32 saddr) 264static unsigned int rt_hash_code(u32 daddr, u32 saddr)
272{ 265{
273 return (jhash_2words(daddr, saddr, rt_hash_rnd) 266 return jhash_2words(daddr, saddr, atomic_read(&rt_genid))
274 & rt_hash_mask); 267 & rt_hash_mask;
275} 268}
276 269
277#define rt_hash(daddr, saddr, idx) \ 270#define rt_hash(daddr, saddr, idx) \
@@ -281,27 +274,28 @@ static unsigned int rt_hash_code(u32 daddr, u32 saddr)
281#ifdef CONFIG_PROC_FS 274#ifdef CONFIG_PROC_FS
282struct rt_cache_iter_state { 275struct rt_cache_iter_state {
283 int bucket; 276 int bucket;
277 int genid;
284}; 278};
285 279
286static struct rtable *rt_cache_get_first(struct seq_file *seq) 280static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
287{ 281{
288 struct rtable *r = NULL; 282 struct rtable *r = NULL;
289 struct rt_cache_iter_state *st = seq->private;
290 283
291 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { 284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
292 rcu_read_lock_bh(); 285 rcu_read_lock_bh();
293 r = rt_hash_table[st->bucket].chain; 286 r = rcu_dereference(rt_hash_table[st->bucket].chain);
294 if (r) 287 while (r) {
295 break; 288 if (r->rt_genid == st->genid)
289 return r;
290 r = rcu_dereference(r->u.dst.rt_next);
291 }
296 rcu_read_unlock_bh(); 292 rcu_read_unlock_bh();
297 } 293 }
298 return rcu_dereference(r); 294 return r;
299} 295}
300 296
301static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r) 297static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct rtable *r)
302{ 298{
303 struct rt_cache_iter_state *st = seq->private;
304
305 r = r->u.dst.rt_next; 299 r = r->u.dst.rt_next;
306 while (!r) { 300 while (!r) {
307 rcu_read_unlock_bh(); 301 rcu_read_unlock_bh();
@@ -313,29 +307,38 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
313 return rcu_dereference(r); 307 return rcu_dereference(r);
314} 308}
315 309
316static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos) 310static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos)
317{ 311{
318 struct rtable *r = rt_cache_get_first(seq); 312 struct rtable *r = rt_cache_get_first(st);
319 313
320 if (r) 314 if (r)
321 while (pos && (r = rt_cache_get_next(seq, r))) 315 while (pos && (r = rt_cache_get_next(st, r))) {
316 if (r->rt_genid != st->genid)
317 continue;
322 --pos; 318 --pos;
319 }
323 return pos ? NULL : r; 320 return pos ? NULL : r;
324} 321}
325 322
326static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 323static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
327{ 324{
328 return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 325 struct rt_cache_iter_state *st = seq->private;
326
327 if (*pos)
328 return rt_cache_get_idx(st, *pos - 1);
329 st->genid = atomic_read(&rt_genid);
330 return SEQ_START_TOKEN;
329} 331}
330 332
331static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) 333static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
332{ 334{
333 struct rtable *r = NULL; 335 struct rtable *r;
336 struct rt_cache_iter_state *st = seq->private;
334 337
335 if (v == SEQ_START_TOKEN) 338 if (v == SEQ_START_TOKEN)
336 r = rt_cache_get_first(seq); 339 r = rt_cache_get_first(st);
337 else 340 else
338 r = rt_cache_get_next(seq, v); 341 r = rt_cache_get_next(st, v);
339 ++*pos; 342 ++*pos;
340 return r; 343 return r;
341} 344}
@@ -708,6 +711,11 @@ static void rt_check_expire(void)
708 continue; 711 continue;
709 spin_lock_bh(rt_hash_lock_addr(i)); 712 spin_lock_bh(rt_hash_lock_addr(i));
710 while ((rth = *rthp) != NULL) { 713 while ((rth = *rthp) != NULL) {
714 if (rth->rt_genid != atomic_read(&rt_genid)) {
715 *rthp = rth->u.dst.rt_next;
716 rt_free(rth);
717 continue;
718 }
711 if (rth->u.dst.expires) { 719 if (rth->u.dst.expires) {
712 /* Entry is expired even if it is in use */ 720 /* Entry is expired even if it is in use */
713 if (time_before_eq(jiffies, rth->u.dst.expires)) { 721 if (time_before_eq(jiffies, rth->u.dst.expires)) {
@@ -732,83 +740,45 @@ static void rt_check_expire(void)
732 740
733/* 741/*
734 * rt_worker_func() is run in process context. 742 * rt_worker_func() is run in process context.
735 * If a whole flush was scheduled, it is done. 743 * We call rt_check_expire() to scan part of the hash table
736 * Else, we call rt_check_expire() to scan part of the hash table
737 */ 744 */
738static void rt_worker_func(struct work_struct *work) 745static void rt_worker_func(struct work_struct *work)
739{ 746{
740 if (ip_rt_flush_expected) { 747 rt_check_expire();
741 ip_rt_flush_expected = 0;
742 rt_do_flush(1);
743 } else
744 rt_check_expire();
745 schedule_delayed_work(&expires_work, ip_rt_gc_interval); 748 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
746} 749}
747 750
748/* This can run from both BH and non-BH contexts, the latter 751/*
749 * in the case of a forced flush event. 752 * Perturbation of rt_genid by a small quantity [1..256]
753 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
754 * many times (2^24) without reusing a recent rt_genid.
755 * Jenkins hash is strong enough that little changes of rt_genid are OK.
750 */ 756 */
751static void rt_run_flush(unsigned long process_context) 757static void rt_cache_invalidate(void)
752{ 758{
753 rt_deadline = 0; 759 unsigned char shuffle;
754
755 get_random_bytes(&rt_hash_rnd, 4);
756 760
757 rt_do_flush(process_context); 761 get_random_bytes(&shuffle, sizeof(shuffle));
762 atomic_add(shuffle + 1U, &rt_genid);
758} 763}
759 764
760static DEFINE_SPINLOCK(rt_flush_lock); 765/*
761 766 * delay < 0 : invalidate cache (fast : entries will be deleted later)
767 * delay >= 0 : invalidate & flush cache (can be long)
768 */
762void rt_cache_flush(int delay) 769void rt_cache_flush(int delay)
763{ 770{
764 unsigned long now = jiffies; 771 rt_cache_invalidate();
765 int user_mode = !in_softirq(); 772 if (delay >= 0)
766 773 rt_do_flush(!in_softirq());
767 if (delay < 0)
768 delay = ip_rt_min_delay;
769
770 spin_lock_bh(&rt_flush_lock);
771
772 if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
773 long tmo = (long)(rt_deadline - now);
774
775 /* If flush timer is already running
776 and flush request is not immediate (delay > 0):
777
778 if deadline is not achieved, prolongate timer to "delay",
779 otherwise fire it at deadline time.
780 */
781
782 if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
783 tmo = 0;
784
785 if (delay > tmo)
786 delay = tmo;
787 }
788
789 if (delay <= 0) {
790 spin_unlock_bh(&rt_flush_lock);
791 rt_run_flush(user_mode);
792 return;
793 }
794
795 if (rt_deadline == 0)
796 rt_deadline = now + ip_rt_max_delay;
797
798 mod_timer(&rt_flush_timer, now+delay);
799 spin_unlock_bh(&rt_flush_lock);
800} 774}
801 775
802/* 776/*
803 * We change rt_hash_rnd and ask next rt_worker_func() invocation 777 * We change rt_genid and let gc do the cleanup
804 * to perform a flush in process context
805 */ 778 */
806static void rt_secret_rebuild(unsigned long dummy) 779static void rt_secret_rebuild(unsigned long dummy)
807{ 780{
808 get_random_bytes(&rt_hash_rnd, 4); 781 rt_cache_invalidate();
809 ip_rt_flush_expected = 1;
810 cancel_delayed_work(&expires_work);
811 schedule_delayed_work(&expires_work, HZ/10);
812 mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval); 782 mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval);
813} 783}
814 784
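The hunks above replace the timed full flush with lazy, generation-based invalidation: rt_cache_invalidate() bumps rt_genid by a random value in [1..256], and every path that walks a hash chain treats an entry whose rt_genid no longer matches as dead and frees it on the spot. A minimal userspace sketch of the same pattern, using invented names (cache_entry, cache_genid and cache_lookup are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stdlib.h>

static atomic_int cache_genid;                  /* global generation counter */

struct cache_entry {
	int genid;                              /* generation the entry was created under */
	struct cache_entry *next;
	/* ... cached data ... */
};

/* Invalidate the whole cache in O(1): shuffle the generation by 1..256 so a
 * recently used value is not handed out again for roughly 2^24 invalidations. */
static void cache_invalidate(void)
{
	atomic_fetch_add(&cache_genid, (rand() & 0xff) + 1);
}

/* Lookup reclaims stale entries as it walks the chain, much as rt_check_expire()
 * and rt_intern_hash() drop rtables whose generation no longer matches. */
static struct cache_entry *cache_lookup(struct cache_entry **head)
{
	int gen = atomic_load(&cache_genid);
	struct cache_entry **pp = head, *e;

	while ((e = *pp) != NULL) {
		if (e->genid != gen) {          /* stale: unlink and free lazily */
			*pp = e->next;
			free(e);
			continue;
		}
		return e;                       /* first live entry */
	}
	return NULL;
}

The trade-off is the one the patch accepts: flushing becomes cheap and lock-free, at the price of stale entries lingering until garbage collection or a later lookup happens to visit them.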
@@ -885,7 +855,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
885 rthp = &rt_hash_table[k].chain; 855 rthp = &rt_hash_table[k].chain;
886 spin_lock_bh(rt_hash_lock_addr(k)); 856 spin_lock_bh(rt_hash_lock_addr(k));
887 while ((rth = *rthp) != NULL) { 857 while ((rth = *rthp) != NULL) {
888 if (!rt_may_expire(rth, tmo, expire)) { 858 if (rth->rt_genid == atomic_read(&rt_genid) &&
859 !rt_may_expire(rth, tmo, expire)) {
889 tmo >>= 1; 860 tmo >>= 1;
890 rthp = &rth->u.dst.rt_next; 861 rthp = &rth->u.dst.rt_next;
891 continue; 862 continue;
@@ -966,6 +937,11 @@ restart:
966 937
967 spin_lock_bh(rt_hash_lock_addr(hash)); 938 spin_lock_bh(rt_hash_lock_addr(hash));
968 while ((rth = *rthp) != NULL) { 939 while ((rth = *rthp) != NULL) {
940 if (rth->rt_genid != atomic_read(&rt_genid)) {
941 *rthp = rth->u.dst.rt_next;
942 rt_free(rth);
943 continue;
944 }
969 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) { 945 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
970 /* Put it first */ 946 /* Put it first */
971 *rthp = rth->u.dst.rt_next; 947 *rthp = rth->u.dst.rt_next;
@@ -1131,17 +1107,19 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1131 1107
1132static void rt_del(unsigned hash, struct rtable *rt) 1108static void rt_del(unsigned hash, struct rtable *rt)
1133{ 1109{
1134 struct rtable **rthp; 1110 struct rtable **rthp, *aux;
1135 1111
1112 rthp = &rt_hash_table[hash].chain;
1136 spin_lock_bh(rt_hash_lock_addr(hash)); 1113 spin_lock_bh(rt_hash_lock_addr(hash));
1137 ip_rt_put(rt); 1114 ip_rt_put(rt);
1138 for (rthp = &rt_hash_table[hash].chain; *rthp; 1115 while ((aux = *rthp) != NULL) {
1139 rthp = &(*rthp)->u.dst.rt_next) 1116 if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) {
1140 if (*rthp == rt) { 1117 *rthp = aux->u.dst.rt_next;
1141 *rthp = rt->u.dst.rt_next; 1118 rt_free(aux);
1142 rt_free(rt); 1119 continue;
1143 break;
1144 } 1120 }
1121 rthp = &aux->u.dst.rt_next;
1122 }
1145 spin_unlock_bh(rt_hash_lock_addr(hash)); 1123 spin_unlock_bh(rt_hash_lock_addr(hash));
1146} 1124}
1147 1125
@@ -1186,7 +1164,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1186 if (rth->fl.fl4_dst != daddr || 1164 if (rth->fl.fl4_dst != daddr ||
1187 rth->fl.fl4_src != skeys[i] || 1165 rth->fl.fl4_src != skeys[i] ||
1188 rth->fl.oif != ikeys[k] || 1166 rth->fl.oif != ikeys[k] ||
1189 rth->fl.iif != 0) { 1167 rth->fl.iif != 0 ||
1168 rth->rt_genid != atomic_read(&rt_genid)) {
1190 rthp = &rth->u.dst.rt_next; 1169 rthp = &rth->u.dst.rt_next;
1191 continue; 1170 continue;
1192 } 1171 }
@@ -1224,7 +1203,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1224 rt->u.dst.neighbour = NULL; 1203 rt->u.dst.neighbour = NULL;
1225 rt->u.dst.hh = NULL; 1204 rt->u.dst.hh = NULL;
1226 rt->u.dst.xfrm = NULL; 1205 rt->u.dst.xfrm = NULL;
1227 1206 rt->rt_genid = atomic_read(&rt_genid);
1228 rt->rt_flags |= RTCF_REDIRECTED; 1207 rt->rt_flags |= RTCF_REDIRECTED;
1229 1208
1230 /* Gateway is different ... */ 1209 /* Gateway is different ... */
@@ -1445,7 +1424,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1445 rth->rt_src == iph->saddr && 1424 rth->rt_src == iph->saddr &&
1446 rth->fl.iif == 0 && 1425 rth->fl.iif == 0 &&
1447 !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) && 1426 !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
1448 rth->u.dst.dev->nd_net == net) { 1427 rth->u.dst.dev->nd_net == net &&
1428 rth->rt_genid == atomic_read(&rt_genid)) {
1449 unsigned short mtu = new_mtu; 1429 unsigned short mtu = new_mtu;
1450 1430
1451 if (new_mtu < 68 || new_mtu >= old_mtu) { 1431 if (new_mtu < 68 || new_mtu >= old_mtu) {
@@ -1680,8 +1660,9 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1680 rth->fl.oif = 0; 1660 rth->fl.oif = 0;
1681 rth->rt_gateway = daddr; 1661 rth->rt_gateway = daddr;
1682 rth->rt_spec_dst= spec_dst; 1662 rth->rt_spec_dst= spec_dst;
1683 rth->rt_type = RTN_MULTICAST; 1663 rth->rt_genid = atomic_read(&rt_genid);
1684 rth->rt_flags = RTCF_MULTICAST; 1664 rth->rt_flags = RTCF_MULTICAST;
1665 rth->rt_type = RTN_MULTICAST;
1685 if (our) { 1666 if (our) {
1686 rth->u.dst.input= ip_local_deliver; 1667 rth->u.dst.input= ip_local_deliver;
1687 rth->rt_flags |= RTCF_LOCAL; 1668 rth->rt_flags |= RTCF_LOCAL;
@@ -1820,6 +1801,7 @@ static inline int __mkroute_input(struct sk_buff *skb,
1820 1801
1821 rth->u.dst.input = ip_forward; 1802 rth->u.dst.input = ip_forward;
1822 rth->u.dst.output = ip_output; 1803 rth->u.dst.output = ip_output;
1804 rth->rt_genid = atomic_read(&rt_genid);
1823 1805
1824 rt_set_nexthop(rth, res, itag); 1806 rt_set_nexthop(rth, res, itag);
1825 1807
@@ -1980,6 +1962,7 @@ local_input:
1980 goto e_nobufs; 1962 goto e_nobufs;
1981 1963
1982 rth->u.dst.output= ip_rt_bug; 1964 rth->u.dst.output= ip_rt_bug;
1965 rth->rt_genid = atomic_read(&rt_genid);
1983 1966
1984 atomic_set(&rth->u.dst.__refcnt, 1); 1967 atomic_set(&rth->u.dst.__refcnt, 1);
1985 rth->u.dst.flags= DST_HOST; 1968 rth->u.dst.flags= DST_HOST;
@@ -2071,7 +2054,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2071 rth->fl.oif == 0 && 2054 rth->fl.oif == 0 &&
2072 rth->fl.mark == skb->mark && 2055 rth->fl.mark == skb->mark &&
2073 rth->fl.fl4_tos == tos && 2056 rth->fl.fl4_tos == tos &&
2074 rth->u.dst.dev->nd_net == net) { 2057 rth->u.dst.dev->nd_net == net &&
2058 rth->rt_genid == atomic_read(&rt_genid)) {
2075 dst_use(&rth->u.dst, jiffies); 2059 dst_use(&rth->u.dst, jiffies);
2076 RT_CACHE_STAT_INC(in_hit); 2060 RT_CACHE_STAT_INC(in_hit);
2077 rcu_read_unlock(); 2061 rcu_read_unlock();
@@ -2199,6 +2183,7 @@ static inline int __mkroute_output(struct rtable **result,
2199 rth->rt_spec_dst= fl->fl4_src; 2183 rth->rt_spec_dst= fl->fl4_src;
2200 2184
2201 rth->u.dst.output=ip_output; 2185 rth->u.dst.output=ip_output;
2186 rth->rt_genid = atomic_read(&rt_genid);
2202 2187
2203 RT_CACHE_STAT_INC(out_slow_tot); 2188 RT_CACHE_STAT_INC(out_slow_tot);
2204 2189
@@ -2471,7 +2456,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2471 rth->fl.mark == flp->mark && 2456 rth->fl.mark == flp->mark &&
2472 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2457 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2473 (IPTOS_RT_MASK | RTO_ONLINK)) && 2458 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2474 rth->u.dst.dev->nd_net == net) { 2459 rth->u.dst.dev->nd_net == net &&
2460 rth->rt_genid == atomic_read(&rt_genid)) {
2475 dst_use(&rth->u.dst, jiffies); 2461 dst_use(&rth->u.dst, jiffies);
2476 RT_CACHE_STAT_INC(out_hit); 2462 RT_CACHE_STAT_INC(out_hit);
2477 rcu_read_unlock_bh(); 2463 rcu_read_unlock_bh();
@@ -2498,6 +2484,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2498 .check = ipv4_dst_check, 2484 .check = ipv4_dst_check,
2499 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2485 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2500 .entry_size = sizeof(struct rtable), 2486 .entry_size = sizeof(struct rtable),
2487 .entries = ATOMIC_INIT(0),
2501}; 2488};
2502 2489
2503 2490
@@ -2525,6 +2512,7 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock
2525 rt->idev = ort->idev; 2512 rt->idev = ort->idev;
2526 if (rt->idev) 2513 if (rt->idev)
2527 in_dev_hold(rt->idev); 2514 in_dev_hold(rt->idev);
2515 rt->rt_genid = atomic_read(&rt_genid);
2528 rt->rt_flags = ort->rt_flags; 2516 rt->rt_flags = ort->rt_flags;
2529 rt->rt_type = ort->rt_type; 2517 rt->rt_type = ort->rt_type;
2530 rt->rt_dst = ort->rt_dst; 2518 rt->rt_dst = ort->rt_dst;
@@ -2779,6 +2767,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2779 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2767 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
2780 if (idx < s_idx) 2768 if (idx < s_idx)
2781 continue; 2769 continue;
2770 if (rt->rt_genid != atomic_read(&rt_genid))
2771 continue;
2782 skb->dst = dst_clone(&rt->u.dst); 2772 skb->dst = dst_clone(&rt->u.dst);
2783 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 2773 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
2784 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 2774 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
@@ -2848,24 +2838,6 @@ ctl_table ipv4_route_table[] = {
2848 .strategy = &ipv4_sysctl_rtcache_flush_strategy, 2838 .strategy = &ipv4_sysctl_rtcache_flush_strategy,
2849 }, 2839 },
2850 { 2840 {
2851 .ctl_name = NET_IPV4_ROUTE_MIN_DELAY,
2852 .procname = "min_delay",
2853 .data = &ip_rt_min_delay,
2854 .maxlen = sizeof(int),
2855 .mode = 0644,
2856 .proc_handler = &proc_dointvec_jiffies,
2857 .strategy = &sysctl_jiffies,
2858 },
2859 {
2860 .ctl_name = NET_IPV4_ROUTE_MAX_DELAY,
2861 .procname = "max_delay",
2862 .data = &ip_rt_max_delay,
2863 .maxlen = sizeof(int),
2864 .mode = 0644,
2865 .proc_handler = &proc_dointvec_jiffies,
2866 .strategy = &sysctl_jiffies,
2867 },
2868 {
2869 .ctl_name = NET_IPV4_ROUTE_GC_THRESH, 2841 .ctl_name = NET_IPV4_ROUTE_GC_THRESH,
2870 .procname = "gc_thresh", 2842 .procname = "gc_thresh",
2871 .data = &ipv4_dst_ops.gc_thresh, 2843 .data = &ipv4_dst_ops.gc_thresh,
@@ -3023,8 +2995,8 @@ int __init ip_rt_init(void)
3023{ 2995{
3024 int rc = 0; 2996 int rc = 0;
3025 2997
3026 rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^ 2998 atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^
3027 (jiffies ^ (jiffies >> 7))); 2999 (jiffies ^ (jiffies >> 7))));
3028 3000
3029#ifdef CONFIG_NET_CLS_ROUTE 3001#ifdef CONFIG_NET_CLS_ROUTE
3030 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); 3002 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
@@ -3057,7 +3029,6 @@ int __init ip_rt_init(void)
3057 devinet_init(); 3029 devinet_init();
3058 ip_fib_init(); 3030 ip_fib_init();
3059 3031
3060 setup_timer(&rt_flush_timer, rt_run_flush, 0);
3061 setup_timer(&rt_secret_timer, rt_secret_rebuild, 0); 3032 setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);
3062 3033
3063 /* All the timers, started at system startup tend 3034 /* All the timers, started at system startup tend
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 82cdf23837e3..88286f35d1e2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -185,7 +185,7 @@ static int strategy_allowed_congestion_control(ctl_table *table, int __user *nam
185 185
186 tcp_get_available_congestion_control(tbl.data, tbl.maxlen); 186 tcp_get_available_congestion_control(tbl.data, tbl.maxlen);
187 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen); 187 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
188 if (ret == 0 && newval && newlen) 188 if (ret == 1 && newval && newlen)
189 ret = tcp_set_allowed_congestion_control(tbl.data); 189 ret = tcp_set_allowed_congestion_control(tbl.data);
190 kfree(tbl.data); 190 kfree(tbl.data);
191 191
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fa2c85ca5bc3..19c449f62672 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2153,7 +2153,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
2153 tp->lost_skb_hint = skb; 2153 tp->lost_skb_hint = skb;
2154 tp->lost_cnt_hint = cnt; 2154 tp->lost_cnt_hint = cnt;
2155 2155
2156 if (tcp_is_fack(tp) || 2156 if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
2157 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2157 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
2158 cnt += tcp_skb_pcount(skb); 2158 cnt += tcp_skb_pcount(skb);
2159 2159
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9aea88b8d4fc..77c1939a2b0d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -369,8 +369,8 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
369 return; 369 return;
370 } 370 }
371 371
372 sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr, 372 sk = inet_lookup(skb->dev->nd_net, &tcp_hashinfo, iph->daddr, th->dest,
373 th->source, inet_iif(skb)); 373 iph->saddr, th->source, inet_iif(skb));
374 if (!sk) { 374 if (!sk) {
375 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 375 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
376 return; 376 return;
@@ -1503,8 +1503,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1503 if (req) 1503 if (req)
1504 return tcp_check_req(sk, skb, req, prev); 1504 return tcp_check_req(sk, skb, req, prev);
1505 1505
1506 nsk = inet_lookup_established(&tcp_hashinfo, iph->saddr, th->source, 1506 nsk = inet_lookup_established(sk->sk_net, &tcp_hashinfo, iph->saddr,
1507 iph->daddr, th->dest, inet_iif(skb)); 1507 th->source, iph->daddr, th->dest, inet_iif(skb));
1508 1508
1509 if (nsk) { 1509 if (nsk) {
1510 if (nsk->sk_state != TCP_TIME_WAIT) { 1510 if (nsk->sk_state != TCP_TIME_WAIT) {
@@ -1661,8 +1661,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
1661 TCP_SKB_CB(skb)->flags = iph->tos; 1661 TCP_SKB_CB(skb)->flags = iph->tos;
1662 TCP_SKB_CB(skb)->sacked = 0; 1662 TCP_SKB_CB(skb)->sacked = 0;
1663 1663
1664 sk = __inet_lookup(&tcp_hashinfo, iph->saddr, th->source, 1664 sk = __inet_lookup(skb->dev->nd_net, &tcp_hashinfo, iph->saddr,
1665 iph->daddr, th->dest, inet_iif(skb)); 1665 th->source, iph->daddr, th->dest, inet_iif(skb));
1666 if (!sk) 1666 if (!sk)
1667 goto no_tcp_socket; 1667 goto no_tcp_socket;
1668 1668
@@ -1735,7 +1735,8 @@ do_time_wait:
1735 } 1735 }
1736 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1736 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1737 case TCP_TW_SYN: { 1737 case TCP_TW_SYN: {
1738 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, 1738 struct sock *sk2 = inet_lookup_listener(skb->dev->nd_net,
1739 &tcp_hashinfo,
1739 iph->daddr, th->dest, 1740 iph->daddr, th->dest,
1740 inet_iif(skb)); 1741 inet_iif(skb));
1741 if (sk2) { 1742 if (sk2) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 89f0188885c7..ed750f9ceb07 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2564,5 +2564,4 @@ EXPORT_SYMBOL(tcp_connect);
2564EXPORT_SYMBOL(tcp_make_synack); 2564EXPORT_SYMBOL(tcp_make_synack);
2565EXPORT_SYMBOL(tcp_simple_retransmit); 2565EXPORT_SYMBOL(tcp_simple_retransmit);
2566EXPORT_SYMBOL(tcp_sync_mss); 2566EXPORT_SYMBOL(tcp_sync_mss);
2567EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
2568EXPORT_SYMBOL(tcp_mtup_init); 2567EXPORT_SYMBOL(tcp_mtup_init);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 2fb8d731026b..7ea1b67b6de1 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -130,14 +130,14 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
130atomic_t udp_memory_allocated; 130atomic_t udp_memory_allocated;
131EXPORT_SYMBOL(udp_memory_allocated); 131EXPORT_SYMBOL(udp_memory_allocated);
132 132
133static inline int __udp_lib_lport_inuse(__u16 num, 133static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
134 const struct hlist_head udptable[]) 134 const struct hlist_head udptable[])
135{ 135{
136 struct sock *sk; 136 struct sock *sk;
137 struct hlist_node *node; 137 struct hlist_node *node;
138 138
139 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) 139 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
140 if (sk->sk_hash == num) 140 if (sk->sk_net == net && sk->sk_hash == num)
141 return 1; 141 return 1;
142 return 0; 142 return 0;
143} 143}
@@ -159,6 +159,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
159 struct hlist_head *head; 159 struct hlist_head *head;
160 struct sock *sk2; 160 struct sock *sk2;
161 int error = 1; 161 int error = 1;
162 struct net *net = sk->sk_net;
162 163
163 write_lock_bh(&udp_hash_lock); 164 write_lock_bh(&udp_hash_lock);
164 165
@@ -198,7 +199,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
198 /* 2nd pass: find hole in shortest hash chain */ 199 /* 2nd pass: find hole in shortest hash chain */
199 rover = best; 200 rover = best;
200 for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) { 201 for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) {
201 if (! __udp_lib_lport_inuse(rover, udptable)) 202 if (! __udp_lib_lport_inuse(net, rover, udptable))
202 goto gotit; 203 goto gotit;
203 rover += UDP_HTABLE_SIZE; 204 rover += UDP_HTABLE_SIZE;
204 if (rover > high) 205 if (rover > high)
@@ -218,6 +219,7 @@ gotit:
218 sk_for_each(sk2, node, head) 219 sk_for_each(sk2, node, head)
219 if (sk2->sk_hash == snum && 220 if (sk2->sk_hash == snum &&
220 sk2 != sk && 221 sk2 != sk &&
222 sk2->sk_net == net &&
221 (!sk2->sk_reuse || !sk->sk_reuse) && 223 (!sk2->sk_reuse || !sk->sk_reuse) &&
222 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if 224 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
223 || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && 225 || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
@@ -261,9 +263,9 @@ static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
261/* UDP is nearly always wildcards out the wazoo, it makes no sense to try 263/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
262 * harder than this. -DaveM 264 * harder than this. -DaveM
263 */ 265 */
264static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport, 266static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
265 __be32 daddr, __be16 dport, 267 __be16 sport, __be32 daddr, __be16 dport,
266 int dif, struct hlist_head udptable[]) 268 int dif, struct hlist_head udptable[])
267{ 269{
268 struct sock *sk, *result = NULL; 270 struct sock *sk, *result = NULL;
269 struct hlist_node *node; 271 struct hlist_node *node;
@@ -274,7 +276,8 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
274 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 276 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
275 struct inet_sock *inet = inet_sk(sk); 277 struct inet_sock *inet = inet_sk(sk);
276 278
277 if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) { 279 if (sk->sk_net == net && sk->sk_hash == hnum &&
280 !ipv6_only_sock(sk)) {
278 int score = (sk->sk_family == PF_INET ? 1 : 0); 281 int score = (sk->sk_family == PF_INET ? 1 : 0);
279 if (inet->rcv_saddr) { 282 if (inet->rcv_saddr) {
280 if (inet->rcv_saddr != daddr) 283 if (inet->rcv_saddr != daddr)
@@ -361,8 +364,8 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
361 int harderr; 364 int harderr;
362 int err; 365 int err;
363 366
364 sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, 367 sk = __udp4_lib_lookup(skb->dev->nd_net, iph->daddr, uh->dest,
365 skb->dev->ifindex, udptable ); 368 iph->saddr, uh->source, skb->dev->ifindex, udptable);
366 if (sk == NULL) { 369 if (sk == NULL) {
367 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 370 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
368 return; /* No socket for error */ 371 return; /* No socket for error */
@@ -1185,8 +1188,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1185 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 1188 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1186 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); 1189 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
1187 1190
1188 sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest, 1191 sk = __udp4_lib_lookup(skb->dev->nd_net, saddr, uh->source, daddr,
1189 inet_iif(skb), udptable); 1192 uh->dest, inet_iif(skb), udptable);
1190 1193
1191 if (sk != NULL) { 1194 if (sk != NULL) {
1192 int ret = 0; 1195 int ret = 0;
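The udp.c hunks above thread a struct net through port allocation and socket lookup so that a socket only matches traffic from its own namespace; the score-based best-match selection is otherwise unchanged. A simplified userspace sketch of that selection loop, where fake_sock and the scoring shown are illustrative rather than the kernel structures:

struct fake_sock {
	const void *net;                /* owning namespace */
	unsigned short hnum;            /* bound local port, host order */
	unsigned int rcv_saddr;         /* bound local address, 0 = wildcard */
	struct fake_sock *next;
};

/* Pick the most specific socket for (net, daddr, hnum): sockets bound to a
 * concrete address beat wildcard binds, and sockets from a foreign namespace
 * never match at all. */
static struct fake_sock *lookup(struct fake_sock *chain, const void *net,
				unsigned int daddr, unsigned short hnum)
{
	struct fake_sock *sk, *result = NULL;
	int badness = -1;

	for (sk = chain; sk; sk = sk->next) {
		int score;

		if (sk->net != net || sk->hnum != hnum)
			continue;
		score = 0;
		if (sk->rcv_saddr) {
			if (sk->rcv_saddr != daddr)
				continue;
			score++;
		}
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}
	return result;
}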
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 3783e3ee56a4..10ed70491434 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -247,6 +247,7 @@ static struct dst_ops xfrm4_dst_ops = {
247 .local_out = __ip_local_out, 247 .local_out = __ip_local_out,
248 .gc_thresh = 1024, 248 .gc_thresh = 1024,
249 .entry_size = sizeof(struct xfrm_dst), 249 .entry_size = sizeof(struct xfrm_dst),
250 .entries = ATOMIC_INIT(0),
250}; 251};
251 252
252static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { 253static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 326845195620..41f5982d2087 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -38,7 +38,7 @@ static void ipip_destroy(struct xfrm_state *x)
38{ 38{
39} 39}
40 40
41static struct xfrm_type ipip_type = { 41static const struct xfrm_type ipip_type = {
42 .description = "IPIP", 42 .description = "IPIP",
43 .owner = THIS_MODULE, 43 .owner = THIS_MODULE,
44 .proto = IPPROTO_IPIP, 44 .proto = IPPROTO_IPIP,
@@ -50,7 +50,7 @@ static struct xfrm_type ipip_type = {
50 50
51static int xfrm_tunnel_rcv(struct sk_buff *skb) 51static int xfrm_tunnel_rcv(struct sk_buff *skb)
52{ 52{
53 return xfrm4_rcv_spi(skb, IPPROTO_IP, ip_hdr(skb)->saddr); 53 return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
54} 54}
55 55
56static int xfrm_tunnel_err(struct sk_buff *skb, u32 info) 56static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index eb0b8085949b..3ffb0323668c 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -85,6 +85,7 @@ config INET6_ESP
85 depends on IPV6 85 depends on IPV6
86 select XFRM 86 select XFRM
87 select CRYPTO 87 select CRYPTO
88 select CRYPTO_AEAD
88 select CRYPTO_HMAC 89 select CRYPTO_HMAC
89 select CRYPTO_MD5 90 select CRYPTO_MD5
90 select CRYPTO_CBC 91 select CRYPTO_CBC
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index fb0d07a15e93..379c8e04c36c 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -515,7 +515,7 @@ static void ah6_destroy(struct xfrm_state *x)
515 kfree(ahp); 515 kfree(ahp);
516} 516}
517 517
518static struct xfrm_type ah6_type = 518static const struct xfrm_type ah6_type =
519{ 519{
520 .description = "AH6", 520 .description = "AH6",
521 .owner = THIS_MODULE, 521 .owner = THIS_MODULE,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5bd5292ad9fa..8e0f1428c716 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,33 +24,124 @@
24 * This file is derived from net/ipv4/esp.c 24 * This file is derived from net/ipv4/esp.c
25 */ 25 */
26 26
27#include <crypto/aead.h>
28#include <crypto/authenc.h>
27#include <linux/err.h> 29#include <linux/err.h>
28#include <linux/module.h> 30#include <linux/module.h>
29#include <net/ip.h> 31#include <net/ip.h>
30#include <net/xfrm.h> 32#include <net/xfrm.h>
31#include <net/esp.h> 33#include <net/esp.h>
32#include <linux/scatterlist.h> 34#include <linux/scatterlist.h>
33#include <linux/crypto.h>
34#include <linux/kernel.h> 35#include <linux/kernel.h>
35#include <linux/pfkeyv2.h> 36#include <linux/pfkeyv2.h>
36#include <linux/random.h> 37#include <linux/random.h>
38#include <linux/slab.h>
37#include <linux/spinlock.h> 39#include <linux/spinlock.h>
38#include <net/icmp.h> 40#include <net/icmp.h>
39#include <net/ipv6.h> 41#include <net/ipv6.h>
40#include <net/protocol.h> 42#include <net/protocol.h>
41#include <linux/icmpv6.h> 43#include <linux/icmpv6.h>
42 44
45struct esp_skb_cb {
46 struct xfrm_skb_cb xfrm;
47 void *tmp;
48};
49
50#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
51
52/*
53 * Allocate an AEAD request structure with extra space for SG and IV.
54 *
55 * For alignment considerations the IV is placed at the front, followed
56 * by the request and finally the SG list.
57 *
58 * TODO: Use spare space in skb for this where possible.
59 */
60static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
61{
62 unsigned int len;
63
64 len = crypto_aead_ivsize(aead);
65 if (len) {
66 len += crypto_aead_alignmask(aead) &
67 ~(crypto_tfm_ctx_alignment() - 1);
68 len = ALIGN(len, crypto_tfm_ctx_alignment());
69 }
70
71 len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
72 len = ALIGN(len, __alignof__(struct scatterlist));
73
74 len += sizeof(struct scatterlist) * nfrags;
75
76 return kmalloc(len, GFP_ATOMIC);
77}
78
79static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
80{
81 return crypto_aead_ivsize(aead) ?
82 PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
83}
84
85static inline struct aead_givcrypt_request *esp_tmp_givreq(
86 struct crypto_aead *aead, u8 *iv)
87{
88 struct aead_givcrypt_request *req;
89
90 req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
91 crypto_tfm_ctx_alignment());
92 aead_givcrypt_set_tfm(req, aead);
93 return req;
94}
95
96static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
97{
98 struct aead_request *req;
99
100 req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
101 crypto_tfm_ctx_alignment());
102 aead_request_set_tfm(req, aead);
103 return req;
104}
105
106static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
107 struct aead_request *req)
108{
109 return (void *)ALIGN((unsigned long)(req + 1) +
110 crypto_aead_reqsize(aead),
111 __alignof__(struct scatterlist));
112}
113
114static inline struct scatterlist *esp_givreq_sg(
115 struct crypto_aead *aead, struct aead_givcrypt_request *req)
116{
117 return (void *)ALIGN((unsigned long)(req + 1) +
118 crypto_aead_reqsize(aead),
119 __alignof__(struct scatterlist));
120}
121
122static void esp_output_done(struct crypto_async_request *base, int err)
123{
124 struct sk_buff *skb = base->data;
125
126 kfree(ESP_SKB_CB(skb)->tmp);
127 xfrm_output_resume(skb, err);
128}
129
43static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) 130static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
44{ 131{
45 int err; 132 int err;
46 struct ip_esp_hdr *esph; 133 struct ip_esp_hdr *esph;
47 struct crypto_blkcipher *tfm; 134 struct crypto_aead *aead;
48 struct blkcipher_desc desc; 135 struct aead_givcrypt_request *req;
136 struct scatterlist *sg;
137 struct scatterlist *asg;
49 struct sk_buff *trailer; 138 struct sk_buff *trailer;
139 void *tmp;
50 int blksize; 140 int blksize;
51 int clen; 141 int clen;
52 int alen; 142 int alen;
53 int nfrags; 143 int nfrags;
144 u8 *iv;
54 u8 *tail; 145 u8 *tail;
55 struct esp_data *esp = x->data; 146 struct esp_data *esp = x->data;
56 147
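The esp_alloc_tmp()/esp_tmp_*() helpers added above pack the IV, the AEAD request and the scatterlist into one per-packet allocation, each part aligned for its consumer. A rough userspace sketch of that layout arithmetic, where ALIGN_UP, CTX_ALIGN, fake_req and struct sg are stand-ins for the kernel macros and types, not real APIs:

#include <stdint.h>
#include <stdlib.h>

#define CTX_ALIGN	8	/* stand-in for crypto_tfm_ctx_alignment() */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct fake_req { char opaque[64]; };		/* stand-in for the AEAD request */
struct sg { void *page; unsigned int len; };	/* stand-in for scatterlist */

/* One buffer laid out as [IV][request][SG entries]. */
static void *tmp_alloc(size_t ivsize, int nfrags)
{
	size_t len = ALIGN_UP(ivsize, CTX_ALIGN);	/* IV, padded up to the request */

	len = ALIGN_UP(len + sizeof(struct fake_req), __alignof__(struct sg));
	len += sizeof(struct sg) * nfrags;
	return malloc(len);				/* the kernel uses GFP_ATOMIC here */
}

static uint8_t *tmp_iv(void *tmp)
{
	return tmp;			/* malloc() already satisfies the IV alignment here */
}

static struct fake_req *tmp_req(void *tmp, size_t ivsize)
{
	return (struct fake_req *)ALIGN_UP((uintptr_t)tmp + ivsize, CTX_ALIGN);
}

static struct sg *tmp_sg(struct fake_req *req)
{
	return (struct sg *)ALIGN_UP((uintptr_t)(req + 1), __alignof__(struct sg));
}

Keeping everything in a single allocation is what lets the asynchronous completion callbacks free ESP_SKB_CB(skb)->tmp with one kfree() and resume the packet.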
@@ -60,18 +151,26 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
60 /* Round to block size */ 151 /* Round to block size */
61 clen = skb->len; 152 clen = skb->len;
62 153
63 alen = esp->auth.icv_trunc_len; 154 aead = esp->aead;
64 tfm = esp->conf.tfm; 155 alen = crypto_aead_authsize(aead);
65 desc.tfm = tfm; 156
66 desc.flags = 0; 157 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
67 blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
68 clen = ALIGN(clen + 2, blksize); 158 clen = ALIGN(clen + 2, blksize);
69 if (esp->conf.padlen) 159 if (esp->padlen)
70 clen = ALIGN(clen, esp->conf.padlen); 160 clen = ALIGN(clen, esp->padlen);
71 161
72 if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) { 162 if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
73 goto error; 163 goto error;
74 } 164 nfrags = err;
165
166 tmp = esp_alloc_tmp(aead, nfrags + 1);
167 if (!tmp)
168 goto error;
169
170 iv = esp_tmp_iv(aead, tmp);
171 req = esp_tmp_givreq(aead, iv);
172 asg = esp_givreq_sg(aead, req);
173 sg = asg + 1;
75 174
76 /* Fill padding... */ 175 /* Fill padding... */
77 tail = skb_tail_pointer(trailer); 176 tail = skb_tail_pointer(trailer);
@@ -81,86 +180,113 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
81 tail[i] = i + 1; 180 tail[i] = i + 1;
82 } while (0); 181 } while (0);
83 tail[clen-skb->len - 2] = (clen - skb->len) - 2; 182 tail[clen-skb->len - 2] = (clen - skb->len) - 2;
84 pskb_put(skb, trailer, clen - skb->len); 183 tail[clen - skb->len - 1] = *skb_mac_header(skb);
184 pskb_put(skb, trailer, clen - skb->len + alen);
85 185
86 skb_push(skb, -skb_network_offset(skb)); 186 skb_push(skb, -skb_network_offset(skb));
87 esph = ip_esp_hdr(skb); 187 esph = ip_esp_hdr(skb);
88 *(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
89 *skb_mac_header(skb) = IPPROTO_ESP; 188 *skb_mac_header(skb) = IPPROTO_ESP;
90 189
91 esph->spi = x->id.spi; 190 esph->spi = x->id.spi;
92 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq); 191 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
93 192
94 spin_lock_bh(&x->lock); 193 sg_init_table(sg, nfrags);
194 skb_to_sgvec(skb, sg,
195 esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
196 clen + alen);
197 sg_init_one(asg, esph, sizeof(*esph));
95 198
96 if (esp->conf.ivlen) { 199 aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
97 if (unlikely(!esp->conf.ivinitted)) { 200 aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
98 get_random_bytes(esp->conf.ivec, esp->conf.ivlen); 201 aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
99 esp->conf.ivinitted = 1; 202 aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
100 }
101 crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
102 }
103 203
104 do { 204 ESP_SKB_CB(skb)->tmp = tmp;
105 struct scatterlist *sg = &esp->sgbuf[0]; 205 err = crypto_aead_givencrypt(req);
206 if (err == -EINPROGRESS)
207 goto error;
106 208
107 if (unlikely(nfrags > ESP_NUM_FAST_SG)) { 209 if (err == -EBUSY)
108 sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); 210 err = NET_XMIT_DROP;
109 if (!sg) 211
110 goto unlock; 212 kfree(tmp);
111 } 213
112 sg_init_table(sg, nfrags); 214error:
113 skb_to_sgvec(skb, sg, 215 return err;
114 esph->enc_data + 216}
115 esp->conf.ivlen - 217
116 skb->data, clen); 218static int esp_input_done2(struct sk_buff *skb, int err)
117 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); 219{
118 if (unlikely(sg != &esp->sgbuf[0])) 220 struct xfrm_state *x = xfrm_input_state(skb);
119 kfree(sg); 221 struct esp_data *esp = x->data;
120 } while (0); 222 struct crypto_aead *aead = esp->aead;
223 int alen = crypto_aead_authsize(aead);
224 int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
225 int elen = skb->len - hlen;
226 int hdr_len = skb_network_header_len(skb);
227 int padlen;
228 u8 nexthdr[2];
229
230 kfree(ESP_SKB_CB(skb)->tmp);
121 231
122 if (unlikely(err)) 232 if (unlikely(err))
123 goto unlock; 233 goto out;
124 234
125 if (esp->conf.ivlen) { 235 if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
126 memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); 236 BUG();
127 crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
128 }
129 237
130 if (esp->auth.icv_full_len) { 238 err = -EINVAL;
131 err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, 239 padlen = nexthdr[0];
132 sizeof(*esph) + esp->conf.ivlen + clen); 240 if (padlen + 2 + alen >= elen) {
133 memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); 241 LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
242 "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
243 goto out;
134 } 244 }
135 245
136unlock: 246 /* ... check padding bits here. Silly. :-) */
137 spin_unlock_bh(&x->lock);
138 247
139error: 248 pskb_trim(skb, skb->len - alen - padlen - 2);
249 __skb_pull(skb, hlen);
250 skb_set_transport_header(skb, -hdr_len);
251
252 err = nexthdr[1];
253
254 /* RFC4303: Drop dummy packets without any error */
255 if (err == IPPROTO_NONE)
256 err = -EINVAL;
257
258out:
140 return err; 259 return err;
141} 260}
142 261
262static void esp_input_done(struct crypto_async_request *base, int err)
263{
264 struct sk_buff *skb = base->data;
265
266 xfrm_input_resume(skb, esp_input_done2(skb, err));
267}
268
143static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) 269static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
144{ 270{
145 struct ipv6hdr *iph;
146 struct ip_esp_hdr *esph; 271 struct ip_esp_hdr *esph;
147 struct esp_data *esp = x->data; 272 struct esp_data *esp = x->data;
148 struct crypto_blkcipher *tfm = esp->conf.tfm; 273 struct crypto_aead *aead = esp->aead;
149 struct blkcipher_desc desc = { .tfm = tfm }; 274 struct aead_request *req;
150 struct sk_buff *trailer; 275 struct sk_buff *trailer;
151 int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); 276 int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
152 int alen = esp->auth.icv_trunc_len;
153 int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
154 int hdr_len = skb_network_header_len(skb);
155 int nfrags; 277 int nfrags;
156 int ret = 0; 278 int ret = 0;
279 void *tmp;
280 u8 *iv;
281 struct scatterlist *sg;
282 struct scatterlist *asg;
157 283
158 if (!pskb_may_pull(skb, sizeof(*esph))) { 284 if (!pskb_may_pull(skb, sizeof(*esph))) {
159 ret = -EINVAL; 285 ret = -EINVAL;
160 goto out; 286 goto out;
161 } 287 }
162 288
163 if (elen <= 0 || (elen & (blksize-1))) { 289 if (elen <= 0) {
164 ret = -EINVAL; 290 ret = -EINVAL;
165 goto out; 291 goto out;
166 } 292 }
@@ -170,86 +296,38 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
170 goto out; 296 goto out;
171 } 297 }
172 298
173 skb->ip_summed = CHECKSUM_NONE; 299 ret = -ENOMEM;
174 300 tmp = esp_alloc_tmp(aead, nfrags + 1);
175 spin_lock(&x->lock); 301 if (!tmp)
176 302 goto out;
177 /* If integrity check is required, do this. */
178 if (esp->auth.icv_full_len) {
179 u8 sum[alen];
180
181 ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
182 if (ret)
183 goto unlock;
184 303
185 if (skb_copy_bits(skb, skb->len - alen, sum, alen)) 304 ESP_SKB_CB(skb)->tmp = tmp;
186 BUG(); 305 iv = esp_tmp_iv(aead, tmp);
306 req = esp_tmp_req(aead, iv);
307 asg = esp_req_sg(aead, req);
308 sg = asg + 1;
187 309
188 if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) { 310 skb->ip_summed = CHECKSUM_NONE;
189 ret = -EBADMSG;
190 goto unlock;
191 }
192 }
193 311
194 esph = (struct ip_esp_hdr *)skb->data; 312 esph = (struct ip_esp_hdr *)skb->data;
195 iph = ipv6_hdr(skb);
196 313
197 /* Get ivec. This can be wrong, check against other implementations. */ 314 /* Get ivec. This can be wrong, check against other implementations. */
198 if (esp->conf.ivlen) 315 iv = esph->enc_data;
199 crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
200
201 {
202 struct scatterlist *sg = &esp->sgbuf[0];
203
204 if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
205 sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
206 if (!sg) {
207 ret = -ENOMEM;
208 goto unlock;
209 }
210 }
211 sg_init_table(sg, nfrags);
212 skb_to_sgvec(skb, sg,
213 sizeof(*esph) + esp->conf.ivlen,
214 elen);
215 ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
216 if (unlikely(sg != &esp->sgbuf[0]))
217 kfree(sg);
218 }
219 316
220unlock: 317 sg_init_table(sg, nfrags);
221 spin_unlock(&x->lock); 318 skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
319 sg_init_one(asg, esph, sizeof(*esph));
222 320
223 if (unlikely(ret)) 321 aead_request_set_callback(req, 0, esp_input_done, skb);
224 goto out; 322 aead_request_set_crypt(req, sg, sg, elen, iv);
225 323 aead_request_set_assoc(req, asg, sizeof(*esph));
226 {
227 u8 nexthdr[2];
228 u8 padlen;
229
230 if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
231 BUG();
232
233 padlen = nexthdr[0];
234 if (padlen+2 >= elen) {
235 LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
236 ret = -EINVAL;
237 goto out;
238 }
239 /* ... check padding bits here. Silly. :-) */
240 324
241 /* RFC4303: Drop dummy packets without any error */ 325 ret = crypto_aead_decrypt(req);
242 if (nexthdr[1] == IPPROTO_NONE) { 326 if (ret == -EINPROGRESS)
243 ret = -EINVAL; 327 goto out;
244 goto out;
245 }
246 328
247 pskb_trim(skb, skb->len - alen - padlen - 2); 329 ret = esp_input_done2(skb, ret);
248 ret = nexthdr[1];
249 }
250 330
251 __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
252 skb_set_transport_header(skb, -hdr_len);
253out: 331out:
254 return ret; 332 return ret;
255} 333}
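esp_input_done2() above recovers the ESP trailer once decryption completes: the two bytes just before the ICV carry the pad length and the next-header value, the padding is trimmed off, and a next header of IPPROTO_NONE marks an RFC 4303 dummy packet that is dropped without raising an error. A small sketch of that trailer parse over a plain decrypted buffer, with an invented helper name and none of the skb handling:

#include <stddef.h>

#define IPPROTO_NONE	59	/* IPv6 no-next-header, used for ESP dummy packets */

/*
 * buf/len cover the decrypted payload with the ICV already stripped.
 * Returns the next-header value and stores the real payload length
 * (minus padding, pad-length byte and next-header byte) in *payload_len,
 * or -1 for garbage padding and for dummy packets.
 */
static int esp_trailer_parse(const unsigned char *buf, size_t len,
			     size_t *payload_len)
{
	unsigned char padlen, nexthdr;

	if (len < 2)
		return -1;

	padlen  = buf[len - 2];
	nexthdr = buf[len - 1];

	if ((size_t)padlen + 2 > len)	/* padding claims more bytes than exist */
		return -1;

	if (nexthdr == IPPROTO_NONE)	/* dummy packet: discard silently */
		return -1;

	*payload_len = len - padlen - 2;
	return nexthdr;
}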
@@ -257,11 +335,11 @@ out:
257static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) 335static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
258{ 336{
259 struct esp_data *esp = x->data; 337 struct esp_data *esp = x->data;
260 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); 338 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
261 u32 align = max_t(u32, blksize, esp->conf.padlen); 339 u32 align = max_t(u32, blksize, esp->padlen);
262 u32 rem; 340 u32 rem;
263 341
264 mtu -= x->props.header_len + esp->auth.icv_trunc_len; 342 mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
265 rem = mtu & (align - 1); 343 rem = mtu & (align - 1);
266 mtu &= ~(align - 1); 344 mtu &= ~(align - 1);
267 345
@@ -300,81 +378,146 @@ static void esp6_destroy(struct xfrm_state *x)
300 if (!esp) 378 if (!esp)
301 return; 379 return;
302 380
303 crypto_free_blkcipher(esp->conf.tfm); 381 crypto_free_aead(esp->aead);
304 esp->conf.tfm = NULL;
305 kfree(esp->conf.ivec);
306 esp->conf.ivec = NULL;
307 crypto_free_hash(esp->auth.tfm);
308 esp->auth.tfm = NULL;
309 kfree(esp->auth.work_icv);
310 esp->auth.work_icv = NULL;
311 kfree(esp); 382 kfree(esp);
312} 383}
313 384
314static int esp6_init_state(struct xfrm_state *x) 385static int esp_init_aead(struct xfrm_state *x)
386{
387 struct esp_data *esp = x->data;
388 struct crypto_aead *aead;
389 int err;
390
391 aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
392 err = PTR_ERR(aead);
393 if (IS_ERR(aead))
394 goto error;
395
396 esp->aead = aead;
397
398 err = crypto_aead_setkey(aead, x->aead->alg_key,
399 (x->aead->alg_key_len + 7) / 8);
400 if (err)
401 goto error;
402
403 err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
404 if (err)
405 goto error;
406
407error:
408 return err;
409}
410
411static int esp_init_authenc(struct xfrm_state *x)
315{ 412{
316 struct esp_data *esp = NULL; 413 struct esp_data *esp = x->data;
317 struct crypto_blkcipher *tfm; 414 struct crypto_aead *aead;
415 struct crypto_authenc_key_param *param;
416 struct rtattr *rta;
417 char *key;
418 char *p;
419 char authenc_name[CRYPTO_MAX_ALG_NAME];
420 unsigned int keylen;
421 int err;
318 422
423 err = -EINVAL;
319 if (x->ealg == NULL) 424 if (x->ealg == NULL)
320 goto error; 425 goto error;
321 426
322 if (x->encap) 427 err = -ENAMETOOLONG;
428 if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
429 x->aalg ? x->aalg->alg_name : "digest_null",
430 x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
323 goto error; 431 goto error;
324 432
325 esp = kzalloc(sizeof(*esp), GFP_KERNEL); 433 aead = crypto_alloc_aead(authenc_name, 0, 0);
326 if (esp == NULL) 434 err = PTR_ERR(aead);
327 return -ENOMEM; 435 if (IS_ERR(aead))
436 goto error;
437
438 esp->aead = aead;
439
440 keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
441 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
442 err = -ENOMEM;
443 key = kmalloc(keylen, GFP_KERNEL);
444 if (!key)
445 goto error;
446
447 p = key;
448 rta = (void *)p;
449 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
450 rta->rta_len = RTA_LENGTH(sizeof(*param));
451 param = RTA_DATA(rta);
452 p += RTA_SPACE(sizeof(*param));
328 453
329 if (x->aalg) { 454 if (x->aalg) {
330 struct xfrm_algo_desc *aalg_desc; 455 struct xfrm_algo_desc *aalg_desc;
331 struct crypto_hash *hash;
332
333 hash = crypto_alloc_hash(x->aalg->alg_name, 0,
334 CRYPTO_ALG_ASYNC);
335 if (IS_ERR(hash))
336 goto error;
337 456
338 esp->auth.tfm = hash; 457 memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
339 if (crypto_hash_setkey(hash, x->aalg->alg_key, 458 p += (x->aalg->alg_key_len + 7) / 8;
340 (x->aalg->alg_key_len + 7) / 8))
341 goto error;
342 459
343 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 460 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
344 BUG_ON(!aalg_desc); 461 BUG_ON(!aalg_desc);
345 462
463 err = -EINVAL;
346 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 464 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
347 crypto_hash_digestsize(hash)) { 465 crypto_aead_authsize(aead)) {
348 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 466 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
349 x->aalg->alg_name, 467 x->aalg->alg_name,
350 crypto_hash_digestsize(hash), 468 crypto_aead_authsize(aead),
351 aalg_desc->uinfo.auth.icv_fullbits/8); 469 aalg_desc->uinfo.auth.icv_fullbits/8);
352 goto error; 470 goto free_key;
353 } 471 }
354 472
355 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 473 err = crypto_aead_setauthsize(
356 esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 474 aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
357 475 if (err)
358 esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL); 476 goto free_key;
359 if (!esp->auth.work_icv)
360 goto error;
361 }
362 tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
363 if (IS_ERR(tfm))
364 goto error;
365 esp->conf.tfm = tfm;
366 esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
367 esp->conf.padlen = 0;
368 if (esp->conf.ivlen) {
369 esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
370 if (unlikely(esp->conf.ivec == NULL))
371 goto error;
372 esp->conf.ivinitted = 0;
373 } 477 }
374 if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key, 478
375 (x->ealg->alg_key_len + 7) / 8)) 479 param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
480 memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
481
482 err = crypto_aead_setkey(aead, key, keylen);
483
484free_key:
485 kfree(key);
486
487error:
488 return err;
489}
490
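esp_init_authenc() above folds the old separate cipher and hash into a single AEAD by requesting authenc(<auth>,<enc>) from the crypto layer and feeding it one key blob: an rtattr-framed parameter holding the encryption-key length, then the authentication key, then the encryption key. A loose userspace sketch of that packing, where authenc_hdr and pack_authenc_key are invented stand-ins for the kernel's rtattr/CRYPTO_AUTHENC_KEYA_PARAM framing:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the rtattr + crypto_authenc_key_param header. */
struct authenc_hdr {
	uint32_t enckeylen;		/* length of the trailing encryption key */
};

/* Build "authenc(<auth>,<enc>)" and one [header][auth key][enc key] blob. */
static unsigned char *pack_authenc_key(const char *auth_alg,
					const unsigned char *akey, size_t alen,
					const char *enc_alg,
					const unsigned char *ekey, size_t elen,
					char *name, size_t name_sz,
					size_t *blob_len)
{
	unsigned char *blob;
	int n;

	n = snprintf(name, name_sz, "authenc(%s,%s)",
		     auth_alg ? auth_alg : "digest_null", enc_alg);
	if (n < 0 || (size_t)n >= name_sz)
		return NULL;			/* algorithm name too long */

	*blob_len = sizeof(struct authenc_hdr) + alen + elen;
	blob = malloc(*blob_len);
	if (!blob)
		return NULL;

	((struct authenc_hdr *)blob)->enckeylen = (uint32_t)elen;
	memcpy(blob + sizeof(struct authenc_hdr), akey, alen);		/* auth key first */
	memcpy(blob + sizeof(struct authenc_hdr) + alen, ekey, elen);	/* then the cipher key */
	return blob;
}

In the real code the truncated ICV length is then applied with crypto_aead_setauthsize() and the whole blob handed to crypto_aead_setkey(), as the hunk above shows.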
491static int esp6_init_state(struct xfrm_state *x)
492{
493 struct esp_data *esp;
494 struct crypto_aead *aead;
495 u32 align;
496 int err;
497
498 if (x->encap)
499 return -EINVAL;
500
501 esp = kzalloc(sizeof(*esp), GFP_KERNEL);
502 if (esp == NULL)
503 return -ENOMEM;
504
505 x->data = esp;
506
507 if (x->aead)
508 err = esp_init_aead(x);
509 else
510 err = esp_init_authenc(x);
511
512 if (err)
376 goto error; 513 goto error;
377 x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; 514
515 aead = esp->aead;
516
517 esp->padlen = 0;
518
519 x->props.header_len = sizeof(struct ip_esp_hdr) +
520 crypto_aead_ivsize(aead);
378 switch (x->props.mode) { 521 switch (x->props.mode) {
379 case XFRM_MODE_BEET: 522 case XFRM_MODE_BEET:
380 case XFRM_MODE_TRANSPORT: 523 case XFRM_MODE_TRANSPORT:
@@ -385,17 +528,17 @@ static int esp6_init_state(struct xfrm_state *x)
385 default: 528 default:
386 goto error; 529 goto error;
387 } 530 }
388 x->data = esp; 531
389 return 0; 532 align = ALIGN(crypto_aead_blocksize(aead), 4);
533 if (esp->padlen)
534 align = max_t(u32, align, esp->padlen);
535 x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
390 536
391error: 537error:
392 x->data = esp; 538 return err;
393 esp6_destroy(x);
394 x->data = NULL;
395 return -EINVAL;
396} 539}
397 540
398static struct xfrm_type esp6_type = 541static const struct xfrm_type esp6_type =
399{ 542{
400 .description = "ESP6", 543 .description = "ESP6",
401 .owner = THIS_MODULE, 544 .owner = THIS_MODULE,
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index a66a7d8e2811..d325a9958909 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -54,7 +54,8 @@ EXPORT_SYMBOL(__inet6_hash);
54 * 54 *
55 * The sockhash lock must be held as a reader here. 55 * The sockhash lock must be held as a reader here.
56 */ 56 */
57struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo, 57struct sock *__inet6_lookup_established(struct net *net,
58 struct inet_hashinfo *hashinfo,
58 const struct in6_addr *saddr, 59 const struct in6_addr *saddr,
59 const __be16 sport, 60 const __be16 sport,
60 const struct in6_addr *daddr, 61 const struct in6_addr *daddr,
@@ -75,22 +76,13 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
75 read_lock(lock); 76 read_lock(lock);
76 sk_for_each(sk, node, &head->chain) { 77 sk_for_each(sk, node, &head->chain) {
77 /* For IPV6 do the cheaper port and family tests first. */ 78 /* For IPV6 do the cheaper port and family tests first. */
78 if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif)) 79 if (INET6_MATCH(sk, net, hash, saddr, daddr, ports, dif))
79 goto hit; /* You sunk my battleship! */ 80 goto hit; /* You sunk my battleship! */
80 } 81 }
81 /* Must check for a TIME_WAIT'er before going to listener hash. */ 82 /* Must check for a TIME_WAIT'er before going to listener hash. */
82 sk_for_each(sk, node, &head->twchain) { 83 sk_for_each(sk, node, &head->twchain) {
83 const struct inet_timewait_sock *tw = inet_twsk(sk); 84 if (INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif))
84 85 goto hit;
85 if(*((__portpair *)&(tw->tw_dport)) == ports &&
86 sk->sk_family == PF_INET6) {
87 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
88
89 if (ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
90 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
91 (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
92 goto hit;
93 }
94 } 86 }
95 read_unlock(lock); 87 read_unlock(lock);
96 return NULL; 88 return NULL;
@@ -102,9 +94,9 @@ hit:
102} 94}
103EXPORT_SYMBOL(__inet6_lookup_established); 95EXPORT_SYMBOL(__inet6_lookup_established);
104 96
105struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo, 97struct sock *inet6_lookup_listener(struct net *net,
106 const struct in6_addr *daddr, 98 struct inet_hashinfo *hashinfo, const struct in6_addr *daddr,
107 const unsigned short hnum, const int dif) 99 const unsigned short hnum, const int dif)
108{ 100{
109 struct sock *sk; 101 struct sock *sk;
110 const struct hlist_node *node; 102 const struct hlist_node *node;
@@ -113,7 +105,8 @@ struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo,
113 105
114 read_lock(&hashinfo->lhash_lock); 106 read_lock(&hashinfo->lhash_lock);
115 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) { 107 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) {
116 if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { 108 if (sk->sk_net == net && inet_sk(sk)->num == hnum &&
109 sk->sk_family == PF_INET6) {
117 const struct ipv6_pinfo *np = inet6_sk(sk); 110 const struct ipv6_pinfo *np = inet6_sk(sk);
118 111
119 score = 1; 112 score = 1;
@@ -145,7 +138,7 @@ struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo,
145 138
146EXPORT_SYMBOL_GPL(inet6_lookup_listener); 139EXPORT_SYMBOL_GPL(inet6_lookup_listener);
147 140
148struct sock *inet6_lookup(struct inet_hashinfo *hashinfo, 141struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
149 const struct in6_addr *saddr, const __be16 sport, 142 const struct in6_addr *saddr, const __be16 sport,
150 const struct in6_addr *daddr, const __be16 dport, 143 const struct in6_addr *daddr, const __be16 dport,
151 const int dif) 144 const int dif)
@@ -153,7 +146,7 @@ struct sock *inet6_lookup(struct inet_hashinfo *hashinfo,
153 struct sock *sk; 146 struct sock *sk;
154 147
155 local_bh_disable(); 148 local_bh_disable();
156 sk = __inet6_lookup(hashinfo, saddr, sport, daddr, ntohs(dport), dif); 149 sk = __inet6_lookup(net, hashinfo, saddr, sport, daddr, ntohs(dport), dif);
157 local_bh_enable(); 150 local_bh_enable();
158 151
159 return sk; 152 return sk;
@@ -179,21 +172,16 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
179 struct sock *sk2; 172 struct sock *sk2;
180 const struct hlist_node *node; 173 const struct hlist_node *node;
181 struct inet_timewait_sock *tw; 174 struct inet_timewait_sock *tw;
175 struct net *net = sk->sk_net;
182 176
183 prefetch(head->chain.first); 177 prefetch(head->chain.first);
184 write_lock(lock); 178 write_lock(lock);
185 179
186 /* Check TIME-WAIT sockets first. */ 180 /* Check TIME-WAIT sockets first. */
187 sk_for_each(sk2, node, &head->twchain) { 181 sk_for_each(sk2, node, &head->twchain) {
188 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
189
190 tw = inet_twsk(sk2); 182 tw = inet_twsk(sk2);
191 183
192 if(*((__portpair *)&(tw->tw_dport)) == ports && 184 if (INET6_TW_MATCH(sk2, net, hash, saddr, daddr, ports, dif)) {
193 sk2->sk_family == PF_INET6 &&
194 ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
195 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
196 (!sk2->sk_bound_dev_if || sk2->sk_bound_dev_if == dif)) {
197 if (twsk_unique(sk, sk2, twp)) 185 if (twsk_unique(sk, sk2, twp))
198 goto unique; 186 goto unique;
199 else 187 else
@@ -204,7 +192,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
204 192
205 /* And established part... */ 193 /* And established part... */
206 sk_for_each(sk2, node, &head->chain) { 194 sk_for_each(sk2, node, &head->chain) {
207 if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif)) 195 if (INET6_MATCH(sk2, net, hash, saddr, daddr, ports, dif))
208 goto not_unique; 196 goto not_unique;
209 } 197 }
210 198
@@ -248,97 +236,8 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk)
248int inet6_hash_connect(struct inet_timewait_death_row *death_row, 236int inet6_hash_connect(struct inet_timewait_death_row *death_row,
249 struct sock *sk) 237 struct sock *sk)
250{ 238{
251 struct inet_hashinfo *hinfo = death_row->hashinfo; 239 return __inet_hash_connect(death_row, sk,
252 const unsigned short snum = inet_sk(sk)->num; 240 __inet6_check_established, __inet6_hash);
253 struct inet_bind_hashbucket *head;
254 struct inet_bind_bucket *tb;
255 int ret;
256
257 if (snum == 0) {
258 int i, port, low, high, remaining;
259 static u32 hint;
260 const u32 offset = hint + inet6_sk_port_offset(sk);
261 struct hlist_node *node;
262 struct inet_timewait_sock *tw = NULL;
263
264 inet_get_local_port_range(&low, &high);
265 remaining = (high - low) + 1;
266
267 local_bh_disable();
268 for (i = 1; i <= remaining; i++) {
269 port = low + (i + offset) % remaining;
270 head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
271 spin_lock(&head->lock);
272
273 /* Does not bother with rcv_saddr checks,
274 * because the established check is already
275 * unique enough.
276 */
277 inet_bind_bucket_for_each(tb, node, &head->chain) {
278 if (tb->port == port) {
279 BUG_TRAP(!hlist_empty(&tb->owners));
280 if (tb->fastreuse >= 0)
281 goto next_port;
282 if (!__inet6_check_established(death_row,
283 sk, port,
284 &tw))
285 goto ok;
286 goto next_port;
287 }
288 }
289
290 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
291 head, port);
292 if (!tb) {
293 spin_unlock(&head->lock);
294 break;
295 }
296 tb->fastreuse = -1;
297 goto ok;
298
299 next_port:
300 spin_unlock(&head->lock);
301 }
302 local_bh_enable();
303
304 return -EADDRNOTAVAIL;
305
306ok:
307 hint += i;
308
309 /* Head lock still held and bh's disabled */
310 inet_bind_hash(sk, tb, port);
311 if (sk_unhashed(sk)) {
312 inet_sk(sk)->sport = htons(port);
313 __inet6_hash(hinfo, sk);
314 }
315 spin_unlock(&head->lock);
316
317 if (tw) {
318 inet_twsk_deschedule(tw, death_row);
319 inet_twsk_put(tw);
320 }
321
322 ret = 0;
323 goto out;
324 }
325
326 head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
327 tb = inet_csk(sk)->icsk_bind_hash;
328 spin_lock_bh(&head->lock);
329
330 if (sk_head(&tb->owners) == sk && sk->sk_bind_node.next == NULL) {
331 __inet6_hash(hinfo, sk);
332 spin_unlock_bh(&head->lock);
333 return 0;
334 } else {
335 spin_unlock(&head->lock);
336 /* No definite answer... Walk to established hash table */
337 ret = __inet6_check_established(death_row, sk, snum, NULL);
338out:
339 local_bh_enable();
340 return ret;
341 }
342} 241}
343 242
344EXPORT_SYMBOL_GPL(inet6_hash_connect); 243EXPORT_SYMBOL_GPL(inet6_hash_connect);
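The body removed from inet6_hash_connect() above is the generic ephemeral-port search that now lives in __inet_hash_connect(): starting from a per-destination offset, it probes port = low + (i + offset) % remaining and only reuses a bind bucket once the established-hash check confirms the 4-tuple is unique. A compact sketch of that probing order, with an invented port_in_use() predicate standing in for the bind-bucket and TIME-WAIT machinery:

#include <stdint.h>

/* Placeholder for the bind-bucket + __inet6_check_established() decision. */
static int port_in_use(int port)
{
	(void)port;
	return 0;
}

/*
 * Pick a local port in [low, high]: start at a pseudo-random, per-destination
 * offset so concurrent connects spread across the range, then walk it linearly.
 * Returns the chosen port, or -1 when the whole range is exhausted
 * (-EADDRNOTAVAIL in the kernel).
 */
static int pick_local_port(int low, int high, uint32_t offset)
{
	int remaining = high - low + 1;
	int i, port;

	for (i = 1; i <= remaining; i++) {
		port = low + (int)((i + offset) % (uint32_t)remaining);
		if (!port_in_use(port))
			return port;
	}
	return -1;
}

The removed code also advanced a static hint by the number of probes on success, so back-to-back connects do not restart the search from the same point.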
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 15c4f6cee3e6..9ac6ca2521c3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -257,6 +257,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
257 ipv6_addr_copy(&hdr->daddr, first_hop); 257 ipv6_addr_copy(&hdr->daddr, first_hop);
258 258
259 skb->priority = sk->sk_priority; 259 skb->priority = sk->sk_priority;
260 skb->mark = sk->sk_mark;
260 261
261 mtu = dst_mtu(dst); 262 mtu = dst_mtu(dst);
262 if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { 263 if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
@@ -636,6 +637,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
636 637
637 if (skb_shinfo(skb)->frag_list) { 638 if (skb_shinfo(skb)->frag_list) {
638 int first_len = skb_pagelen(skb); 639 int first_len = skb_pagelen(skb);
640 int truesizes = 0;
639 641
640 if (first_len - hlen > mtu || 642 if (first_len - hlen > mtu ||
641 ((first_len - hlen) & 7) || 643 ((first_len - hlen) & 7) ||
@@ -658,7 +660,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
658 sock_hold(skb->sk); 660 sock_hold(skb->sk);
659 frag->sk = skb->sk; 661 frag->sk = skb->sk;
660 frag->destructor = sock_wfree; 662 frag->destructor = sock_wfree;
661 skb->truesize -= frag->truesize; 663 truesizes += frag->truesize;
662 } 664 }
663 } 665 }
664 666
@@ -689,6 +691,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
689 691
690 first_len = skb_pagelen(skb); 692 first_len = skb_pagelen(skb);
691 skb->data_len = first_len - skb_headlen(skb); 693 skb->data_len = first_len - skb_headlen(skb);
694 skb->truesize -= truesizes;
692 skb->len = first_len; 695 skb->len = first_len;
693 ipv6_hdr(skb)->payload_len = htons(first_len - 696 ipv6_hdr(skb)->payload_len = htons(first_len -
694 sizeof(struct ipv6hdr)); 697 sizeof(struct ipv6hdr));
@@ -1437,6 +1440,7 @@ int ip6_push_pending_frames(struct sock *sk)
1437 ipv6_addr_copy(&hdr->daddr, final_dst); 1440 ipv6_addr_copy(&hdr->daddr, final_dst);
1438 1441
1439 skb->priority = sk->sk_priority; 1442 skb->priority = sk->sk_priority;
1443 skb->mark = sk->sk_mark;
1440 1444
1441 skb->dst = dst_clone(&rt->u.dst); 1445 skb->dst = dst_clone(&rt->u.dst);
1442 IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); 1446 IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index b276d04d6db5..b90039593a7f 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -64,6 +64,7 @@ static LIST_HEAD(ipcomp6_tfms_list);
64 64
65static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) 65static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
66{ 66{
67 int nexthdr;
67 int err = -ENOMEM; 68 int err = -ENOMEM;
68 struct ip_comp_hdr *ipch; 69 struct ip_comp_hdr *ipch;
69 int plen, dlen; 70 int plen, dlen;
@@ -79,6 +80,8 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
79 80
80 /* Remove ipcomp header and decompress original payload */ 81 /* Remove ipcomp header and decompress original payload */
81 ipch = (void *)skb->data; 82 ipch = (void *)skb->data;
83 nexthdr = ipch->nexthdr;
84
82 skb->transport_header = skb->network_header + sizeof(*ipch); 85 skb->transport_header = skb->network_header + sizeof(*ipch);
83 __skb_pull(skb, sizeof(*ipch)); 86 __skb_pull(skb, sizeof(*ipch));
84 87
@@ -108,7 +111,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
108 skb->truesize += dlen - plen; 111 skb->truesize += dlen - plen;
109 __skb_put(skb, dlen - plen); 112 __skb_put(skb, dlen - plen);
110 skb_copy_to_linear_data(skb, scratch, dlen); 113 skb_copy_to_linear_data(skb, scratch, dlen);
111 err = ipch->nexthdr; 114 err = nexthdr;
112 115
113out_put_cpu: 116out_put_cpu:
114 put_cpu(); 117 put_cpu();
@@ -450,7 +453,7 @@ error:
450 goto out; 453 goto out;
451} 454}
452 455
453static struct xfrm_type ipcomp6_type = 456static const struct xfrm_type ipcomp6_type =
454{ 457{
455 .description = "IPCOMP6", 458 .description = "IPCOMP6",
456 .owner = THIS_MODULE, 459 .owner = THIS_MODULE,
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 49d396620eac..cd8a5bda13cd 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -330,7 +330,7 @@ static void mip6_destopt_destroy(struct xfrm_state *x)
330{ 330{
331} 331}
332 332
333static struct xfrm_type mip6_destopt_type = 333static const struct xfrm_type mip6_destopt_type =
334{ 334{
335 .description = "MIP6DESTOPT", 335 .description = "MIP6DESTOPT",
336 .owner = THIS_MODULE, 336 .owner = THIS_MODULE,
@@ -462,7 +462,7 @@ static void mip6_rthdr_destroy(struct xfrm_state *x)
462{ 462{
463} 463}
464 464
465static struct xfrm_type mip6_rthdr_type = 465static const struct xfrm_type mip6_rthdr_type =
466{ 466{
467 .description = "MIP6RT", 467 .description = "MIP6RT",
468 .owner = THIS_MODULE, 468 .owner = THIS_MODULE,
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 56b4ea6d29ed..e869916b05f1 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -515,6 +515,7 @@ static struct notifier_block ipq_nl_notifier = {
515 .notifier_call = ipq_rcv_nl_event, 515 .notifier_call = ipq_rcv_nl_event,
516}; 516};
517 517
518#ifdef CONFIG_SYSCTL
518static struct ctl_table_header *ipq_sysctl_header; 519static struct ctl_table_header *ipq_sysctl_header;
519 520
520static ctl_table ipq_table[] = { 521static ctl_table ipq_table[] = {
@@ -528,7 +529,9 @@ static ctl_table ipq_table[] = {
528 }, 529 },
529 { .ctl_name = 0 } 530 { .ctl_name = 0 }
530}; 531};
532#endif
531 533
534#ifdef CONFIG_PROC_FS
532static int ip6_queue_show(struct seq_file *m, void *v) 535static int ip6_queue_show(struct seq_file *m, void *v)
533{ 536{
534 read_lock_bh(&queue_lock); 537 read_lock_bh(&queue_lock);
@@ -565,6 +568,7 @@ static const struct file_operations ip6_queue_proc_fops = {
565 .release = single_release, 568 .release = single_release,
566 .owner = THIS_MODULE, 569 .owner = THIS_MODULE,
567}; 570};
571#endif
568 572
569static const struct nf_queue_handler nfqh = { 573static const struct nf_queue_handler nfqh = {
570 .name = "ip6_queue", 574 .name = "ip6_queue",
@@ -574,7 +578,7 @@ static const struct nf_queue_handler nfqh = {
574static int __init ip6_queue_init(void) 578static int __init ip6_queue_init(void)
575{ 579{
576 int status = -ENOMEM; 580 int status = -ENOMEM;
577 struct proc_dir_entry *proc; 581 struct proc_dir_entry *proc __maybe_unused;
578 582
579 netlink_register_notifier(&ipq_nl_notifier); 583 netlink_register_notifier(&ipq_nl_notifier);
580 ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0, 584 ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0,
@@ -584,6 +588,7 @@ static int __init ip6_queue_init(void)
584 goto cleanup_netlink_notifier; 588 goto cleanup_netlink_notifier;
585 } 589 }
586 590
591#ifdef CONFIG_PROC_FS
587 proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net); 592 proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
588 if (proc) { 593 if (proc) {
589 proc->owner = THIS_MODULE; 594 proc->owner = THIS_MODULE;
@@ -592,10 +597,11 @@ static int __init ip6_queue_init(void)
592 printk(KERN_ERR "ip6_queue: failed to create proc entry\n"); 597 printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
593 goto cleanup_ipqnl; 598 goto cleanup_ipqnl;
594 } 599 }
595 600#endif
596 register_netdevice_notifier(&ipq_dev_notifier); 601 register_netdevice_notifier(&ipq_dev_notifier);
602#ifdef CONFIG_SYSCTL
597 ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table); 603 ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table);
598 604#endif
599 status = nf_register_queue_handler(PF_INET6, &nfqh); 605 status = nf_register_queue_handler(PF_INET6, &nfqh);
600 if (status < 0) { 606 if (status < 0) {
601 printk(KERN_ERR "ip6_queue: failed to register queue handler\n"); 607 printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
@@ -604,11 +610,13 @@ static int __init ip6_queue_init(void)
604 return status; 610 return status;
605 611
606cleanup_sysctl: 612cleanup_sysctl:
613#ifdef CONFIG_SYSCTL
607 unregister_sysctl_table(ipq_sysctl_header); 614 unregister_sysctl_table(ipq_sysctl_header);
615#endif
608 unregister_netdevice_notifier(&ipq_dev_notifier); 616 unregister_netdevice_notifier(&ipq_dev_notifier);
609 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 617 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
610 618
611cleanup_ipqnl: 619cleanup_ipqnl: __maybe_unused
612 netlink_kernel_release(ipqnl); 620 netlink_kernel_release(ipqnl);
613 mutex_lock(&ipqnl_mutex); 621 mutex_lock(&ipqnl_mutex);
614 mutex_unlock(&ipqnl_mutex); 622 mutex_unlock(&ipqnl_mutex);
@@ -624,7 +632,9 @@ static void __exit ip6_queue_fini(void)
624 synchronize_net(); 632 synchronize_net();
625 ipq_flush(NULL, 0); 633 ipq_flush(NULL, 0);
626 634
635#ifdef CONFIG_SYSCTL
627 unregister_sysctl_table(ipq_sysctl_header); 636 unregister_sysctl_table(ipq_sysctl_header);
637#endif
628 unregister_netdevice_notifier(&ipq_dev_notifier); 638 unregister_netdevice_notifier(&ipq_dev_notifier);
629 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 639 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
630 640
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index dd7860fea61f..bf9bb6e55bb5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -320,7 +320,7 @@ static void trace_packet(struct sk_buff *skb,
320 unsigned int hook, 320 unsigned int hook,
321 const struct net_device *in, 321 const struct net_device *in,
322 const struct net_device *out, 322 const struct net_device *out,
323 char *tablename, 323 const char *tablename,
324 struct xt_table_info *private, 324 struct xt_table_info *private,
325 struct ip6t_entry *e) 325 struct ip6t_entry *e)
326{ 326{
@@ -1118,7 +1118,7 @@ static int compat_table_info(const struct xt_table_info *info,
1118} 1118}
1119#endif 1119#endif
1120 1120
1121static int get_info(void __user *user, int *len, int compat) 1121static int get_info(struct net *net, void __user *user, int *len, int compat)
1122{ 1122{
1123 char name[IP6T_TABLE_MAXNAMELEN]; 1123 char name[IP6T_TABLE_MAXNAMELEN];
1124 struct xt_table *t; 1124 struct xt_table *t;
@@ -1138,7 +1138,7 @@ static int get_info(void __user *user, int *len, int compat)
1138 if (compat) 1138 if (compat)
1139 xt_compat_lock(AF_INET6); 1139 xt_compat_lock(AF_INET6);
1140#endif 1140#endif
1141 t = try_then_request_module(xt_find_table_lock(AF_INET6, name), 1141 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1142 "ip6table_%s", name); 1142 "ip6table_%s", name);
1143 if (t && !IS_ERR(t)) { 1143 if (t && !IS_ERR(t)) {
1144 struct ip6t_getinfo info; 1144 struct ip6t_getinfo info;
@@ -1178,7 +1178,7 @@ static int get_info(void __user *user, int *len, int compat)
1178} 1178}
1179 1179
1180static int 1180static int
1181get_entries(struct ip6t_get_entries __user *uptr, int *len) 1181get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1182{ 1182{
1183 int ret; 1183 int ret;
1184 struct ip6t_get_entries get; 1184 struct ip6t_get_entries get;
@@ -1196,7 +1196,7 @@ get_entries(struct ip6t_get_entries __user *uptr, int *len)
1196 return -EINVAL; 1196 return -EINVAL;
1197 } 1197 }
1198 1198
1199 t = xt_find_table_lock(AF_INET6, get.name); 1199 t = xt_find_table_lock(net, AF_INET6, get.name);
1200 if (t && !IS_ERR(t)) { 1200 if (t && !IS_ERR(t)) {
1201 struct xt_table_info *private = t->private; 1201 struct xt_table_info *private = t->private;
1202 duprintf("t->private->number = %u\n", private->number); 1202 duprintf("t->private->number = %u\n", private->number);
@@ -1217,7 +1217,7 @@ get_entries(struct ip6t_get_entries __user *uptr, int *len)
1217} 1217}
1218 1218
1219static int 1219static int
1220__do_replace(const char *name, unsigned int valid_hooks, 1220__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1221 struct xt_table_info *newinfo, unsigned int num_counters, 1221 struct xt_table_info *newinfo, unsigned int num_counters,
1222 void __user *counters_ptr) 1222 void __user *counters_ptr)
1223{ 1223{
@@ -1235,7 +1235,7 @@ __do_replace(const char *name, unsigned int valid_hooks,
1235 goto out; 1235 goto out;
1236 } 1236 }
1237 1237
1238 t = try_then_request_module(xt_find_table_lock(AF_INET6, name), 1238 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1239 "ip6table_%s", name); 1239 "ip6table_%s", name);
1240 if (!t || IS_ERR(t)) { 1240 if (!t || IS_ERR(t)) {
1241 ret = t ? PTR_ERR(t) : -ENOENT; 1241 ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1288,7 +1288,7 @@ __do_replace(const char *name, unsigned int valid_hooks,
1288} 1288}
1289 1289
1290static int 1290static int
1291do_replace(void __user *user, unsigned int len) 1291do_replace(struct net *net, void __user *user, unsigned int len)
1292{ 1292{
1293 int ret; 1293 int ret;
1294 struct ip6t_replace tmp; 1294 struct ip6t_replace tmp;
@@ -1322,7 +1322,7 @@ do_replace(void __user *user, unsigned int len)
1322 1322
1323 duprintf("ip_tables: Translated table\n"); 1323 duprintf("ip_tables: Translated table\n");
1324 1324
1325 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo, 1325 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1326 tmp.num_counters, tmp.counters); 1326 tmp.num_counters, tmp.counters);
1327 if (ret) 1327 if (ret)
1328 goto free_newinfo_untrans; 1328 goto free_newinfo_untrans;
@@ -1358,7 +1358,8 @@ add_counter_to_entry(struct ip6t_entry *e,
1358} 1358}
1359 1359
1360static int 1360static int
1361do_add_counters(void __user *user, unsigned int len, int compat) 1361do_add_counters(struct net *net, void __user *user, unsigned int len,
1362 int compat)
1362{ 1363{
1363 unsigned int i; 1364 unsigned int i;
1364 struct xt_counters_info tmp; 1365 struct xt_counters_info tmp;
@@ -1410,7 +1411,7 @@ do_add_counters(void __user *user, unsigned int len, int compat)
1410 goto free; 1411 goto free;
1411 } 1412 }
1412 1413
1413 t = xt_find_table_lock(AF_INET6, name); 1414 t = xt_find_table_lock(net, AF_INET6, name);
1414 if (!t || IS_ERR(t)) { 1415 if (!t || IS_ERR(t)) {
1415 ret = t ? PTR_ERR(t) : -ENOENT; 1416 ret = t ? PTR_ERR(t) : -ENOENT;
1416 goto free; 1417 goto free;
@@ -1456,7 +1457,7 @@ struct compat_ip6t_replace {
1456 1457
1457static int 1458static int
1458compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, 1459compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1459 compat_uint_t *size, struct xt_counters *counters, 1460 unsigned int *size, struct xt_counters *counters,
1460 unsigned int *i) 1461 unsigned int *i)
1461{ 1462{
1462 struct ip6t_entry_target *t; 1463 struct ip6t_entry_target *t;
@@ -1503,7 +1504,7 @@ compat_find_calc_match(struct ip6t_entry_match *m,
1503 const char *name, 1504 const char *name,
1504 const struct ip6t_ip6 *ipv6, 1505 const struct ip6t_ip6 *ipv6,
1505 unsigned int hookmask, 1506 unsigned int hookmask,
1506 int *size, int *i) 1507 int *size, unsigned int *i)
1507{ 1508{
1508 struct xt_match *match; 1509 struct xt_match *match;
1509 1510
@@ -1561,7 +1562,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1561 struct ip6t_entry_target *t; 1562 struct ip6t_entry_target *t;
1562 struct xt_target *target; 1563 struct xt_target *target;
1563 unsigned int entry_offset; 1564 unsigned int entry_offset;
1564 int ret, off, h, j; 1565 unsigned int j;
1566 int ret, off, h;
1565 1567
1566 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1568 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1567 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 1569 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
@@ -1673,7 +1675,8 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1673static int compat_check_entry(struct ip6t_entry *e, const char *name, 1675static int compat_check_entry(struct ip6t_entry *e, const char *name,
1674 unsigned int *i) 1676 unsigned int *i)
1675{ 1677{
1676 int j, ret; 1678 unsigned int j;
1679 int ret;
1677 1680
1678 j = 0; 1681 j = 0;
1679 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, 1682 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
@@ -1815,7 +1818,7 @@ out_unlock:
1815} 1818}
1816 1819
1817static int 1820static int
1818compat_do_replace(void __user *user, unsigned int len) 1821compat_do_replace(struct net *net, void __user *user, unsigned int len)
1819{ 1822{
1820 int ret; 1823 int ret;
1821 struct compat_ip6t_replace tmp; 1824 struct compat_ip6t_replace tmp;
@@ -1852,7 +1855,7 @@ compat_do_replace(void __user *user, unsigned int len)
1852 1855
1853 duprintf("compat_do_replace: Translated table\n"); 1856 duprintf("compat_do_replace: Translated table\n");
1854 1857
1855 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo, 1858 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1856 tmp.num_counters, compat_ptr(tmp.counters)); 1859 tmp.num_counters, compat_ptr(tmp.counters));
1857 if (ret) 1860 if (ret)
1858 goto free_newinfo_untrans; 1861 goto free_newinfo_untrans;
@@ -1876,11 +1879,11 @@ compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1876 1879
1877 switch (cmd) { 1880 switch (cmd) {
1878 case IP6T_SO_SET_REPLACE: 1881 case IP6T_SO_SET_REPLACE:
1879 ret = compat_do_replace(user, len); 1882 ret = compat_do_replace(sk->sk_net, user, len);
1880 break; 1883 break;
1881 1884
1882 case IP6T_SO_SET_ADD_COUNTERS: 1885 case IP6T_SO_SET_ADD_COUNTERS:
1883 ret = do_add_counters(user, len, 1); 1886 ret = do_add_counters(sk->sk_net, user, len, 1);
1884 break; 1887 break;
1885 1888
1886 default: 1889 default:
@@ -1929,7 +1932,8 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1929} 1932}
1930 1933
1931static int 1934static int
1932compat_get_entries(struct compat_ip6t_get_entries __user *uptr, int *len) 1935compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1936 int *len)
1933{ 1937{
1934 int ret; 1938 int ret;
1935 struct compat_ip6t_get_entries get; 1939 struct compat_ip6t_get_entries get;
@@ -1950,7 +1954,7 @@ compat_get_entries(struct compat_ip6t_get_entries __user *uptr, int *len)
1950 } 1954 }
1951 1955
1952 xt_compat_lock(AF_INET6); 1956 xt_compat_lock(AF_INET6);
1953 t = xt_find_table_lock(AF_INET6, get.name); 1957 t = xt_find_table_lock(net, AF_INET6, get.name);
1954 if (t && !IS_ERR(t)) { 1958 if (t && !IS_ERR(t)) {
1955 struct xt_table_info *private = t->private; 1959 struct xt_table_info *private = t->private;
1956 struct xt_table_info info; 1960 struct xt_table_info info;
@@ -1986,10 +1990,10 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1986 1990
1987 switch (cmd) { 1991 switch (cmd) {
1988 case IP6T_SO_GET_INFO: 1992 case IP6T_SO_GET_INFO:
1989 ret = get_info(user, len, 1); 1993 ret = get_info(sk->sk_net, user, len, 1);
1990 break; 1994 break;
1991 case IP6T_SO_GET_ENTRIES: 1995 case IP6T_SO_GET_ENTRIES:
1992 ret = compat_get_entries(user, len); 1996 ret = compat_get_entries(sk->sk_net, user, len);
1993 break; 1997 break;
1994 default: 1998 default:
1995 ret = do_ip6t_get_ctl(sk, cmd, user, len); 1999 ret = do_ip6t_get_ctl(sk, cmd, user, len);
@@ -2008,11 +2012,11 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2008 2012
2009 switch (cmd) { 2013 switch (cmd) {
2010 case IP6T_SO_SET_REPLACE: 2014 case IP6T_SO_SET_REPLACE:
2011 ret = do_replace(user, len); 2015 ret = do_replace(sk->sk_net, user, len);
2012 break; 2016 break;
2013 2017
2014 case IP6T_SO_SET_ADD_COUNTERS: 2018 case IP6T_SO_SET_ADD_COUNTERS:
2015 ret = do_add_counters(user, len, 0); 2019 ret = do_add_counters(sk->sk_net, user, len, 0);
2016 break; 2020 break;
2017 2021
2018 default: 2022 default:
@@ -2033,11 +2037,11 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2033 2037
2034 switch (cmd) { 2038 switch (cmd) {
2035 case IP6T_SO_GET_INFO: 2039 case IP6T_SO_GET_INFO:
2036 ret = get_info(user, len, 0); 2040 ret = get_info(sk->sk_net, user, len, 0);
2037 break; 2041 break;
2038 2042
2039 case IP6T_SO_GET_ENTRIES: 2043 case IP6T_SO_GET_ENTRIES:
2040 ret = get_entries(user, len); 2044 ret = get_entries(sk->sk_net, user, len);
2041 break; 2045 break;
2042 2046
2043 case IP6T_SO_GET_REVISION_MATCH: 2047 case IP6T_SO_GET_REVISION_MATCH:
@@ -2074,17 +2078,21 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2074 return ret; 2078 return ret;
2075} 2079}
2076 2080
2077int ip6t_register_table(struct xt_table *table, const struct ip6t_replace *repl) 2081struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2082 const struct ip6t_replace *repl)
2078{ 2083{
2079 int ret; 2084 int ret;
2080 struct xt_table_info *newinfo; 2085 struct xt_table_info *newinfo;
2081 struct xt_table_info bootstrap 2086 struct xt_table_info bootstrap
2082 = { 0, 0, 0, { 0 }, { 0 }, { } }; 2087 = { 0, 0, 0, { 0 }, { 0 }, { } };
2083 void *loc_cpu_entry; 2088 void *loc_cpu_entry;
2089 struct xt_table *new_table;
2084 2090
2085 newinfo = xt_alloc_table_info(repl->size); 2091 newinfo = xt_alloc_table_info(repl->size);
2086 if (!newinfo) 2092 if (!newinfo) {
2087 return -ENOMEM; 2093 ret = -ENOMEM;
2094 goto out;
2095 }
2088 2096
2089 /* choose the copy on our node/cpu, but dont care about preemption */ 2097 /* choose the copy on our node/cpu, but dont care about preemption */
2090 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2098 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
@@ -2095,30 +2103,35 @@ int ip6t_register_table(struct xt_table *table, const struct ip6t_replace *repl)
2095 repl->num_entries, 2103 repl->num_entries,
2096 repl->hook_entry, 2104 repl->hook_entry,
2097 repl->underflow); 2105 repl->underflow);
2098 if (ret != 0) { 2106 if (ret != 0)
2099 xt_free_table_info(newinfo); 2107 goto out_free;
2100 return ret;
2101 }
2102 2108
2103 ret = xt_register_table(table, &bootstrap, newinfo); 2109 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2104 if (ret != 0) { 2110 if (IS_ERR(new_table)) {
2105 xt_free_table_info(newinfo); 2111 ret = PTR_ERR(new_table);
2106 return ret; 2112 goto out_free;
2107 } 2113 }
2114 return new_table;
2108 2115
2109 return 0; 2116out_free:
2117 xt_free_table_info(newinfo);
2118out:
2119 return ERR_PTR(ret);
2110} 2120}
2111 2121
2112void ip6t_unregister_table(struct xt_table *table) 2122void ip6t_unregister_table(struct xt_table *table)
2113{ 2123{
2114 struct xt_table_info *private; 2124 struct xt_table_info *private;
2115 void *loc_cpu_entry; 2125 void *loc_cpu_entry;
2126 struct module *table_owner = table->me;
2116 2127
2117 private = xt_unregister_table(table); 2128 private = xt_unregister_table(table);
2118 2129
2119 /* Decrease module usage counts and free resources */ 2130 /* Decrease module usage counts and free resources */
2120 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2131 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2121 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2132 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2133 if (private->number > private->initial_entries)
2134 module_put(table_owner);
2122 xt_free_table_info(private); 2135 xt_free_table_info(private);
2123} 2136}
2124 2137
@@ -2225,11 +2238,26 @@ static struct xt_match icmp6_matchstruct __read_mostly = {
2225 .family = AF_INET6, 2238 .family = AF_INET6,
2226}; 2239};
2227 2240
2241static int __net_init ip6_tables_net_init(struct net *net)
2242{
2243 return xt_proto_init(net, AF_INET6);
2244}
2245
2246static void __net_exit ip6_tables_net_exit(struct net *net)
2247{
2248 xt_proto_fini(net, AF_INET6);
2249}
2250
2251static struct pernet_operations ip6_tables_net_ops = {
2252 .init = ip6_tables_net_init,
2253 .exit = ip6_tables_net_exit,
2254};
2255
2228static int __init ip6_tables_init(void) 2256static int __init ip6_tables_init(void)
2229{ 2257{
2230 int ret; 2258 int ret;
2231 2259
2232 ret = xt_proto_init(AF_INET6); 2260 ret = register_pernet_subsys(&ip6_tables_net_ops);
2233 if (ret < 0) 2261 if (ret < 0)
2234 goto err1; 2262 goto err1;
2235 2263
@@ -2259,7 +2287,7 @@ err4:
2259err3: 2287err3:
2260 xt_unregister_target(&ip6t_standard_target); 2288 xt_unregister_target(&ip6t_standard_target);
2261err2: 2289err2:
2262 xt_proto_fini(AF_INET6); 2290 unregister_pernet_subsys(&ip6_tables_net_ops);
2263err1: 2291err1:
2264 return ret; 2292 return ret;
2265} 2293}
@@ -2271,7 +2299,8 @@ static void __exit ip6_tables_fini(void)
2271 xt_unregister_match(&icmp6_matchstruct); 2299 xt_unregister_match(&icmp6_matchstruct);
2272 xt_unregister_target(&ip6t_error_target); 2300 xt_unregister_target(&ip6t_error_target);
2273 xt_unregister_target(&ip6t_standard_target); 2301 xt_unregister_target(&ip6t_standard_target);
2274 xt_proto_fini(AF_INET6); 2302
2303 unregister_pernet_subsys(&ip6_tables_net_ops);
2275} 2304}
2276 2305
2277/* 2306/*
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 87d38d08aad0..2d9cd095a72c 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -26,7 +26,7 @@ static struct
26 struct ip6t_replace repl; 26 struct ip6t_replace repl;
27 struct ip6t_standard entries[3]; 27 struct ip6t_standard entries[3];
28 struct ip6t_error term; 28 struct ip6t_error term;
29} initial_table __initdata = { 29} initial_table __net_initdata = {
30 .repl = { 30 .repl = {
31 .name = "filter", 31 .name = "filter",
32 .valid_hooks = FILTER_VALID_HOOKS, 32 .valid_hooks = FILTER_VALID_HOOKS,
@@ -67,7 +67,7 @@ ip6t_hook(unsigned int hook,
67 const struct net_device *out, 67 const struct net_device *out,
68 int (*okfn)(struct sk_buff *)) 68 int (*okfn)(struct sk_buff *))
69{ 69{
70 return ip6t_do_table(skb, hook, in, out, &packet_filter); 70 return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_filter);
71} 71}
72 72
73static unsigned int 73static unsigned int
@@ -87,7 +87,7 @@ ip6t_local_out_hook(unsigned int hook,
87 } 87 }
88#endif 88#endif
89 89
90 return ip6t_do_table(skb, hook, in, out, &packet_filter); 90 return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_filter);
91} 91}
92 92
93static struct nf_hook_ops ip6t_ops[] __read_mostly = { 93static struct nf_hook_ops ip6t_ops[] __read_mostly = {
@@ -118,6 +118,26 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
118static int forward = NF_ACCEPT; 118static int forward = NF_ACCEPT;
119module_param(forward, bool, 0000); 119module_param(forward, bool, 0000);
120 120
121static int __net_init ip6table_filter_net_init(struct net *net)
122{
123 /* Register table */
124 net->ipv6.ip6table_filter =
125 ip6t_register_table(net, &packet_filter, &initial_table.repl);
126 if (IS_ERR(net->ipv6.ip6table_filter))
127 return PTR_ERR(net->ipv6.ip6table_filter);
128 return 0;
129}
130
131static void __net_exit ip6table_filter_net_exit(struct net *net)
132{
133 ip6t_unregister_table(net->ipv6.ip6table_filter);
134}
135
136static struct pernet_operations ip6table_filter_net_ops = {
137 .init = ip6table_filter_net_init,
138 .exit = ip6table_filter_net_exit,
139};
140
121static int __init ip6table_filter_init(void) 141static int __init ip6table_filter_init(void)
122{ 142{
123 int ret; 143 int ret;
@@ -130,8 +150,7 @@ static int __init ip6table_filter_init(void)
130 /* Entry 1 is the FORWARD hook */ 150 /* Entry 1 is the FORWARD hook */
131 initial_table.entries[1].target.verdict = -forward - 1; 151 initial_table.entries[1].target.verdict = -forward - 1;
132 152
133 /* Register table */ 153 ret = register_pernet_subsys(&ip6table_filter_net_ops);
134 ret = ip6t_register_table(&packet_filter, &initial_table.repl);
135 if (ret < 0) 154 if (ret < 0)
136 return ret; 155 return ret;
137 156
@@ -143,14 +162,14 @@ static int __init ip6table_filter_init(void)
143 return ret; 162 return ret;
144 163
145 cleanup_table: 164 cleanup_table:
146 ip6t_unregister_table(&packet_filter); 165 unregister_pernet_subsys(&ip6table_filter_net_ops);
147 return ret; 166 return ret;
148} 167}
149 168
150static void __exit ip6table_filter_fini(void) 169static void __exit ip6table_filter_fini(void)
151{ 170{
152 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 171 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
153 ip6t_unregister_table(&packet_filter); 172 unregister_pernet_subsys(&ip6table_filter_net_ops);
154} 173}
155 174
156module_init(ip6table_filter_init); 175module_init(ip6table_filter_init);
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index d6082600bc5d..035343a90ffe 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -26,7 +26,7 @@ static struct
26 struct ip6t_replace repl; 26 struct ip6t_replace repl;
27 struct ip6t_standard entries[5]; 27 struct ip6t_standard entries[5];
28 struct ip6t_error term; 28 struct ip6t_error term;
29} initial_table __initdata = { 29} initial_table __net_initdata = {
30 .repl = { 30 .repl = {
31 .name = "mangle", 31 .name = "mangle",
32 .valid_hooks = MANGLE_VALID_HOOKS, 32 .valid_hooks = MANGLE_VALID_HOOKS,
@@ -73,7 +73,7 @@ ip6t_route_hook(unsigned int hook,
73 const struct net_device *out, 73 const struct net_device *out,
74 int (*okfn)(struct sk_buff *)) 74 int (*okfn)(struct sk_buff *))
75{ 75{
76 return ip6t_do_table(skb, hook, in, out, &packet_mangler); 76 return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_mangle);
77} 77}
78 78
79static unsigned int 79static unsigned int
@@ -108,7 +108,7 @@ ip6t_local_hook(unsigned int hook,
108 /* flowlabel and prio (includes version, which shouldn't change either */ 108 /* flowlabel and prio (includes version, which shouldn't change either */
109 flowlabel = *((u_int32_t *)ipv6_hdr(skb)); 109 flowlabel = *((u_int32_t *)ipv6_hdr(skb));
110 110
111 ret = ip6t_do_table(skb, hook, in, out, &packet_mangler); 111 ret = ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_mangle);
112 112
113 if (ret != NF_DROP && ret != NF_STOLEN 113 if (ret != NF_DROP && ret != NF_STOLEN
114 && (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) 114 && (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr))
@@ -158,12 +158,31 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
158 }, 158 },
159}; 159};
160 160
161static int __net_init ip6table_mangle_net_init(struct net *net)
162{
163 /* Register table */
164 net->ipv6.ip6table_mangle =
165 ip6t_register_table(net, &packet_mangler, &initial_table.repl);
166 if (IS_ERR(net->ipv6.ip6table_mangle))
167 return PTR_ERR(net->ipv6.ip6table_mangle);
168 return 0;
169}
170
171static void __net_exit ip6table_mangle_net_exit(struct net *net)
172{
173 ip6t_unregister_table(net->ipv6.ip6table_mangle);
174}
175
176static struct pernet_operations ip6table_mangle_net_ops = {
177 .init = ip6table_mangle_net_init,
178 .exit = ip6table_mangle_net_exit,
179};
180
161static int __init ip6table_mangle_init(void) 181static int __init ip6table_mangle_init(void)
162{ 182{
163 int ret; 183 int ret;
164 184
165 /* Register table */ 185 ret = register_pernet_subsys(&ip6table_mangle_net_ops);
166 ret = ip6t_register_table(&packet_mangler, &initial_table.repl);
167 if (ret < 0) 186 if (ret < 0)
168 return ret; 187 return ret;
169 188
@@ -175,14 +194,14 @@ static int __init ip6table_mangle_init(void)
175 return ret; 194 return ret;
176 195
177 cleanup_table: 196 cleanup_table:
178 ip6t_unregister_table(&packet_mangler); 197 unregister_pernet_subsys(&ip6table_mangle_net_ops);
179 return ret; 198 return ret;
180} 199}
181 200
182static void __exit ip6table_mangle_fini(void) 201static void __exit ip6table_mangle_fini(void)
183{ 202{
184 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 203 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
185 ip6t_unregister_table(&packet_mangler); 204 unregister_pernet_subsys(&ip6table_mangle_net_ops);
186} 205}
187 206
188module_init(ip6table_mangle_init); 207module_init(ip6table_mangle_init);
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index eccbaaa104af..5cd84203abfe 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -13,7 +13,7 @@ static struct
13 struct ip6t_replace repl; 13 struct ip6t_replace repl;
14 struct ip6t_standard entries[2]; 14 struct ip6t_standard entries[2];
15 struct ip6t_error term; 15 struct ip6t_error term;
16} initial_table __initdata = { 16} initial_table __net_initdata = {
17 .repl = { 17 .repl = {
18 .name = "raw", 18 .name = "raw",
19 .valid_hooks = RAW_VALID_HOOKS, 19 .valid_hooks = RAW_VALID_HOOKS,
@@ -51,7 +51,7 @@ ip6t_hook(unsigned int hook,
51 const struct net_device *out, 51 const struct net_device *out,
52 int (*okfn)(struct sk_buff *)) 52 int (*okfn)(struct sk_buff *))
53{ 53{
54 return ip6t_do_table(skb, hook, in, out, &packet_raw); 54 return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_raw);
55} 55}
56 56
57static struct nf_hook_ops ip6t_ops[] __read_mostly = { 57static struct nf_hook_ops ip6t_ops[] __read_mostly = {
@@ -71,12 +71,31 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
71 }, 71 },
72}; 72};
73 73
74static int __net_init ip6table_raw_net_init(struct net *net)
75{
76 /* Register table */
77 net->ipv6.ip6table_raw =
78 ip6t_register_table(net, &packet_raw, &initial_table.repl);
79 if (IS_ERR(net->ipv6.ip6table_raw))
80 return PTR_ERR(net->ipv6.ip6table_raw);
81 return 0;
82}
83
84static void __net_exit ip6table_raw_net_exit(struct net *net)
85{
86 ip6t_unregister_table(net->ipv6.ip6table_raw);
87}
88
89static struct pernet_operations ip6table_raw_net_ops = {
90 .init = ip6table_raw_net_init,
91 .exit = ip6table_raw_net_exit,
92};
93
74static int __init ip6table_raw_init(void) 94static int __init ip6table_raw_init(void)
75{ 95{
76 int ret; 96 int ret;
77 97
78 /* Register table */ 98 ret = register_pernet_subsys(&ip6table_raw_net_ops);
79 ret = ip6t_register_table(&packet_raw, &initial_table.repl);
80 if (ret < 0) 99 if (ret < 0)
81 return ret; 100 return ret;
82 101
@@ -88,14 +107,14 @@ static int __init ip6table_raw_init(void)
88 return ret; 107 return ret;
89 108
90 cleanup_table: 109 cleanup_table:
91 ip6t_unregister_table(&packet_raw); 110 unregister_pernet_subsys(&ip6table_raw_net_ops);
92 return ret; 111 return ret;
93} 112}
94 113
95static void __exit ip6table_raw_fini(void) 114static void __exit ip6table_raw_fini(void)
96{ 115{
97 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 116 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
98 ip6t_unregister_table(&packet_raw); 117 unregister_pernet_subsys(&ip6table_raw_net_ops);
99} 118}
100 119
101module_init(ip6table_raw_init); 120module_init(ip6table_raw_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 2d7b0246475d..3717bdf34f6e 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -30,7 +30,8 @@
30static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 30static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
31 struct nf_conntrack_tuple *tuple) 31 struct nf_conntrack_tuple *tuple)
32{ 32{
33 u_int32_t _addrs[8], *ap; 33 const u_int32_t *ap;
34 u_int32_t _addrs[8];
34 35
35 ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr), 36 ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr),
36 sizeof(_addrs), _addrs); 37 sizeof(_addrs), _addrs);
@@ -146,8 +147,8 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
146 int (*okfn)(struct sk_buff *)) 147 int (*okfn)(struct sk_buff *))
147{ 148{
148 struct nf_conn *ct; 149 struct nf_conn *ct;
149 struct nf_conn_help *help; 150 const struct nf_conn_help *help;
150 struct nf_conntrack_helper *helper; 151 const struct nf_conntrack_helper *helper;
151 enum ip_conntrack_info ctinfo; 152 enum ip_conntrack_info ctinfo;
152 unsigned int ret, protoff; 153 unsigned int ret, protoff;
153 unsigned int extoff = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; 154 unsigned int extoff = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index da924c6b5f06..0897d0f4c4a2 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -32,7 +32,8 @@ static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
32 unsigned int dataoff, 32 unsigned int dataoff,
33 struct nf_conntrack_tuple *tuple) 33 struct nf_conntrack_tuple *tuple)
34{ 34{
35 struct icmp6hdr _hdr, *hp; 35 const struct icmp6hdr *hp;
36 struct icmp6hdr _hdr;
36 37
37 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 38 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
38 if (hp == NULL) 39 if (hp == NULL)
@@ -45,7 +46,7 @@ static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
45} 46}
46 47
47/* Add 1; spaces filled with 0. */ 48/* Add 1; spaces filled with 0. */
48static u_int8_t invmap[] = { 49static const u_int8_t invmap[] = {
49 [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1, 50 [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
50 [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1, 51 [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
51 [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1, 52 [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1,
@@ -101,24 +102,24 @@ static int icmpv6_packet(struct nf_conn *ct,
101} 102}
102 103
103/* Called when a new connection for this protocol found. */ 104/* Called when a new connection for this protocol found. */
104static int icmpv6_new(struct nf_conn *conntrack, 105static int icmpv6_new(struct nf_conn *ct,
105 const struct sk_buff *skb, 106 const struct sk_buff *skb,
106 unsigned int dataoff) 107 unsigned int dataoff)
107{ 108{
108 static u_int8_t valid_new[] = { 109 static const u_int8_t valid_new[] = {
109 [ICMPV6_ECHO_REQUEST - 128] = 1, 110 [ICMPV6_ECHO_REQUEST - 128] = 1,
110 [ICMPV6_NI_QUERY - 128] = 1 111 [ICMPV6_NI_QUERY - 128] = 1
111 }; 112 };
112 int type = conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128; 113 int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128;
113 114
114 if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) { 115 if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
115 /* Can't create a new ICMPv6 `conn' with this. */ 116 /* Can't create a new ICMPv6 `conn' with this. */
116 pr_debug("icmpv6: can't create new conn with type %u\n", 117 pr_debug("icmpv6: can't create new conn with type %u\n",
117 type + 128); 118 type + 128);
118 NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple); 119 NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple);
119 return 0; 120 return 0;
120 } 121 }
121 atomic_set(&conntrack->proto.icmp.count, 0); 122 atomic_set(&ct->proto.icmp.count, 0);
122 return 1; 123 return 1;
123} 124}
124 125
@@ -129,8 +130,8 @@ icmpv6_error_message(struct sk_buff *skb,
129 unsigned int hooknum) 130 unsigned int hooknum)
130{ 131{
131 struct nf_conntrack_tuple intuple, origtuple; 132 struct nf_conntrack_tuple intuple, origtuple;
132 struct nf_conntrack_tuple_hash *h; 133 const struct nf_conntrack_tuple_hash *h;
133 struct nf_conntrack_l4proto *inproto; 134 const struct nf_conntrack_l4proto *inproto;
134 135
135 NF_CT_ASSERT(skb->nfct == NULL); 136 NF_CT_ASSERT(skb->nfct == NULL);
136 137
@@ -176,7 +177,8 @@ static int
176icmpv6_error(struct sk_buff *skb, unsigned int dataoff, 177icmpv6_error(struct sk_buff *skb, unsigned int dataoff,
177 enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum) 178 enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum)
178{ 179{
179 struct icmp6hdr _ih, *icmp6h; 180 const struct icmp6hdr *icmp6h;
181 struct icmp6hdr _ih;
180 182
181 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); 183 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
182 if (icmp6h == NULL) { 184 if (icmp6h == NULL) {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 022da6ce4c0f..2a0d698b24d5 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -39,6 +39,7 @@
39#include <net/rawv6.h> 39#include <net/rawv6.h>
40#include <net/ndisc.h> 40#include <net/ndisc.h>
41#include <net/addrconf.h> 41#include <net/addrconf.h>
42#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
42#include <linux/sysctl.h> 43#include <linux/sysctl.h>
43#include <linux/netfilter.h> 44#include <linux/netfilter.h>
44#include <linux/netfilter_ipv6.h> 45#include <linux/netfilter_ipv6.h>
@@ -680,21 +681,6 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
680 nf_conntrack_put_reasm(skb); 681 nf_conntrack_put_reasm(skb);
681} 682}
682 683
683int nf_ct_frag6_kfree_frags(struct sk_buff *skb)
684{
685 struct sk_buff *s, *s2;
686
687 for (s = NFCT_FRAG6_CB(skb)->orig; s; s = s2) {
688
689 s2 = s->next;
690 kfree_skb(s);
691 }
692
693 kfree_skb(skb);
694
695 return 0;
696}
697
698int nf_ct_frag6_init(void) 684int nf_ct_frag6_init(void)
699{ 685{
700 nf_frags.hashfn = nf_hashfn; 686 nf_frags.hashfn = nf_hashfn;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4d880551fe6a..8897ccf8086a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -641,6 +641,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
641 skb_reserve(skb, hh_len); 641 skb_reserve(skb, hh_len);
642 642
643 skb->priority = sk->sk_priority; 643 skb->priority = sk->sk_priority;
644 skb->mark = sk->sk_mark;
644 skb->dst = dst_clone(&rt->u.dst); 645 skb->dst = dst_clone(&rt->u.dst);
645 646
646 skb_put(skb, length); 647 skb_put(skb, length);
@@ -767,6 +768,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
767 */ 768 */
768 memset(&fl, 0, sizeof(fl)); 769 memset(&fl, 0, sizeof(fl));
769 770
771 fl.mark = sk->sk_mark;
772
770 if (sin6) { 773 if (sin6) {
771 if (addr_len < SIN6_LEN_RFC2133) 774 if (addr_len < SIN6_LEN_RFC2133)
772 return -EINVAL; 775 return -EINVAL;
@@ -1259,7 +1262,7 @@ static const struct seq_operations raw6_seq_ops = {
1259 1262
1260static int raw6_seq_open(struct inode *inode, struct file *file) 1263static int raw6_seq_open(struct inode *inode, struct file *file)
1261{ 1264{
1262 return raw_seq_open(inode, file, &raw_v6_hashinfo, PF_INET6); 1265 return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
1263} 1266}
1264 1267
1265static const struct file_operations raw6_seq_fops = { 1268static const struct file_operations raw6_seq_fops = {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4004c5f0b8d7..513f72e3db0d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -107,6 +107,7 @@ static struct dst_ops ip6_dst_ops = {
107 .update_pmtu = ip6_rt_update_pmtu, 107 .update_pmtu = ip6_rt_update_pmtu,
108 .local_out = ip6_local_out, 108 .local_out = ip6_local_out,
109 .entry_size = sizeof(struct rt6_info), 109 .entry_size = sizeof(struct rt6_info),
110 .entries = ATOMIC_INIT(0),
110}; 111};
111 112
112static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 113static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -120,6 +121,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
120 .check = ip6_dst_check, 121 .check = ip6_dst_check,
121 .update_pmtu = ip6_rt_blackhole_update_pmtu, 122 .update_pmtu = ip6_rt_blackhole_update_pmtu,
122 .entry_size = sizeof(struct rt6_info), 123 .entry_size = sizeof(struct rt6_info),
124 .entries = ATOMIC_INIT(0),
123}; 125};
124 126
125struct rt6_info ip6_null_entry = { 127struct rt6_info ip6_null_entry = {
@@ -1907,7 +1909,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1907 */ 1909 */
1908 if (rt->rt6i_dev == arg->dev && 1910 if (rt->rt6i_dev == arg->dev &&
1909 !dst_metric_locked(&rt->u.dst, RTAX_MTU) && 1911 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1910 (dst_mtu(&rt->u.dst) > arg->mtu || 1912 (dst_mtu(&rt->u.dst) >= arg->mtu ||
1911 (dst_mtu(&rt->u.dst) < arg->mtu && 1913 (dst_mtu(&rt->u.dst) < arg->mtu &&
1912 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { 1914 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
1913 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; 1915 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
@@ -1960,6 +1962,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1960 1962
1961 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid; 1963 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
1962 cfg->fc_nlinfo.nlh = nlh; 1964 cfg->fc_nlinfo.nlh = nlh;
1965 cfg->fc_nlinfo.nl_net = skb->sk->sk_net;
1963 1966
1964 if (tb[RTA_GATEWAY]) { 1967 if (tb[RTA_GATEWAY]) {
1965 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16); 1968 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 00c08399837d..59d0029e93a7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -330,8 +330,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
330 struct tcp_sock *tp; 330 struct tcp_sock *tp;
331 __u32 seq; 331 __u32 seq;
332 332
333 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr, 333 sk = inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, &hdr->daddr,
334 th->source, skb->dev->ifindex); 334 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
335 335
336 if (sk == NULL) { 336 if (sk == NULL) {
337 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); 337 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
@@ -1208,9 +1208,9 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1208 if (req) 1208 if (req)
1209 return tcp_check_req(sk, skb, req, prev); 1209 return tcp_check_req(sk, skb, req, prev);
1210 1210
1211 nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, 1211 nsk = __inet6_lookup_established(sk->sk_net, &tcp_hashinfo,
1212 th->source, &ipv6_hdr(skb)->daddr, 1212 &ipv6_hdr(skb)->saddr, th->source,
1213 ntohs(th->dest), inet6_iif(skb)); 1213 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1214 1214
1215 if (nsk) { 1215 if (nsk) {
1216 if (nsk->sk_state != TCP_TIME_WAIT) { 1216 if (nsk->sk_state != TCP_TIME_WAIT) {
@@ -1710,9 +1710,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1710 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); 1710 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1711 TCP_SKB_CB(skb)->sacked = 0; 1711 TCP_SKB_CB(skb)->sacked = 0;
1712 1712
1713 sk = __inet6_lookup(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source, 1713 sk = __inet6_lookup(skb->dev->nd_net, &tcp_hashinfo,
1714 &ipv6_hdr(skb)->daddr, ntohs(th->dest), 1714 &ipv6_hdr(skb)->saddr, th->source,
1715 inet6_iif(skb)); 1715 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1716 inet6_iif(skb));
1716 1717
1717 if (!sk) 1718 if (!sk)
1718 goto no_tcp_socket; 1719 goto no_tcp_socket;
@@ -1792,7 +1793,7 @@ do_time_wait:
1792 { 1793 {
1793 struct sock *sk2; 1794 struct sock *sk2;
1794 1795
1795 sk2 = inet6_lookup_listener(&tcp_hashinfo, 1796 sk2 = inet6_lookup_listener(skb->dev->nd_net, &tcp_hashinfo,
1796 &ipv6_hdr(skb)->daddr, 1797 &ipv6_hdr(skb)->daddr,
1797 ntohs(th->dest), inet6_iif(skb)); 1798 ntohs(th->dest), inet6_iif(skb));
1798 if (sk2 != NULL) { 1799 if (sk2 != NULL) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bd4b9df8f614..53739de829db 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -56,7 +56,8 @@ static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
56 return udp_get_port(sk, snum, ipv6_rcv_saddr_equal); 56 return udp_get_port(sk, snum, ipv6_rcv_saddr_equal);
57} 57}
58 58
59static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport, 59static struct sock *__udp6_lib_lookup(struct net *net,
60 struct in6_addr *saddr, __be16 sport,
60 struct in6_addr *daddr, __be16 dport, 61 struct in6_addr *daddr, __be16 dport,
61 int dif, struct hlist_head udptable[]) 62 int dif, struct hlist_head udptable[])
62{ 63{
@@ -69,7 +70,8 @@ static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
69 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 70 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
70 struct inet_sock *inet = inet_sk(sk); 71 struct inet_sock *inet = inet_sk(sk);
71 72
72 if (sk->sk_hash == hnum && sk->sk_family == PF_INET6) { 73 if (sk->sk_net == net && sk->sk_hash == hnum &&
74 sk->sk_family == PF_INET6) {
73 struct ipv6_pinfo *np = inet6_sk(sk); 75 struct ipv6_pinfo *np = inet6_sk(sk);
74 int score = 0; 76 int score = 0;
75 if (inet->dport) { 77 if (inet->dport) {
@@ -233,7 +235,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
233 struct sock *sk; 235 struct sock *sk;
234 int err; 236 int err;
235 237
236 sk = __udp6_lib_lookup(daddr, uh->dest, 238 sk = __udp6_lib_lookup(skb->dev->nd_net, daddr, uh->dest,
237 saddr, uh->source, inet6_iif(skb), udptable); 239 saddr, uh->source, inet6_iif(skb), udptable);
238 if (sk == NULL) 240 if (sk == NULL)
239 return; 241 return;
@@ -478,7 +480,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
478 * check socket cache ... must talk to Alan about his plans 480 * check socket cache ... must talk to Alan about his plans
479 * for sock caches... i'll skip this for now. 481 * for sock caches... i'll skip this for now.
480 */ 482 */
481 sk = __udp6_lib_lookup(saddr, uh->source, 483 sk = __udp6_lib_lookup(skb->dev->nd_net, saddr, uh->source,
482 daddr, uh->dest, inet6_iif(skb), udptable); 484 daddr, uh->dest, inet6_iif(skb), udptable);
483 485
484 if (sk == NULL) { 486 if (sk == NULL) {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index c25a6b527fc4..7d20199ee1f3 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -272,6 +272,7 @@ static struct dst_ops xfrm6_dst_ops = {
272 .local_out = __ip6_local_out, 272 .local_out = __ip6_local_out,
273 .gc_thresh = 1024, 273 .gc_thresh = 1024,
274 .entry_size = sizeof(struct xfrm_dst), 274 .entry_size = sizeof(struct xfrm_dst),
275 .entries = ATOMIC_INIT(0),
275}; 276};
276 277
277static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { 278static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index fae90ff31087..639fe8a6ff1e 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -319,7 +319,7 @@ static void xfrm6_tunnel_destroy(struct xfrm_state *x)
319 xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); 319 xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
320} 320}
321 321
322static struct xfrm_type xfrm6_tunnel_type = { 322static const struct xfrm_type xfrm6_tunnel_type = {
323 .description = "IP6IP6", 323 .description = "IP6IP6",
324 .owner = THIS_MODULE, 324 .owner = THIS_MODULE,
325 .proto = IPPROTO_IPV6, 325 .proto = IPPROTO_IPV6,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 89e1e3070ec1..d44c87269bcb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -340,9 +340,42 @@ static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
340 return load; 340 return load;
341} 341}
342 342
343static ieee80211_txrx_result
344ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
345{
346 int hdrlen;
347
348 /*
349 * Drivers are required to align the payload data in a way that
350 * guarantees that the contained IP header is aligned to a four-
351 * byte boundary. In the case of regular frames, this simply means
352 * aligning the payload to a four-byte boundary (because either
353 * the IP header is directly contained, or IV/RFC1042 headers that
354 * have a length divisible by four are in front of it.
355 *
356 * With A-MSDU frames, however, the payload data address must
357 * yield two modulo four because there are 14-byte 802.3 headers
358 * within the A-MSDU frames that push the IP header further back
359 * to a multiple of four again. Thankfully, the specs were sane
360 * enough this time around to require padding each A-MSDU subframe
361 * to a length that is a multiple of four.
362 *
363 * Padding like atheros hardware adds which is inbetween the 802.11
364 * header and the payload is not supported, the driver is required
365 * to move the 802.11 header further back in that case.
366 */
367 hdrlen = ieee80211_get_hdrlen(rx->fc);
368 if (rx->flags & IEEE80211_TXRXD_RX_AMSDU)
369 hdrlen += ETH_HLEN;
370 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
371
372 return TXRX_CONTINUE;
373}
374
343ieee80211_rx_handler ieee80211_rx_pre_handlers[] = 375ieee80211_rx_handler ieee80211_rx_pre_handlers[] =
344{ 376{
345 ieee80211_rx_h_parse_qos, 377 ieee80211_rx_h_parse_qos,
378 ieee80211_rx_h_verify_ip_alignment,
346 NULL 379 NULL
347}; 380};
348 381
@@ -1679,7 +1712,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1679 struct ieee80211_sub_if_data *prev = NULL; 1712 struct ieee80211_sub_if_data *prev = NULL;
1680 struct sk_buff *skb_new; 1713 struct sk_buff *skb_new;
1681 u8 *bssid; 1714 u8 *bssid;
1682 int hdrlen;
1683 1715
1684 hdr = (struct ieee80211_hdr *) skb->data; 1716 hdr = (struct ieee80211_hdr *) skb->data;
1685 memset(&rx, 0, sizeof(rx)); 1717 memset(&rx, 0, sizeof(rx));
@@ -1691,18 +1723,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1691 rx.fc = le16_to_cpu(hdr->frame_control); 1723 rx.fc = le16_to_cpu(hdr->frame_control);
1692 type = rx.fc & IEEE80211_FCTL_FTYPE; 1724 type = rx.fc & IEEE80211_FCTL_FTYPE;
1693 1725
1694 /*
1695 * Drivers are required to align the payload data to a four-byte
1696 * boundary, so the last two bits of the address where it starts
1697 * may not be set. The header is required to be directly before
1698 * the payload data, padding like atheros hardware adds which is
1699 * inbetween the 802.11 header and the payload is not supported,
1700 * the driver is required to move the 802.11 header further back
1701 * in that case.
1702 */
1703 hdrlen = ieee80211_get_hdrlen(rx.fc);
1704 WARN_ON_ONCE(((unsigned long)(skb->data + hdrlen)) & 3);
1705
1706 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) 1726 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT)
1707 local->dot11ReceivedFragmentCount++; 1727 local->dot11ReceivedFragmentCount++;
1708 1728
@@ -1952,7 +1972,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
1952 goto end_reorder; 1972 goto end_reorder;
1953 1973
1954 /* null data frames are excluded */ 1974 /* null data frames are excluded */
1955 if (unlikely(fc & IEEE80211_STYPE_QOS_NULLFUNC)) 1975 if (unlikely(fc & IEEE80211_STYPE_NULLFUNC))
1956 goto end_reorder; 1976 goto end_reorder;
1957 1977
1958 /* new un-ordered ampdu frame - process it */ 1978 /* new un-ordered ampdu frame - process it */
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 078fff0335ad..327e847d2702 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -40,7 +40,7 @@
40 40
41#define NF_CONNTRACK_VERSION "0.5.0" 41#define NF_CONNTRACK_VERSION "0.5.0"
42 42
43DEFINE_RWLOCK(nf_conntrack_lock); 43DEFINE_SPINLOCK(nf_conntrack_lock);
44EXPORT_SYMBOL_GPL(nf_conntrack_lock); 44EXPORT_SYMBOL_GPL(nf_conntrack_lock);
45 45
46/* nf_conntrack_standalone needs this */ 46/* nf_conntrack_standalone needs this */
@@ -73,15 +73,19 @@ static unsigned int nf_conntrack_hash_rnd;
73static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, 73static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
74 unsigned int size, unsigned int rnd) 74 unsigned int size, unsigned int rnd)
75{ 75{
76 unsigned int a, b; 76 unsigned int n;
77 77 u_int32_t h;
78 a = jhash2(tuple->src.u3.all, ARRAY_SIZE(tuple->src.u3.all), 78
79 (tuple->src.l3num << 16) | tuple->dst.protonum); 79 /* The direction must be ignored, so we hash everything up to the
80 b = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), 80 * destination ports (which is a multiple of 4) and treat the last
81 ((__force __u16)tuple->src.u.all << 16) | 81 * three bytes manually.
82 (__force __u16)tuple->dst.u.all); 82 */
83 83 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
84 return ((u64)jhash_2words(a, b, rnd) * size) >> 32; 84 h = jhash2((u32 *)tuple, n,
85 rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
86 tuple->dst.protonum));
87
88 return ((u64)h * size) >> 32;
85} 89}
86 90
87static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) 91static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
@@ -166,8 +170,8 @@ static void
166clean_from_lists(struct nf_conn *ct) 170clean_from_lists(struct nf_conn *ct)
167{ 171{
168 pr_debug("clean_from_lists(%p)\n", ct); 172 pr_debug("clean_from_lists(%p)\n", ct);
169 hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); 173 hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
170 hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode); 174 hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
171 175
172 /* Destroy all pending expectations */ 176 /* Destroy all pending expectations */
173 nf_ct_remove_expectations(ct); 177 nf_ct_remove_expectations(ct);
@@ -199,7 +203,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
199 203
200 rcu_read_unlock(); 204 rcu_read_unlock();
201 205
202 write_lock_bh(&nf_conntrack_lock); 206 spin_lock_bh(&nf_conntrack_lock);
203 /* Expectations will have been removed in clean_from_lists, 207 /* Expectations will have been removed in clean_from_lists,
204 * except TFTP can create an expectation on the first packet, 208 * except TFTP can create an expectation on the first packet,
205 * before connection is in the list, so we need to clean here, 209 * before connection is in the list, so we need to clean here,
@@ -213,7 +217,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
213 } 217 }
214 218
215 NF_CT_STAT_INC(delete); 219 NF_CT_STAT_INC(delete);
216 write_unlock_bh(&nf_conntrack_lock); 220 spin_unlock_bh(&nf_conntrack_lock);
217 221
218 if (ct->master) 222 if (ct->master)
219 nf_ct_put(ct->master); 223 nf_ct_put(ct->master);
@@ -236,26 +240,24 @@ static void death_by_timeout(unsigned long ul_conntrack)
236 rcu_read_unlock(); 240 rcu_read_unlock();
237 } 241 }
238 242
239 write_lock_bh(&nf_conntrack_lock); 243 spin_lock_bh(&nf_conntrack_lock);
240 /* Inside lock so preempt is disabled on module removal path. 244 /* Inside lock so preempt is disabled on module removal path.
241 * Otherwise we can get spurious warnings. */ 245 * Otherwise we can get spurious warnings. */
242 NF_CT_STAT_INC(delete_list); 246 NF_CT_STAT_INC(delete_list);
243 clean_from_lists(ct); 247 clean_from_lists(ct);
244 write_unlock_bh(&nf_conntrack_lock); 248 spin_unlock_bh(&nf_conntrack_lock);
245 nf_ct_put(ct); 249 nf_ct_put(ct);
246} 250}
247 251
248struct nf_conntrack_tuple_hash * 252struct nf_conntrack_tuple_hash *
249__nf_conntrack_find(const struct nf_conntrack_tuple *tuple, 253__nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
250 const struct nf_conn *ignored_conntrack)
251{ 254{
252 struct nf_conntrack_tuple_hash *h; 255 struct nf_conntrack_tuple_hash *h;
253 struct hlist_node *n; 256 struct hlist_node *n;
254 unsigned int hash = hash_conntrack(tuple); 257 unsigned int hash = hash_conntrack(tuple);
255 258
256 hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) { 259 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
257 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 260 if (nf_ct_tuple_equal(tuple, &h->tuple)) {
258 nf_ct_tuple_equal(tuple, &h->tuple)) {
259 NF_CT_STAT_INC(found); 261 NF_CT_STAT_INC(found);
260 return h; 262 return h;
261 } 263 }
@@ -271,12 +273,16 @@ struct nf_conntrack_tuple_hash *
271nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple) 273nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
272{ 274{
273 struct nf_conntrack_tuple_hash *h; 275 struct nf_conntrack_tuple_hash *h;
276 struct nf_conn *ct;
274 277
275 read_lock_bh(&nf_conntrack_lock); 278 rcu_read_lock();
276 h = __nf_conntrack_find(tuple, NULL); 279 h = __nf_conntrack_find(tuple);
277 if (h) 280 if (h) {
278 atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use); 281 ct = nf_ct_tuplehash_to_ctrack(h);
279 read_unlock_bh(&nf_conntrack_lock); 282 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
283 h = NULL;
284 }
285 rcu_read_unlock();
280 286
281 return h; 287 return h;
282} 288}
@@ -286,10 +292,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
286 unsigned int hash, 292 unsigned int hash,
287 unsigned int repl_hash) 293 unsigned int repl_hash)
288{ 294{
289 hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, 295 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
290 &nf_conntrack_hash[hash]); 296 &nf_conntrack_hash[hash]);
291 hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode, 297 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
292 &nf_conntrack_hash[repl_hash]); 298 &nf_conntrack_hash[repl_hash]);
293} 299}
294 300
295void nf_conntrack_hash_insert(struct nf_conn *ct) 301void nf_conntrack_hash_insert(struct nf_conn *ct)
@@ -299,9 +305,9 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
299 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 305 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
300 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 306 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
301 307
302 write_lock_bh(&nf_conntrack_lock); 308 spin_lock_bh(&nf_conntrack_lock);
303 __nf_conntrack_hash_insert(ct, hash, repl_hash); 309 __nf_conntrack_hash_insert(ct, hash, repl_hash);
304 write_unlock_bh(&nf_conntrack_lock); 310 spin_unlock_bh(&nf_conntrack_lock);
305} 311}
306EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); 312EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
307 313
@@ -338,7 +344,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
338 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 344 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
339 pr_debug("Confirming conntrack %p\n", ct); 345 pr_debug("Confirming conntrack %p\n", ct);
340 346
341 write_lock_bh(&nf_conntrack_lock); 347 spin_lock_bh(&nf_conntrack_lock);
342 348
343 /* See if there's one in the list already, including reverse: 349 /* See if there's one in the list already, including reverse:
344 NAT could have grabbed it without realizing, since we're 350 NAT could have grabbed it without realizing, since we're
@@ -364,7 +370,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
364 atomic_inc(&ct->ct_general.use); 370 atomic_inc(&ct->ct_general.use);
365 set_bit(IPS_CONFIRMED_BIT, &ct->status); 371 set_bit(IPS_CONFIRMED_BIT, &ct->status);
366 NF_CT_STAT_INC(insert); 372 NF_CT_STAT_INC(insert);
367 write_unlock_bh(&nf_conntrack_lock); 373 spin_unlock_bh(&nf_conntrack_lock);
368 help = nfct_help(ct); 374 help = nfct_help(ct);
369 if (help && help->helper) 375 if (help && help->helper)
370 nf_conntrack_event_cache(IPCT_HELPER, skb); 376 nf_conntrack_event_cache(IPCT_HELPER, skb);
@@ -379,7 +385,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
379 385
380out: 386out:
381 NF_CT_STAT_INC(insert_failed); 387 NF_CT_STAT_INC(insert_failed);
382 write_unlock_bh(&nf_conntrack_lock); 388 spin_unlock_bh(&nf_conntrack_lock);
383 return NF_DROP; 389 return NF_DROP;
384} 390}
385EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); 391EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
@@ -391,12 +397,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
391 const struct nf_conn *ignored_conntrack) 397 const struct nf_conn *ignored_conntrack)
392{ 398{
393 struct nf_conntrack_tuple_hash *h; 399 struct nf_conntrack_tuple_hash *h;
400 struct hlist_node *n;
401 unsigned int hash = hash_conntrack(tuple);
394 402
395 read_lock_bh(&nf_conntrack_lock); 403 rcu_read_lock();
396 h = __nf_conntrack_find(tuple, ignored_conntrack); 404 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
397 read_unlock_bh(&nf_conntrack_lock); 405 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
406 nf_ct_tuple_equal(tuple, &h->tuple)) {
407 NF_CT_STAT_INC(found);
408 rcu_read_unlock();
409 return 1;
410 }
411 NF_CT_STAT_INC(searched);
412 }
413 rcu_read_unlock();
398 414
399 return h != NULL; 415 return 0;
400} 416}
401EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); 417EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
402 418
@@ -404,7 +420,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
404 420
405/* There's a small race here where we may free a just-assured 421/* There's a small race here where we may free a just-assured
406 connection. Too bad: we're in trouble anyway. */ 422 connection. Too bad: we're in trouble anyway. */
407static int early_drop(unsigned int hash) 423static noinline int early_drop(unsigned int hash)
408{ 424{
409 /* Use oldest entry, which is roughly LRU */ 425 /* Use oldest entry, which is roughly LRU */
410 struct nf_conntrack_tuple_hash *h; 426 struct nf_conntrack_tuple_hash *h;
@@ -413,21 +429,23 @@ static int early_drop(unsigned int hash)
413 unsigned int i, cnt = 0; 429 unsigned int i, cnt = 0;
414 int dropped = 0; 430 int dropped = 0;
415 431
416 read_lock_bh(&nf_conntrack_lock); 432 rcu_read_lock();
417 for (i = 0; i < nf_conntrack_htable_size; i++) { 433 for (i = 0; i < nf_conntrack_htable_size; i++) {
418 hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) { 434 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
435 hnode) {
419 tmp = nf_ct_tuplehash_to_ctrack(h); 436 tmp = nf_ct_tuplehash_to_ctrack(h);
420 if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) 437 if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
421 ct = tmp; 438 ct = tmp;
422 cnt++; 439 cnt++;
423 } 440 }
441
442 if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
443 ct = NULL;
424 if (ct || cnt >= NF_CT_EVICTION_RANGE) 444 if (ct || cnt >= NF_CT_EVICTION_RANGE)
425 break; 445 break;
426 hash = (hash + 1) % nf_conntrack_htable_size; 446 hash = (hash + 1) % nf_conntrack_htable_size;
427 } 447 }
428 if (ct) 448 rcu_read_unlock();
429 atomic_inc(&ct->ct_general.use);
430 read_unlock_bh(&nf_conntrack_lock);
431 449
432 if (!ct) 450 if (!ct)
433 return dropped; 451 return dropped;
@@ -444,7 +462,7 @@ static int early_drop(unsigned int hash)
444struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, 462struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
445 const struct nf_conntrack_tuple *repl) 463 const struct nf_conntrack_tuple *repl)
446{ 464{
447 struct nf_conn *conntrack = NULL; 465 struct nf_conn *ct = NULL;
448 466
449 if (unlikely(!nf_conntrack_hash_rnd_initted)) { 467 if (unlikely(!nf_conntrack_hash_rnd_initted)) {
450 get_random_bytes(&nf_conntrack_hash_rnd, 4); 468 get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -454,8 +472,8 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
454 /* We don't want any race condition at early drop stage */ 472 /* We don't want any race condition at early drop stage */
455 atomic_inc(&nf_conntrack_count); 473 atomic_inc(&nf_conntrack_count);
456 474
457 if (nf_conntrack_max 475 if (nf_conntrack_max &&
458 && atomic_read(&nf_conntrack_count) > nf_conntrack_max) { 476 unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max)) {
459 unsigned int hash = hash_conntrack(orig); 477 unsigned int hash = hash_conntrack(orig);
460 if (!early_drop(hash)) { 478 if (!early_drop(hash)) {
461 atomic_dec(&nf_conntrack_count); 479 atomic_dec(&nf_conntrack_count);
@@ -467,30 +485,37 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
467 } 485 }
468 } 486 }
469 487
470 conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC); 488 ct = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
471 if (conntrack == NULL) { 489 if (ct == NULL) {
472 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 490 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
473 atomic_dec(&nf_conntrack_count); 491 atomic_dec(&nf_conntrack_count);
474 return ERR_PTR(-ENOMEM); 492 return ERR_PTR(-ENOMEM);
475 } 493 }
476 494
477 atomic_set(&conntrack->ct_general.use, 1); 495 atomic_set(&ct->ct_general.use, 1);
478 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; 496 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
479 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; 497 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
480 /* Don't set timer yet: wait for confirmation */ 498 /* Don't set timer yet: wait for confirmation */
481 setup_timer(&conntrack->timeout, death_by_timeout, 499 setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
482 (unsigned long)conntrack); 500 INIT_RCU_HEAD(&ct->rcu);
483 501
484 return conntrack; 502 return ct;
485} 503}
486EXPORT_SYMBOL_GPL(nf_conntrack_alloc); 504EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
487 505
488void nf_conntrack_free(struct nf_conn *conntrack) 506static void nf_conntrack_free_rcu(struct rcu_head *head)
489{ 507{
490 nf_ct_ext_free(conntrack); 508 struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
491 kmem_cache_free(nf_conntrack_cachep, conntrack); 509
510 nf_ct_ext_free(ct);
511 kmem_cache_free(nf_conntrack_cachep, ct);
492 atomic_dec(&nf_conntrack_count); 512 atomic_dec(&nf_conntrack_count);
493} 513}
514
515void nf_conntrack_free(struct nf_conn *ct)
516{
517 call_rcu(&ct->rcu, nf_conntrack_free_rcu);
518}
494EXPORT_SYMBOL_GPL(nf_conntrack_free); 519EXPORT_SYMBOL_GPL(nf_conntrack_free);
495 520
496/* Allocate a new conntrack: we return -ENOMEM if classification 521/* Allocate a new conntrack: we return -ENOMEM if classification
@@ -502,7 +527,7 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
502 struct sk_buff *skb, 527 struct sk_buff *skb,
503 unsigned int dataoff) 528 unsigned int dataoff)
504{ 529{
505 struct nf_conn *conntrack; 530 struct nf_conn *ct;
506 struct nf_conn_help *help; 531 struct nf_conn_help *help;
507 struct nf_conntrack_tuple repl_tuple; 532 struct nf_conntrack_tuple repl_tuple;
508 struct nf_conntrack_expect *exp; 533 struct nf_conntrack_expect *exp;
@@ -512,46 +537,46 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
512 return NULL; 537 return NULL;
513 } 538 }
514 539
515 conntrack = nf_conntrack_alloc(tuple, &repl_tuple); 540 ct = nf_conntrack_alloc(tuple, &repl_tuple);
516 if (conntrack == NULL || IS_ERR(conntrack)) { 541 if (ct == NULL || IS_ERR(ct)) {
517 pr_debug("Can't allocate conntrack.\n"); 542 pr_debug("Can't allocate conntrack.\n");
518 return (struct nf_conntrack_tuple_hash *)conntrack; 543 return (struct nf_conntrack_tuple_hash *)ct;
519 } 544 }
520 545
521 if (!l4proto->new(conntrack, skb, dataoff)) { 546 if (!l4proto->new(ct, skb, dataoff)) {
522 nf_conntrack_free(conntrack); 547 nf_conntrack_free(ct);
523 pr_debug("init conntrack: can't track with proto module\n"); 548 pr_debug("init conntrack: can't track with proto module\n");
524 return NULL; 549 return NULL;
525 } 550 }
526 551
527 write_lock_bh(&nf_conntrack_lock); 552 spin_lock_bh(&nf_conntrack_lock);
528 exp = nf_ct_find_expectation(tuple); 553 exp = nf_ct_find_expectation(tuple);
529 if (exp) { 554 if (exp) {
530 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", 555 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
531 conntrack, exp); 556 ct, exp);
532 /* Welcome, Mr. Bond. We've been expecting you... */ 557 /* Welcome, Mr. Bond. We've been expecting you... */
533 __set_bit(IPS_EXPECTED_BIT, &conntrack->status); 558 __set_bit(IPS_EXPECTED_BIT, &ct->status);
534 conntrack->master = exp->master; 559 ct->master = exp->master;
535 if (exp->helper) { 560 if (exp->helper) {
536 help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC); 561 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
537 if (help) 562 if (help)
538 rcu_assign_pointer(help->helper, exp->helper); 563 rcu_assign_pointer(help->helper, exp->helper);
539 } 564 }
540 565
541#ifdef CONFIG_NF_CONNTRACK_MARK 566#ifdef CONFIG_NF_CONNTRACK_MARK
542 conntrack->mark = exp->master->mark; 567 ct->mark = exp->master->mark;
543#endif 568#endif
544#ifdef CONFIG_NF_CONNTRACK_SECMARK 569#ifdef CONFIG_NF_CONNTRACK_SECMARK
545 conntrack->secmark = exp->master->secmark; 570 ct->secmark = exp->master->secmark;
546#endif 571#endif
547 nf_conntrack_get(&conntrack->master->ct_general); 572 nf_conntrack_get(&ct->master->ct_general);
548 NF_CT_STAT_INC(expect_new); 573 NF_CT_STAT_INC(expect_new);
549 } else { 574 } else {
550 struct nf_conntrack_helper *helper; 575 struct nf_conntrack_helper *helper;
551 576
552 helper = __nf_ct_helper_find(&repl_tuple); 577 helper = __nf_ct_helper_find(&repl_tuple);
553 if (helper) { 578 if (helper) {
554 help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC); 579 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
555 if (help) 580 if (help)
556 rcu_assign_pointer(help->helper, helper); 581 rcu_assign_pointer(help->helper, helper);
557 } 582 }
@@ -559,18 +584,17 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
559 } 584 }
560 585
561 /* Overload tuple linked list to put us in unconfirmed list. */ 586 /* Overload tuple linked list to put us in unconfirmed list. */
562 hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode, 587 hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &unconfirmed);
563 &unconfirmed);
564 588
565 write_unlock_bh(&nf_conntrack_lock); 589 spin_unlock_bh(&nf_conntrack_lock);
566 590
567 if (exp) { 591 if (exp) {
568 if (exp->expectfn) 592 if (exp->expectfn)
569 exp->expectfn(conntrack, exp); 593 exp->expectfn(ct, exp);
570 nf_ct_expect_put(exp); 594 nf_ct_expect_put(exp);
571 } 595 }
572 596
573 return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL]; 597 return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
574} 598}
575 599
576/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ 600/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
@@ -729,7 +753,6 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
729 struct nf_conn_help *help = nfct_help(ct); 753 struct nf_conn_help *help = nfct_help(ct);
730 struct nf_conntrack_helper *helper; 754 struct nf_conntrack_helper *helper;
731 755
732 write_lock_bh(&nf_conntrack_lock);
733 /* Should be unconfirmed, so not in hash table yet */ 756 /* Should be unconfirmed, so not in hash table yet */
734 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 757 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
735 758
@@ -738,8 +761,9 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
738 761
739 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; 762 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
740 if (ct->master || (help && help->expecting != 0)) 763 if (ct->master || (help && help->expecting != 0))
741 goto out; 764 return;
742 765
766 rcu_read_lock();
743 helper = __nf_ct_helper_find(newreply); 767 helper = __nf_ct_helper_find(newreply);
744 if (helper == NULL) { 768 if (helper == NULL) {
745 if (help) 769 if (help)
@@ -757,7 +781,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
757 781
758 rcu_assign_pointer(help->helper, helper); 782 rcu_assign_pointer(help->helper, helper);
759out: 783out:
760 write_unlock_bh(&nf_conntrack_lock); 784 rcu_read_unlock();
761} 785}
762EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); 786EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
763 787
@@ -773,13 +797,11 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
773 NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); 797 NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
774 NF_CT_ASSERT(skb); 798 NF_CT_ASSERT(skb);
775 799
776 write_lock_bh(&nf_conntrack_lock); 800 spin_lock_bh(&nf_conntrack_lock);
777 801
778 /* Only update if this is not a fixed timeout */ 802 /* Only update if this is not a fixed timeout */
779 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) { 803 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
780 write_unlock_bh(&nf_conntrack_lock); 804 goto acct;
781 return;
782 }
783 805
784 /* If not in hash table, timer will not be active yet */ 806 /* If not in hash table, timer will not be active yet */
785 if (!nf_ct_is_confirmed(ct)) { 807 if (!nf_ct_is_confirmed(ct)) {
@@ -799,6 +821,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
799 } 821 }
800 } 822 }
801 823
824acct:
802#ifdef CONFIG_NF_CT_ACCT 825#ifdef CONFIG_NF_CT_ACCT
803 if (do_acct) { 826 if (do_acct) {
804 ct->counters[CTINFO2DIR(ctinfo)].packets++; 827 ct->counters[CTINFO2DIR(ctinfo)].packets++;
@@ -811,7 +834,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
811 } 834 }
812#endif 835#endif
813 836
814 write_unlock_bh(&nf_conntrack_lock); 837 spin_unlock_bh(&nf_conntrack_lock);
815 838
816 /* must be unlocked when calling event cache */ 839 /* must be unlocked when calling event cache */
817 if (event) 840 if (event)
@@ -879,14 +902,6 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
879 nf_conntrack_get(nskb->nfct); 902 nf_conntrack_get(nskb->nfct);
880} 903}
881 904
882static inline int
883do_iter(const struct nf_conntrack_tuple_hash *i,
884 int (*iter)(struct nf_conn *i, void *data),
885 void *data)
886{
887 return iter(nf_ct_tuplehash_to_ctrack(i), data);
888}
889
890/* Bring out ya dead! */ 905/* Bring out ya dead! */
891static struct nf_conn * 906static struct nf_conn *
892get_next_corpse(int (*iter)(struct nf_conn *i, void *data), 907get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
@@ -896,7 +911,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
896 struct nf_conn *ct; 911 struct nf_conn *ct;
897 struct hlist_node *n; 912 struct hlist_node *n;
898 913
899 write_lock_bh(&nf_conntrack_lock); 914 spin_lock_bh(&nf_conntrack_lock);
900 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 915 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
901 hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) { 916 hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
902 ct = nf_ct_tuplehash_to_ctrack(h); 917 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -909,11 +924,11 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
909 if (iter(ct, data)) 924 if (iter(ct, data))
910 set_bit(IPS_DYING_BIT, &ct->status); 925 set_bit(IPS_DYING_BIT, &ct->status);
911 } 926 }
912 write_unlock_bh(&nf_conntrack_lock); 927 spin_unlock_bh(&nf_conntrack_lock);
913 return NULL; 928 return NULL;
914found: 929found:
915 atomic_inc(&ct->ct_general.use); 930 atomic_inc(&ct->ct_general.use);
916 write_unlock_bh(&nf_conntrack_lock); 931 spin_unlock_bh(&nf_conntrack_lock);
917 return ct; 932 return ct;
918} 933}
919 934
@@ -939,7 +954,7 @@ static int kill_all(struct nf_conn *i, void *data)
939 return 1; 954 return 1;
940} 955}
941 956
942void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, int size) 957void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
943{ 958{
944 if (vmalloced) 959 if (vmalloced)
945 vfree(hash); 960 vfree(hash);
@@ -988,7 +1003,7 @@ void nf_conntrack_cleanup(void)
988 nf_conntrack_expect_fini(); 1003 nf_conntrack_expect_fini();
989} 1004}
990 1005
991struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced) 1006struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
992{ 1007{
993 struct hlist_head *hash; 1008 struct hlist_head *hash;
994 unsigned int size, i; 1009 unsigned int size, i;
@@ -1015,8 +1030,8 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1015 1030
1016int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) 1031int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1017{ 1032{
1018 int i, bucket, hashsize, vmalloced; 1033 int i, bucket, vmalloced, old_vmalloced;
1019 int old_vmalloced, old_size; 1034 unsigned int hashsize, old_size;
1020 int rnd; 1035 int rnd;
1021 struct hlist_head *hash, *old_hash; 1036 struct hlist_head *hash, *old_hash;
1022 struct nf_conntrack_tuple_hash *h; 1037 struct nf_conntrack_tuple_hash *h;
@@ -1025,7 +1040,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1025 if (!nf_conntrack_htable_size) 1040 if (!nf_conntrack_htable_size)
1026 return param_set_uint(val, kp); 1041 return param_set_uint(val, kp);
1027 1042
1028 hashsize = simple_strtol(val, NULL, 0); 1043 hashsize = simple_strtoul(val, NULL, 0);
1029 if (!hashsize) 1044 if (!hashsize)
1030 return -EINVAL; 1045 return -EINVAL;
1031 1046
@@ -1037,12 +1052,17 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1037 * use a new random seed */ 1052 * use a new random seed */
1038 get_random_bytes(&rnd, 4); 1053 get_random_bytes(&rnd, 4);
1039 1054
1040 write_lock_bh(&nf_conntrack_lock); 1055 /* Lookups in the old hash might happen in parallel, which means we
1056 * might get false negatives during connection lookup. New connections
1057 * created because of a false negative won't make it into the hash
1058 * though since that required taking the lock.
1059 */
1060 spin_lock_bh(&nf_conntrack_lock);
1041 for (i = 0; i < nf_conntrack_htable_size; i++) { 1061 for (i = 0; i < nf_conntrack_htable_size; i++) {
1042 while (!hlist_empty(&nf_conntrack_hash[i])) { 1062 while (!hlist_empty(&nf_conntrack_hash[i])) {
1043 h = hlist_entry(nf_conntrack_hash[i].first, 1063 h = hlist_entry(nf_conntrack_hash[i].first,
1044 struct nf_conntrack_tuple_hash, hnode); 1064 struct nf_conntrack_tuple_hash, hnode);
1045 hlist_del(&h->hnode); 1065 hlist_del_rcu(&h->hnode);
1046 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1066 bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
1047 hlist_add_head(&h->hnode, &hash[bucket]); 1067 hlist_add_head(&h->hnode, &hash[bucket]);
1048 } 1068 }
@@ -1055,7 +1075,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1055 nf_conntrack_vmalloc = vmalloced; 1075 nf_conntrack_vmalloc = vmalloced;
1056 nf_conntrack_hash = hash; 1076 nf_conntrack_hash = hash;
1057 nf_conntrack_hash_rnd = rnd; 1077 nf_conntrack_hash_rnd = rnd;
1058 write_unlock_bh(&nf_conntrack_lock); 1078 spin_unlock_bh(&nf_conntrack_lock);
1059 1079
1060 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); 1080 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
1061 return 0; 1081 return 0;
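The nf_conntrack_core.c changes above replace the global read-write lock with RCU-protected hash chains: a lookup now walks the chain without taking any lock and grabs a reference with atomic_inc_not_zero(), giving up if the entry's refcount has already dropped to zero. The following is a minimal userspace sketch of that lookup pattern, not the kernel code itself; struct entry, get_ref_not_zero() and lookup() are illustrative names, C11 atomics stand in for the kernel's atomic_t, and the RCU read-side section and grace period are deliberately omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entry {
	struct entry *next;  /* hash-chain link (hlist_node in the kernel) */
	atomic_int use;      /* reference count (ct->ct_general.use)       */
	int key;
};

/* Userspace stand-in for atomic_inc_not_zero(): take a reference only
 * while the count is still non-zero, i.e. the object is not being freed. */
static bool get_ref_not_zero(struct entry *e)
{
	int old = atomic_load(&e->use);

	while (old != 0)
		if (atomic_compare_exchange_weak(&e->use, &old, old + 1))
			return true;
	return false;
}

/* Lock-free chain walk; in the kernel this runs under rcu_read_lock()
 * and uses hlist_for_each_entry_rcu(). */
static struct entry *lookup(struct entry *head, int key)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next)
		if (e->key == key && get_ref_not_zero(e))
			return e;  /* caller must drop the reference later */
	return NULL;
}

int main(void)
{
	struct entry dead = { .next = NULL,  .use = 0, .key = 1 }; /* last ref gone */
	struct entry live = { .next = &dead, .use = 1, .key = 2 };

	printf("key 1 found: %s\n", lookup(&live, 1) ? "yes" : "no"); /* no  */
	printf("key 2 found: %s\n", lookup(&live, 2) ? "yes" : "no"); /* yes */
	return 0;
}

The key property is that the increment only succeeds while the count is non-zero, so a lock-free reader can never resurrect an object whose last reference has already been dropped and whose memory is only kept around until the grace period ends.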
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index e0cd9d00aa61..e06bf0028bb1 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -50,7 +50,7 @@ void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
50 NF_CT_ASSERT(master_help); 50 NF_CT_ASSERT(master_help);
51 NF_CT_ASSERT(!timer_pending(&exp->timeout)); 51 NF_CT_ASSERT(!timer_pending(&exp->timeout));
52 52
53 hlist_del(&exp->hnode); 53 hlist_del_rcu(&exp->hnode);
54 nf_ct_expect_count--; 54 nf_ct_expect_count--;
55 55
56 hlist_del(&exp->lnode); 56 hlist_del(&exp->lnode);
@@ -65,9 +65,9 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect)
65{ 65{
66 struct nf_conntrack_expect *exp = (void *)ul_expect; 66 struct nf_conntrack_expect *exp = (void *)ul_expect;
67 67
68 write_lock_bh(&nf_conntrack_lock); 68 spin_lock_bh(&nf_conntrack_lock);
69 nf_ct_unlink_expect(exp); 69 nf_ct_unlink_expect(exp);
70 write_unlock_bh(&nf_conntrack_lock); 70 spin_unlock_bh(&nf_conntrack_lock);
71 nf_ct_expect_put(exp); 71 nf_ct_expect_put(exp);
72} 72}
73 73
@@ -97,7 +97,7 @@ __nf_ct_expect_find(const struct nf_conntrack_tuple *tuple)
97 return NULL; 97 return NULL;
98 98
99 h = nf_ct_expect_dst_hash(tuple); 99 h = nf_ct_expect_dst_hash(tuple);
100 hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) { 100 hlist_for_each_entry_rcu(i, n, &nf_ct_expect_hash[h], hnode) {
101 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) 101 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
102 return i; 102 return i;
103 } 103 }
@@ -111,11 +111,11 @@ nf_ct_expect_find_get(const struct nf_conntrack_tuple *tuple)
111{ 111{
112 struct nf_conntrack_expect *i; 112 struct nf_conntrack_expect *i;
113 113
114 read_lock_bh(&nf_conntrack_lock); 114 rcu_read_lock();
115 i = __nf_ct_expect_find(tuple); 115 i = __nf_ct_expect_find(tuple);
116 if (i) 116 if (i && !atomic_inc_not_zero(&i->use))
117 atomic_inc(&i->use); 117 i = NULL;
118 read_unlock_bh(&nf_conntrack_lock); 118 rcu_read_unlock();
119 119
120 return i; 120 return i;
121} 121}
@@ -201,12 +201,12 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
201/* Generally a bad idea to call this: could have matched already. */ 201/* Generally a bad idea to call this: could have matched already. */
202void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) 202void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
203{ 203{
204 write_lock_bh(&nf_conntrack_lock); 204 spin_lock_bh(&nf_conntrack_lock);
205 if (del_timer(&exp->timeout)) { 205 if (del_timer(&exp->timeout)) {
206 nf_ct_unlink_expect(exp); 206 nf_ct_unlink_expect(exp);
207 nf_ct_expect_put(exp); 207 nf_ct_expect_put(exp);
208 } 208 }
209 write_unlock_bh(&nf_conntrack_lock); 209 spin_unlock_bh(&nf_conntrack_lock);
210} 210}
211EXPORT_SYMBOL_GPL(nf_ct_unexpect_related); 211EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
212 212
@@ -223,6 +223,7 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
223 223
224 new->master = me; 224 new->master = me;
225 atomic_set(&new->use, 1); 225 atomic_set(&new->use, 1);
226 INIT_RCU_HEAD(&new->rcu);
226 return new; 227 return new;
227} 228}
228EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); 229EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -278,10 +279,18 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
278} 279}
279EXPORT_SYMBOL_GPL(nf_ct_expect_init); 280EXPORT_SYMBOL_GPL(nf_ct_expect_init);
280 281
282static void nf_ct_expect_free_rcu(struct rcu_head *head)
283{
284 struct nf_conntrack_expect *exp;
285
286 exp = container_of(head, struct nf_conntrack_expect, rcu);
287 kmem_cache_free(nf_ct_expect_cachep, exp);
288}
289
281void nf_ct_expect_put(struct nf_conntrack_expect *exp) 290void nf_ct_expect_put(struct nf_conntrack_expect *exp)
282{ 291{
283 if (atomic_dec_and_test(&exp->use)) 292 if (atomic_dec_and_test(&exp->use))
284 kmem_cache_free(nf_ct_expect_cachep, exp); 293 call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
285} 294}
286EXPORT_SYMBOL_GPL(nf_ct_expect_put); 295EXPORT_SYMBOL_GPL(nf_ct_expect_put);
287 296
@@ -295,7 +304,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
295 hlist_add_head(&exp->lnode, &master_help->expectations); 304 hlist_add_head(&exp->lnode, &master_help->expectations);
296 master_help->expecting++; 305 master_help->expecting++;
297 306
298 hlist_add_head(&exp->hnode, &nf_ct_expect_hash[h]); 307 hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
299 nf_ct_expect_count++; 308 nf_ct_expect_count++;
300 309
301 setup_timer(&exp->timeout, nf_ct_expectation_timed_out, 310 setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
@@ -346,7 +355,7 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect)
346 355
347 NF_CT_ASSERT(master_help); 356 NF_CT_ASSERT(master_help);
348 357
349 write_lock_bh(&nf_conntrack_lock); 358 spin_lock_bh(&nf_conntrack_lock);
350 if (!master_help->helper) { 359 if (!master_help->helper) {
351 ret = -ESHUTDOWN; 360 ret = -ESHUTDOWN;
352 goto out; 361 goto out;
@@ -381,7 +390,7 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect)
381 nf_ct_expect_event(IPEXP_NEW, expect); 390 nf_ct_expect_event(IPEXP_NEW, expect);
382 ret = 0; 391 ret = 0;
383out: 392out:
384 write_unlock_bh(&nf_conntrack_lock); 393 spin_unlock_bh(&nf_conntrack_lock);
385 return ret; 394 return ret;
386} 395}
387EXPORT_SYMBOL_GPL(nf_ct_expect_related); 396EXPORT_SYMBOL_GPL(nf_ct_expect_related);
@@ -394,10 +403,12 @@ struct ct_expect_iter_state {
394static struct hlist_node *ct_expect_get_first(struct seq_file *seq) 403static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
395{ 404{
396 struct ct_expect_iter_state *st = seq->private; 405 struct ct_expect_iter_state *st = seq->private;
406 struct hlist_node *n;
397 407
398 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { 408 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
399 if (!hlist_empty(&nf_ct_expect_hash[st->bucket])) 409 n = rcu_dereference(nf_ct_expect_hash[st->bucket].first);
400 return nf_ct_expect_hash[st->bucket].first; 410 if (n)
411 return n;
401 } 412 }
402 return NULL; 413 return NULL;
403} 414}
@@ -407,11 +418,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
407{ 418{
408 struct ct_expect_iter_state *st = seq->private; 419 struct ct_expect_iter_state *st = seq->private;
409 420
410 head = head->next; 421 head = rcu_dereference(head->next);
411 while (head == NULL) { 422 while (head == NULL) {
412 if (++st->bucket >= nf_ct_expect_hsize) 423 if (++st->bucket >= nf_ct_expect_hsize)
413 return NULL; 424 return NULL;
414 head = nf_ct_expect_hash[st->bucket].first; 425 head = rcu_dereference(nf_ct_expect_hash[st->bucket].first);
415 } 426 }
416 return head; 427 return head;
417} 428}
@@ -427,8 +438,9 @@ static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
427} 438}
428 439
429static void *exp_seq_start(struct seq_file *seq, loff_t *pos) 440static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
441 __acquires(RCU)
430{ 442{
431 read_lock_bh(&nf_conntrack_lock); 443 rcu_read_lock();
432 return ct_expect_get_idx(seq, *pos); 444 return ct_expect_get_idx(seq, *pos);
433} 445}
434 446
@@ -439,8 +451,9 @@ static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
439} 451}
440 452
441static void exp_seq_stop(struct seq_file *seq, void *v) 453static void exp_seq_stop(struct seq_file *seq, void *v)
454 __releases(RCU)
442{ 455{
443 read_unlock_bh(&nf_conntrack_lock); 456 rcu_read_unlock();
444} 457}
445 458
446static int exp_seq_show(struct seq_file *s, void *v) 459static int exp_seq_show(struct seq_file *s, void *v)
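Both nf_conntrack_free() and nf_ct_expect_put() above now defer the actual kmem_cache_free() to an RCU callback: the object embeds a struct rcu_head, and the callback recovers the enclosing object with container_of() before freeing it. Below is a compilable, self-contained sketch of just that object-recovery pattern, with a plain function call standing in for call_rcu() and the grace period; struct expect, struct cb_head and defer_free() are made-up illustrative names, not kernel APIs.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {                      /* stand-in for struct rcu_head */
	void (*func)(struct cb_head *);
};

struct expect {                       /* stand-in for nf_conntrack_expect */
	int id;
	struct cb_head rcu;           /* embedded callback node */
};

static void expect_free_cb(struct cb_head *head)
{
	struct expect *exp = container_of(head, struct expect, rcu);

	printf("freeing expectation %d\n", exp->id);
	free(exp);
}

/* In the kernel this would be call_rcu(&exp->rcu, expect_free_cb);
 * here the grace period is elided and the callback runs immediately. */
static void defer_free(struct cb_head *head, void (*func)(struct cb_head *))
{
	head->func = func;
	head->func(head);
}

int main(void)
{
	struct expect *exp = malloc(sizeof(*exp));

	if (!exp)
		return 1;
	exp->id = 42;
	defer_free(&exp->rcu, expect_free_cb);
	return 0;
}

Embedding the callback node in the object itself is what lets the free path run without any extra allocation, which matters because the deferred-free path may be invoked from contexts where allocating memory could fail.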
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index ff66fba514fd..867882313e49 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -87,7 +87,7 @@ typedef struct field_t {
87 unsigned char ub; 87 unsigned char ub;
88 unsigned short attr; 88 unsigned short attr;
89 unsigned short offset; 89 unsigned short offset;
90 struct field_t *fields; 90 const struct field_t *fields;
91} field_t; 91} field_t;
92 92
93/* Bit Stream */ 93/* Bit Stream */
@@ -96,7 +96,7 @@ typedef struct {
96 unsigned char *beg; 96 unsigned char *beg;
97 unsigned char *end; 97 unsigned char *end;
98 unsigned char *cur; 98 unsigned char *cur;
99 unsigned bit; 99 unsigned int bit;
100} bitstr_t; 100} bitstr_t;
101 101
102/* Tool Functions */ 102/* Tool Functions */
@@ -104,29 +104,29 @@ typedef struct {
104#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;} 104#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
105#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;} 105#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
106#define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND) 106#define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND)
107static unsigned get_len(bitstr_t * bs); 107static unsigned int get_len(bitstr_t *bs);
108static unsigned get_bit(bitstr_t * bs); 108static unsigned int get_bit(bitstr_t *bs);
109static unsigned get_bits(bitstr_t * bs, unsigned b); 109static unsigned int get_bits(bitstr_t *bs, unsigned int b);
110static unsigned get_bitmap(bitstr_t * bs, unsigned b); 110static unsigned int get_bitmap(bitstr_t *bs, unsigned int b);
111static unsigned get_uint(bitstr_t * bs, int b); 111static unsigned int get_uint(bitstr_t *bs, int b);
112 112
113/* Decoder Functions */ 113/* Decoder Functions */
114static int decode_nul(bitstr_t * bs, field_t * f, char *base, int level); 114static int decode_nul(bitstr_t *bs, const struct field_t *f, char *base, int level);
115static int decode_bool(bitstr_t * bs, field_t * f, char *base, int level); 115static int decode_bool(bitstr_t *bs, const struct field_t *f, char *base, int level);
116static int decode_oid(bitstr_t * bs, field_t * f, char *base, int level); 116static int decode_oid(bitstr_t *bs, const struct field_t *f, char *base, int level);
117static int decode_int(bitstr_t * bs, field_t * f, char *base, int level); 117static int decode_int(bitstr_t *bs, const struct field_t *f, char *base, int level);
118static int decode_enum(bitstr_t * bs, field_t * f, char *base, int level); 118static int decode_enum(bitstr_t *bs, const struct field_t *f, char *base, int level);
119static int decode_bitstr(bitstr_t * bs, field_t * f, char *base, int level); 119static int decode_bitstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
120static int decode_numstr(bitstr_t * bs, field_t * f, char *base, int level); 120static int decode_numstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
121static int decode_octstr(bitstr_t * bs, field_t * f, char *base, int level); 121static int decode_octstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
122static int decode_bmpstr(bitstr_t * bs, field_t * f, char *base, int level); 122static int decode_bmpstr(bitstr_t *bs, const struct field_t *f, char *base, int level);
123static int decode_seq(bitstr_t * bs, field_t * f, char *base, int level); 123static int decode_seq(bitstr_t *bs, const struct field_t *f, char *base, int level);
124static int decode_seqof(bitstr_t * bs, field_t * f, char *base, int level); 124static int decode_seqof(bitstr_t *bs, const struct field_t *f, char *base, int level);
125static int decode_choice(bitstr_t * bs, field_t * f, char *base, int level); 125static int decode_choice(bitstr_t *bs, const struct field_t *f, char *base, int level);
126 126
127/* Decoder Functions Vector */ 127/* Decoder Functions Vector */
128typedef int (*decoder_t) (bitstr_t *, field_t *, char *, int); 128typedef int (*decoder_t)(bitstr_t *, const struct field_t *, char *, int);
129static decoder_t Decoders[] = { 129static const decoder_t Decoders[] = {
130 decode_nul, 130 decode_nul,
131 decode_bool, 131 decode_bool,
132 decode_oid, 132 decode_oid,
@@ -150,9 +150,9 @@ static decoder_t Decoders[] = {
150 * Functions 150 * Functions
151 ****************************************************************************/ 151 ****************************************************************************/
152/* Assume bs is aligned && v < 16384 */ 152/* Assume bs is aligned && v < 16384 */
153unsigned get_len(bitstr_t * bs) 153static unsigned int get_len(bitstr_t *bs)
154{ 154{
155 unsigned v; 155 unsigned int v;
156 156
157 v = *bs->cur++; 157 v = *bs->cur++;
158 158
@@ -166,9 +166,9 @@ unsigned get_len(bitstr_t * bs)
166} 166}
167 167
168/****************************************************************************/ 168/****************************************************************************/
169unsigned get_bit(bitstr_t * bs) 169static unsigned int get_bit(bitstr_t *bs)
170{ 170{
171 unsigned b = (*bs->cur) & (0x80 >> bs->bit); 171 unsigned int b = (*bs->cur) & (0x80 >> bs->bit);
172 172
173 INC_BIT(bs); 173 INC_BIT(bs);
174 174
@@ -177,9 +177,9 @@ unsigned get_bit(bitstr_t * bs)
177 177
178/****************************************************************************/ 178/****************************************************************************/
179/* Assume b <= 8 */ 179/* Assume b <= 8 */
180unsigned get_bits(bitstr_t * bs, unsigned b) 180static unsigned int get_bits(bitstr_t *bs, unsigned int b)
181{ 181{
182 unsigned v, l; 182 unsigned int v, l;
183 183
184 v = (*bs->cur) & (0xffU >> bs->bit); 184 v = (*bs->cur) & (0xffU >> bs->bit);
185 l = b + bs->bit; 185 l = b + bs->bit;
@@ -203,9 +203,9 @@ unsigned get_bits(bitstr_t * bs, unsigned b)
203 203
204/****************************************************************************/ 204/****************************************************************************/
205/* Assume b <= 32 */ 205/* Assume b <= 32 */
206unsigned get_bitmap(bitstr_t * bs, unsigned b) 206static unsigned int get_bitmap(bitstr_t *bs, unsigned int b)
207{ 207{
208 unsigned v, l, shift, bytes; 208 unsigned int v, l, shift, bytes;
209 209
210 if (!b) 210 if (!b)
211 return 0; 211 return 0;
@@ -213,18 +213,18 @@ unsigned get_bitmap(bitstr_t * bs, unsigned b)
213 l = bs->bit + b; 213 l = bs->bit + b;
214 214
215 if (l < 8) { 215 if (l < 8) {
216 v = (unsigned) (*bs->cur) << (bs->bit + 24); 216 v = (unsigned int)(*bs->cur) << (bs->bit + 24);
217 bs->bit = l; 217 bs->bit = l;
218 } else if (l == 8) { 218 } else if (l == 8) {
219 v = (unsigned) (*bs->cur++) << (bs->bit + 24); 219 v = (unsigned int)(*bs->cur++) << (bs->bit + 24);
220 bs->bit = 0; 220 bs->bit = 0;
221 } else { 221 } else {
222 for (bytes = l >> 3, shift = 24, v = 0; bytes; 222 for (bytes = l >> 3, shift = 24, v = 0; bytes;
223 bytes--, shift -= 8) 223 bytes--, shift -= 8)
224 v |= (unsigned) (*bs->cur++) << shift; 224 v |= (unsigned int)(*bs->cur++) << shift;
225 225
226 if (l < 32) { 226 if (l < 32) {
227 v |= (unsigned) (*bs->cur) << shift; 227 v |= (unsigned int)(*bs->cur) << shift;
228 v <<= bs->bit; 228 v <<= bs->bit;
229 } else if (l > 32) { 229 } else if (l > 32) {
230 v <<= bs->bit; 230 v <<= bs->bit;
@@ -242,9 +242,9 @@ unsigned get_bitmap(bitstr_t * bs, unsigned b)
242/**************************************************************************** 242/****************************************************************************
243 * Assume bs is aligned and sizeof(unsigned int) == 4 243 * Assume bs is aligned and sizeof(unsigned int) == 4
244 ****************************************************************************/ 244 ****************************************************************************/
245unsigned get_uint(bitstr_t * bs, int b) 245static unsigned int get_uint(bitstr_t *bs, int b)
246{ 246{
247 unsigned v = 0; 247 unsigned int v = 0;
248 248
249 switch (b) { 249 switch (b) {
250 case 4: 250 case 4:
@@ -264,7 +264,8 @@ unsigned get_uint(bitstr_t * bs, int b)
264} 264}
265 265
266/****************************************************************************/ 266/****************************************************************************/
267int decode_nul(bitstr_t * bs, field_t * f, char *base, int level) 267static int decode_nul(bitstr_t *bs, const struct field_t *f,
268 char *base, int level)
268{ 269{
269 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 270 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
270 271
@@ -272,7 +273,8 @@ int decode_nul(bitstr_t * bs, field_t * f, char *base, int level)
272} 273}
273 274
274/****************************************************************************/ 275/****************************************************************************/
275int decode_bool(bitstr_t * bs, field_t * f, char *base, int level) 276static int decode_bool(bitstr_t *bs, const struct field_t *f,
277 char *base, int level)
276{ 278{
277 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 279 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
278 280
@@ -283,7 +285,8 @@ int decode_bool(bitstr_t * bs, field_t * f, char *base, int level)
283} 285}
284 286
285/****************************************************************************/ 287/****************************************************************************/
286int decode_oid(bitstr_t * bs, field_t * f, char *base, int level) 288static int decode_oid(bitstr_t *bs, const struct field_t *f,
289 char *base, int level)
287{ 290{
288 int len; 291 int len;
289 292
@@ -299,9 +302,10 @@ int decode_oid(bitstr_t * bs, field_t * f, char *base, int level)
299} 302}
300 303
301/****************************************************************************/ 304/****************************************************************************/
302int decode_int(bitstr_t * bs, field_t * f, char *base, int level) 305static int decode_int(bitstr_t *bs, const struct field_t *f,
306 char *base, int level)
303{ 307{
304 unsigned len; 308 unsigned int len;
305 309
306 PRINT("%*.s%s", level * TAB_SIZE, " ", f->name); 310 PRINT("%*.s%s", level * TAB_SIZE, " ", f->name);
307 311
@@ -318,9 +322,9 @@ int decode_int(bitstr_t * bs, field_t * f, char *base, int level)
318 len = get_bits(bs, 2) + 1; 322 len = get_bits(bs, 2) + 1;
319 BYTE_ALIGN(bs); 323 BYTE_ALIGN(bs);
320 if (base && (f->attr & DECODE)) { /* timeToLive */ 324 if (base && (f->attr & DECODE)) { /* timeToLive */
321 unsigned v = get_uint(bs, len) + f->lb; 325 unsigned int v = get_uint(bs, len) + f->lb;
322 PRINT(" = %u", v); 326 PRINT(" = %u", v);
323 *((unsigned *) (base + f->offset)) = v; 327 *((unsigned int *)(base + f->offset)) = v;
324 } 328 }
325 bs->cur += len; 329 bs->cur += len;
326 break; 330 break;
@@ -342,7 +346,8 @@ int decode_int(bitstr_t * bs, field_t * f, char *base, int level)
342} 346}
343 347
344/****************************************************************************/ 348/****************************************************************************/
345int decode_enum(bitstr_t * bs, field_t * f, char *base, int level) 349static int decode_enum(bitstr_t *bs, const struct field_t *f,
350 char *base, int level)
346{ 351{
347 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 352 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
348 353
@@ -357,9 +362,10 @@ int decode_enum(bitstr_t * bs, field_t * f, char *base, int level)
357} 362}
358 363
359/****************************************************************************/ 364/****************************************************************************/
360int decode_bitstr(bitstr_t * bs, field_t * f, char *base, int level) 365static int decode_bitstr(bitstr_t *bs, const struct field_t *f,
366 char *base, int level)
361{ 367{
362 unsigned len; 368 unsigned int len;
363 369
364 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 370 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
365 371
@@ -390,9 +396,10 @@ int decode_bitstr(bitstr_t * bs, field_t * f, char *base, int level)
390} 396}
391 397
392/****************************************************************************/ 398/****************************************************************************/
393int decode_numstr(bitstr_t * bs, field_t * f, char *base, int level) 399static int decode_numstr(bitstr_t *bs, const struct field_t *f,
400 char *base, int level)
394{ 401{
395 unsigned len; 402 unsigned int len;
396 403
397 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 404 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
398 405
@@ -407,9 +414,10 @@ int decode_numstr(bitstr_t * bs, field_t * f, char *base, int level)
407} 414}
408 415
409/****************************************************************************/ 416/****************************************************************************/
410int decode_octstr(bitstr_t * bs, field_t * f, char *base, int level) 417static int decode_octstr(bitstr_t *bs, const struct field_t *f,
418 char *base, int level)
411{ 419{
412 unsigned len; 420 unsigned int len;
413 421
414 PRINT("%*.s%s", level * TAB_SIZE, " ", f->name); 422 PRINT("%*.s%s", level * TAB_SIZE, " ", f->name);
415 423
@@ -424,7 +432,7 @@ int decode_octstr(bitstr_t * bs, field_t * f, char *base, int level)
424 bs->cur[0], bs->cur[1], 432 bs->cur[0], bs->cur[1],
425 bs->cur[2], bs->cur[3], 433 bs->cur[2], bs->cur[3],
426 bs->cur[4] * 256 + bs->cur[5])); 434 bs->cur[4] * 256 + bs->cur[5]));
427 *((unsigned *) (base + f->offset)) = 435 *((unsigned int *)(base + f->offset)) =
428 bs->cur - bs->buf; 436 bs->cur - bs->buf;
429 } 437 }
430 } 438 }
@@ -455,9 +463,10 @@ int decode_octstr(bitstr_t * bs, field_t * f, char *base, int level)
455} 463}
456 464
457/****************************************************************************/ 465/****************************************************************************/
458int decode_bmpstr(bitstr_t * bs, field_t * f, char *base, int level) 466static int decode_bmpstr(bitstr_t *bs, const struct field_t *f,
467 char *base, int level)
459{ 468{
460 unsigned len; 469 unsigned int len;
461 470
462 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 471 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
463 472
@@ -480,11 +489,12 @@ int decode_bmpstr(bitstr_t * bs, field_t * f, char *base, int level)
480} 489}
481 490
482/****************************************************************************/ 491/****************************************************************************/
483int decode_seq(bitstr_t * bs, field_t * f, char *base, int level) 492static int decode_seq(bitstr_t *bs, const struct field_t *f,
493 char *base, int level)
484{ 494{
485 unsigned ext, bmp, i, opt, len = 0, bmp2, bmp2_len; 495 unsigned int ext, bmp, i, opt, len = 0, bmp2, bmp2_len;
486 int err; 496 int err;
487 field_t *son; 497 const struct field_t *son;
488 unsigned char *beg = NULL; 498 unsigned char *beg = NULL;
489 499
490 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 500 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
@@ -498,7 +508,7 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
498 /* Get fields bitmap */ 508 /* Get fields bitmap */
499 bmp = get_bitmap(bs, f->sz); 509 bmp = get_bitmap(bs, f->sz);
500 if (base) 510 if (base)
501 *(unsigned *) base = bmp; 511 *(unsigned int *)base = bmp;
502 512
503 /* Decode the root components */ 513 /* Decode the root components */
504 for (i = opt = 0, son = f->fields; i < f->lb; i++, son++) { 514 for (i = opt = 0, son = f->fields; i < f->lb; i++, son++) {
@@ -550,7 +560,7 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
550 bmp2 = get_bitmap(bs, bmp2_len); 560 bmp2 = get_bitmap(bs, bmp2_len);
551 bmp |= bmp2 >> f->sz; 561 bmp |= bmp2 >> f->sz;
552 if (base) 562 if (base)
553 *(unsigned *) base = bmp; 563 *(unsigned int *)base = bmp;
554 BYTE_ALIGN(bs); 564 BYTE_ALIGN(bs);
555 565
556 /* Decode the extension components */ 566 /* Decode the extension components */
@@ -596,11 +606,12 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
596} 606}
597 607
598/****************************************************************************/ 608/****************************************************************************/
599int decode_seqof(bitstr_t * bs, field_t * f, char *base, int level) 609static int decode_seqof(bitstr_t *bs, const struct field_t *f,
610 char *base, int level)
600{ 611{
601 unsigned count, effective_count = 0, i, len = 0; 612 unsigned int count, effective_count = 0, i, len = 0;
602 int err; 613 int err;
603 field_t *son; 614 const struct field_t *son;
604 unsigned char *beg = NULL; 615 unsigned char *beg = NULL;
605 616
606 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 617 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
@@ -636,8 +647,8 @@ int decode_seqof(bitstr_t * bs, field_t * f, char *base, int level)
636 /* Write Count */ 647 /* Write Count */
637 if (base) { 648 if (base) {
638 effective_count = count > f->ub ? f->ub : count; 649 effective_count = count > f->ub ? f->ub : count;
639 *(unsigned *) base = effective_count; 650 *(unsigned int *)base = effective_count;
640 base += sizeof(unsigned); 651 base += sizeof(unsigned int);
641 } 652 }
642 653
643 /* Decode nested field */ 654 /* Decode nested field */
@@ -685,11 +696,12 @@ int decode_seqof(bitstr_t * bs, field_t * f, char *base, int level)
685 696
686 697
687/****************************************************************************/ 698/****************************************************************************/
688int decode_choice(bitstr_t * bs, field_t * f, char *base, int level) 699static int decode_choice(bitstr_t *bs, const struct field_t *f,
700 char *base, int level)
689{ 701{
690 unsigned type, ext, len = 0; 702 unsigned int type, ext, len = 0;
691 int err; 703 int err;
692 field_t *son; 704 const struct field_t *son;
693 unsigned char *beg = NULL; 705 unsigned char *beg = NULL;
694 706
695 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 707 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
@@ -710,7 +722,7 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
710 722
711 /* Write Type */ 723 /* Write Type */
712 if (base) 724 if (base)
713 *(unsigned *) base = type; 725 *(unsigned int *)base = type;
714 726
715 /* Check Range */ 727 /* Check Range */
716 if (type >= f->ub) { /* Newer version? */ 728 if (type >= f->ub) { /* Newer version? */
@@ -754,9 +766,9 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
754} 766}
755 767
756/****************************************************************************/ 768/****************************************************************************/
757int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage * ras) 769int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
758{ 770{
759 static field_t ras_message = { 771 static const struct field_t ras_message = {
760 FNAME("RasMessage") CHOICE, 5, 24, 32, DECODE | EXT, 772 FNAME("RasMessage") CHOICE, 5, 24, 32, DECODE | EXT,
761 0, _RasMessage 773 0, _RasMessage
762 }; 774 };
@@ -771,9 +783,9 @@ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage * ras)
771 783
772/****************************************************************************/ 784/****************************************************************************/
773static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg, 785static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
774 size_t sz, H323_UserInformation * uuie) 786 size_t sz, H323_UserInformation *uuie)
775{ 787{
776 static field_t h323_userinformation = { 788 static const struct field_t h323_userinformation = {
777 FNAME("H323-UserInformation") SEQ, 1, 2, 2, DECODE | EXT, 789 FNAME("H323-UserInformation") SEQ, 1, 2, 2, DECODE | EXT,
778 0, _H323_UserInformation 790 0, _H323_UserInformation
779 }; 791 };
@@ -792,7 +804,7 @@ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
792 MultimediaSystemControlMessage * 804 MultimediaSystemControlMessage *
793 mscm) 805 mscm)
794{ 806{
795 static field_t multimediasystemcontrolmessage = { 807 static const struct field_t multimediasystemcontrolmessage = {
796 FNAME("MultimediaSystemControlMessage") CHOICE, 2, 4, 4, 808 FNAME("MultimediaSystemControlMessage") CHOICE, 2, 4, 4,
797 DECODE | EXT, 0, _MultimediaSystemControlMessage 809 DECODE | EXT, 0, _MultimediaSystemControlMessage
798 }; 810 };
@@ -807,7 +819,7 @@ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
807} 819}
808 820
809/****************************************************************************/ 821/****************************************************************************/
810int DecodeQ931(unsigned char *buf, size_t sz, Q931 * q931) 822int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931)
811{ 823{
812 unsigned char *p = buf; 824 unsigned char *p = buf;
813 int len; 825 int len;
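The ASN.1 decoder touched above (get_len, get_bit, get_bits, get_bitmap, get_uint) reads ASN.1-encoded fields out of a byte buffer through a small cursor: a pointer to the current byte plus a bit offset within it. The following is a self-contained sketch of that bit-cursor idea, deliberately simpler and slower than the kernel's shift-and-mask get_bits(); struct bitcursor and read_bits() are illustrative names only.

#include <stdio.h>

/* Bit cursor over a byte buffer; a simplified stand-in for bitstr_t. */
struct bitcursor {
	const unsigned char *cur;  /* current byte                   */
	unsigned int bit;          /* bits already consumed in *cur  */
};

/* Read the next b bits (b <= 8) as an unsigned value, MSB first. */
static unsigned int read_bits(struct bitcursor *bs, unsigned int b)
{
	unsigned int v = 0;

	while (b--) {
		v = (v << 1) | ((*bs->cur >> (7 - bs->bit)) & 1);
		if (++bs->bit == 8) {  /* advance to the next byte */
			bs->bit = 0;
			bs->cur++;
		}
	}
	return v;
}

int main(void)
{
	const unsigned char buf[] = { 0xA5, 0x3C };  /* 10100101 00111100 */
	struct bitcursor bs = { buf, 0 };

	printf("%u\n", read_bits(&bs, 3));  /* 101    -> 5  */
	printf("%u\n", read_bits(&bs, 6));  /* 001010 -> 10 */
	return 0;
}

The kernel version avoids the per-bit loop by masking and shifting whole bytes at a time, but the cursor bookkeeping (advance cur, wrap bit at 8) is the same idea.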
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 872c1aa3124c..62137879e6aa 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -114,7 +114,8 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
114{ 114{
115 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; 115 struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
116 int dir = CTINFO2DIR(ctinfo); 116 int dir = CTINFO2DIR(ctinfo);
117 struct tcphdr _tcph, *th; 117 const struct tcphdr *th;
118 struct tcphdr _tcph;
118 int tcpdatalen; 119 int tcpdatalen;
119 int tcpdataoff; 120 int tcpdataoff;
120 unsigned char *tpkt; 121 unsigned char *tpkt;
@@ -212,11 +213,11 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
212} 213}
213 214
214/****************************************************************************/ 215/****************************************************************************/
215static int get_h245_addr(struct nf_conn *ct, unsigned char *data, 216static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
216 H245_TransportAddress *taddr, 217 H245_TransportAddress *taddr,
217 union nf_inet_addr *addr, __be16 *port) 218 union nf_inet_addr *addr, __be16 *port)
218{ 219{
219 unsigned char *p; 220 const unsigned char *p;
220 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; 221 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
221 int len; 222 int len;
222 223
@@ -625,7 +626,7 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
625 TransportAddress *taddr, 626 TransportAddress *taddr,
626 union nf_inet_addr *addr, __be16 *port) 627 union nf_inet_addr *addr, __be16 *port)
627{ 628{
628 unsigned char *p; 629 const unsigned char *p;
629 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; 630 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
630 int len; 631 int len;
631 632
@@ -704,9 +705,8 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
704 705
705/* If the calling party is on the same side of the forward-to party, 706/* If the calling party is on the same side of the forward-to party,
706 * we don't need to track the second call */ 707 * we don't need to track the second call */
707static int callforward_do_filter(union nf_inet_addr *src, 708static int callforward_do_filter(const union nf_inet_addr *src,
708 union nf_inet_addr *dst, 709 const union nf_inet_addr *dst, int family)
709 int family)
710{ 710{
711 const struct nf_afinfo *afinfo; 711 const struct nf_afinfo *afinfo;
712 struct flowi fl1, fl2; 712 struct flowi fl1, fl2;
@@ -1185,7 +1185,8 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
1185static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff, 1185static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
1186 int *datalen) 1186 int *datalen)
1187{ 1187{
1188 struct udphdr _uh, *uh; 1188 const struct udphdr *uh;
1189 struct udphdr _uh;
1189 int dataoff; 1190 int dataoff;
1190 1191
1191 uh = skb_header_pointer(skb, protoff, sizeof(_uh), &_uh); 1192 uh = skb_header_pointer(skb, protoff, sizeof(_uh), &_uh);
@@ -1415,7 +1416,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 		nf_ct_refresh(ct, skb, info->timeout * HZ);
 
 		/* Set expect timeout */
-		read_lock_bh(&nf_conntrack_lock);
+		spin_lock_bh(&nf_conntrack_lock);
 		exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
 				  info->sig_port[!dir]);
 		if (exp) {
@@ -1425,7 +1426,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 			NF_CT_DUMP_TUPLE(&exp->tuple);
 			set_expect_timeout(exp, info->timeout);
 		}
-		read_unlock_bh(&nf_conntrack_lock);
+		spin_unlock_bh(&nf_conntrack_lock);
 	}
 
 	return 0;
@@ -1468,7 +1469,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned char **data, AdmissionRequest *arq)
 {
-	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
 	int dir = CTINFO2DIR(ctinfo);
 	__be16 port;
 	union nf_inet_addr addr;
diff --git a/net/netfilter/nf_conntrack_h323_types.c b/net/netfilter/nf_conntrack_h323_types.c
index 3a21fdf1a265..d880f3523c1d 100644
--- a/net/netfilter/nf_conntrack_h323_types.c
+++ b/net/netfilter/nf_conntrack_h323_types.c
@@ -5,22 +5,22 @@
  * This source code is licensed under General Public License version 2.
  */
 
-static field_t _TransportAddress_ipAddress[] = { /* SEQUENCE */
+static const struct field_t _TransportAddress_ipAddress[] = { /* SEQUENCE */
 	{FNAME("ip") OCTSTR, FIXD, 4, 0, DECODE,
 	 offsetof(TransportAddress_ipAddress, ip), NULL},
 	{FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _TransportAddress_ipSourceRoute_route[] = { /* SEQUENCE OF */
+static const struct field_t _TransportAddress_ipSourceRoute_route[] = { /* SEQUENCE OF */
 	{FNAME("item") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
 };
 
-static field_t _TransportAddress_ipSourceRoute_routing[] = { /* CHOICE */
+static const struct field_t _TransportAddress_ipSourceRoute_routing[] = { /* CHOICE */
 	{FNAME("strict") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("loose") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _TransportAddress_ipSourceRoute[] = { /* SEQUENCE */
+static const struct field_t _TransportAddress_ipSourceRoute[] = { /* SEQUENCE */
 	{FNAME("ip") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
 	{FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL},
 	{FNAME("route") SEQOF, SEMI, 0, 0, SKIP, 0,
@@ -29,37 +29,37 @@ static field_t _TransportAddress_ipSourceRoute[] = { /* SEQUENCE */
 	 _TransportAddress_ipSourceRoute_routing},
 };
 
-static field_t _TransportAddress_ipxAddress[] = { /* SEQUENCE */
+static const struct field_t _TransportAddress_ipxAddress[] = { /* SEQUENCE */
 	{FNAME("node") OCTSTR, FIXD, 6, 0, SKIP, 0, NULL},
 	{FNAME("netnum") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
 	{FNAME("port") OCTSTR, FIXD, 2, 0, SKIP, 0, NULL},
 };
 
-static field_t _TransportAddress_ip6Address[] = { /* SEQUENCE */
+static const struct field_t _TransportAddress_ip6Address[] = { /* SEQUENCE */
 	{FNAME("ip") OCTSTR, FIXD, 16, 0, DECODE,
 	 offsetof(TransportAddress_ip6Address, ip), NULL},
 	{FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H221NonStandard[] = { /* SEQUENCE */
+static const struct field_t _H221NonStandard[] = { /* SEQUENCE */
 	{FNAME("t35CountryCode") INT, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("t35Extension") INT, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("manufacturerCode") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _NonStandardIdentifier[] = { /* CHOICE */
+static const struct field_t _NonStandardIdentifier[] = { /* CHOICE */
 	{FNAME("object") OID, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("h221NonStandard") SEQ, 0, 3, 3, SKIP | EXT, 0,
 	 _H221NonStandard},
 };
 
-static field_t _NonStandardParameter[] = { /* SEQUENCE */
+static const struct field_t _NonStandardParameter[] = { /* SEQUENCE */
 	{FNAME("nonStandardIdentifier") CHOICE, 1, 2, 2, SKIP | EXT, 0,
 	 _NonStandardIdentifier},
 	{FNAME("data") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _TransportAddress[] = { /* CHOICE */
+static const struct field_t _TransportAddress[] = { /* CHOICE */
 	{FNAME("ipAddress") SEQ, 0, 2, 2, DECODE,
 	 offsetof(TransportAddress, ipAddress), _TransportAddress_ipAddress},
 	{FNAME("ipSourceRoute") SEQ, 0, 4, 4, SKIP | EXT, 0,
@@ -75,7 +75,7 @@ static field_t _TransportAddress[] = { /* CHOICE */
 	 _NonStandardParameter},
 };
 
-static field_t _AliasAddress[] = { /* CHOICE */
+static const struct field_t _AliasAddress[] = { /* CHOICE */
 	{FNAME("dialedDigits") NUMDGT, 7, 1, 0, SKIP, 0, NULL},
 	{FNAME("h323-ID") BMPSTR, BYTE, 1, 0, SKIP, 0, NULL},
 	{FNAME("url-ID") IA5STR, WORD, 1, 0, SKIP, 0, NULL},
@@ -85,78 +85,78 @@ static field_t _AliasAddress[] = { /* CHOICE */
 	{FNAME("mobileUIM") CHOICE, 1, 2, 2, SKIP | EXT, 0, NULL},
 };
 
-static field_t _Setup_UUIE_sourceAddress[] = { /* SEQUENCE OF */
+static const struct field_t _Setup_UUIE_sourceAddress[] = { /* SEQUENCE OF */
 	{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
 };
 
-static field_t _VendorIdentifier[] = { /* SEQUENCE */
+static const struct field_t _VendorIdentifier[] = { /* SEQUENCE */
 	{FNAME("vendor") SEQ, 0, 3, 3, SKIP | EXT, 0, _H221NonStandard},
 	{FNAME("productId") OCTSTR, BYTE, 1, 0, SKIP | OPT, 0, NULL},
 	{FNAME("versionId") OCTSTR, BYTE, 1, 0, SKIP | OPT, 0, NULL},
 };
 
-static field_t _GatekeeperInfo[] = { /* SEQUENCE */
+static const struct field_t _GatekeeperInfo[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 };
 
-static field_t _H310Caps[] = { /* SEQUENCE */
+static const struct field_t _H310Caps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H320Caps[] = { /* SEQUENCE */
+static const struct field_t _H320Caps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H321Caps[] = { /* SEQUENCE */
+static const struct field_t _H321Caps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H322Caps[] = { /* SEQUENCE */
+static const struct field_t _H322Caps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H323Caps[] = { /* SEQUENCE */
+static const struct field_t _H323Caps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H324Caps[] = { /* SEQUENCE */
+static const struct field_t _H324Caps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _VoiceCaps[] = { /* SEQUENCE */
+static const struct field_t _VoiceCaps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _T120OnlyCaps[] = { /* SEQUENCE */
+static const struct field_t _T120OnlyCaps[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("dataRatesSupported") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("supportedPrefixes") SEQOF, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _SupportedProtocols[] = { /* CHOICE */
+static const struct field_t _SupportedProtocols[] = { /* CHOICE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP, 0,
 	 _NonStandardParameter},
 	{FNAME("h310") SEQ, 1, 1, 3, SKIP | EXT, 0, _H310Caps},
@@ -171,29 +171,29 @@ static field_t _SupportedProtocols[] = { /* CHOICE */
 	{FNAME("t38FaxAnnexbOnly") SEQ, 2, 5, 5, SKIP | EXT, 0, NULL},
 };
 
-static field_t _GatewayInfo_protocol[] = { /* SEQUENCE OF */
+static const struct field_t _GatewayInfo_protocol[] = { /* SEQUENCE OF */
 	{FNAME("item") CHOICE, 4, 9, 11, SKIP | EXT, 0, _SupportedProtocols},
 };
 
-static field_t _GatewayInfo[] = { /* SEQUENCE */
+static const struct field_t _GatewayInfo[] = { /* SEQUENCE */
 	{FNAME("protocol") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
 	 _GatewayInfo_protocol},
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 };
 
-static field_t _McuInfo[] = { /* SEQUENCE */
+static const struct field_t _McuInfo[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("protocol") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 };
 
-static field_t _TerminalInfo[] = { /* SEQUENCE */
+static const struct field_t _TerminalInfo[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 };
 
-static field_t _EndpointType[] = { /* SEQUENCE */
+static const struct field_t _EndpointType[] = { /* SEQUENCE */
 	{FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
 	 _NonStandardParameter},
 	{FNAME("vendor") SEQ, 2, 3, 3, SKIP | EXT | OPT, 0,
@@ -210,19 +210,19 @@ static field_t _EndpointType[] = { /* SEQUENCE */
 	 0, NULL},
 };
 
-static field_t _Setup_UUIE_destinationAddress[] = { /* SEQUENCE OF */
+static const struct field_t _Setup_UUIE_destinationAddress[] = { /* SEQUENCE OF */
 	{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
 };
 
-static field_t _Setup_UUIE_destExtraCallInfo[] = { /* SEQUENCE OF */
+static const struct field_t _Setup_UUIE_destExtraCallInfo[] = { /* SEQUENCE OF */
 	{FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
 };
 
-static field_t _Setup_UUIE_destExtraCRV[] = { /* SEQUENCE OF */
+static const struct field_t _Setup_UUIE_destExtraCRV[] = { /* SEQUENCE OF */
 	{FNAME("item") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _Setup_UUIE_conferenceGoal[] = { /* CHOICE */
+static const struct field_t _Setup_UUIE_conferenceGoal[] = { /* CHOICE */
 	{FNAME("create") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("join") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("invite") NUL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -231,12 +231,12 @@ static field_t _Setup_UUIE_conferenceGoal[] = { /* CHOICE */
 	 0, NULL},
 };
 
-static field_t _Q954Details[] = { /* SEQUENCE */
+static const struct field_t _Q954Details[] = { /* SEQUENCE */
 	{FNAME("conferenceCalling") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("threePartyService") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _QseriesOptions[] = { /* SEQUENCE */
+static const struct field_t _QseriesOptions[] = { /* SEQUENCE */
 	{FNAME("q932Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("q951Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("q952Full") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -247,32 +247,32 @@ static field_t _QseriesOptions[] = { /* SEQUENCE */
 	{FNAME("q954Info") SEQ, 0, 2, 2, SKIP | EXT, 0, _Q954Details},
 };
 
-static field_t _CallType[] = { /* CHOICE */
+static const struct field_t _CallType[] = { /* CHOICE */
 	{FNAME("pointToPoint") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("oneToN") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("nToOne") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("nToN") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H245_NonStandardIdentifier_h221NonStandard[] = { /* SEQUENCE */
+static const struct field_t _H245_NonStandardIdentifier_h221NonStandard[] = { /* SEQUENCE */
 	{FNAME("t35CountryCode") INT, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("t35Extension") INT, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("manufacturerCode") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H245_NonStandardIdentifier[] = { /* CHOICE */
+static const struct field_t _H245_NonStandardIdentifier[] = { /* CHOICE */
 	{FNAME("object") OID, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("h221NonStandard") SEQ, 0, 3, 3, SKIP, 0,
 	 _H245_NonStandardIdentifier_h221NonStandard},
 };
 
-static field_t _H245_NonStandardParameter[] = { /* SEQUENCE */
+static const struct field_t _H245_NonStandardParameter[] = { /* SEQUENCE */
 	{FNAME("nonStandardIdentifier") CHOICE, 1, 2, 2, SKIP, 0,
 	 _H245_NonStandardIdentifier},
 	{FNAME("data") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H261VideoCapability[] = { /* SEQUENCE */
+static const struct field_t _H261VideoCapability[] = { /* SEQUENCE */
 	{FNAME("qcifMPI") INT, 2, 1, 0, SKIP | OPT, 0, NULL},
 	{FNAME("cifMPI") INT, 2, 1, 0, SKIP | OPT, 0, NULL},
 	{FNAME("temporalSpatialTradeOffCapability") BOOL, FIXD, 0, 0, SKIP, 0,
@@ -282,7 +282,7 @@ static field_t _H261VideoCapability[] = { /* SEQUENCE */
 	{FNAME("videoBadMBsCap") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H262VideoCapability[] = { /* SEQUENCE */
+static const struct field_t _H262VideoCapability[] = { /* SEQUENCE */
 	{FNAME("profileAndLevel-SPatML") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("profileAndLevel-MPatLL") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("profileAndLevel-MPatML") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -304,7 +304,7 @@ static field_t _H262VideoCapability[] = { /* SEQUENCE */
 	{FNAME("videoBadMBsCap") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H263VideoCapability[] = { /* SEQUENCE */
+static const struct field_t _H263VideoCapability[] = { /* SEQUENCE */
 	{FNAME("sqcifMPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
 	{FNAME("qcifMPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
 	{FNAME("cifMPI") INT, 5, 1, 0, SKIP | OPT, 0, NULL},
@@ -330,7 +330,7 @@ static field_t _H263VideoCapability[] = { /* SEQUENCE */
 	{FNAME("h263Options") SEQ, 5, 29, 31, SKIP | EXT | OPT, 0, NULL},
 };
 
-static field_t _IS11172VideoCapability[] = { /* SEQUENCE */
+static const struct field_t _IS11172VideoCapability[] = { /* SEQUENCE */
 	{FNAME("constrainedBitstream") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("videoBitRate") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("vbvBufferSize") INT, CONS, 0, 0, SKIP | OPT, 0, NULL},
@@ -341,7 +341,7 @@ static field_t _IS11172VideoCapability[] = { /* SEQUENCE */
 	{FNAME("videoBadMBsCap") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _VideoCapability[] = { /* CHOICE */
+static const struct field_t _VideoCapability[] = { /* CHOICE */
 	{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
 	 _H245_NonStandardParameter},
 	{FNAME("h261VideoCapability") SEQ, 2, 5, 6, SKIP | EXT, 0,
@@ -355,12 +355,12 @@ static field_t _VideoCapability[] = { /* CHOICE */
 	{FNAME("genericVideoCapability") SEQ, 5, 6, 6, SKIP | EXT, 0, NULL},
 };
 
-static field_t _AudioCapability_g7231[] = { /* SEQUENCE */
+static const struct field_t _AudioCapability_g7231[] = { /* SEQUENCE */
 	{FNAME("maxAl-sduAudioFrames") INT, BYTE, 1, 0, SKIP, 0, NULL},
 	{FNAME("silenceSuppression") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _IS11172AudioCapability[] = { /* SEQUENCE */
+static const struct field_t _IS11172AudioCapability[] = { /* SEQUENCE */
 	{FNAME("audioLayer1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("audioLayer2") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("audioLayer3") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -372,7 +372,7 @@ static field_t _IS11172AudioCapability[] = { /* SEQUENCE */
 	{FNAME("bitRate") INT, WORD, 1, 0, SKIP, 0, NULL},
 };
 
-static field_t _IS13818AudioCapability[] = { /* SEQUENCE */
+static const struct field_t _IS13818AudioCapability[] = { /* SEQUENCE */
 	{FNAME("audioLayer1") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("audioLayer2") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("audioLayer3") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -396,7 +396,7 @@ static field_t _IS13818AudioCapability[] = { /* SEQUENCE */
 	{FNAME("bitRate") INT, WORD, 1, 0, SKIP, 0, NULL},
 };
 
-static field_t _AudioCapability[] = { /* CHOICE */
+static const struct field_t _AudioCapability[] = { /* CHOICE */
 	{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
 	 _H245_NonStandardParameter},
 	{FNAME("g711Alaw64k") INT, BYTE, 1, 0, SKIP, 0, NULL},
@@ -424,7 +424,7 @@ static field_t _AudioCapability[] = { /* CHOICE */
 	{FNAME("g729Extensions") SEQ, 1, 8, 8, SKIP | EXT, 0, NULL},
 };
 
-static field_t _DataProtocolCapability[] = { /* CHOICE */
+static const struct field_t _DataProtocolCapability[] = { /* CHOICE */
 	{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
 	 _H245_NonStandardParameter},
 	{FNAME("v14buffered") NUL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -442,7 +442,7 @@ static field_t _DataProtocolCapability[] = { /* CHOICE */
 	{FNAME("udp") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _T84Profile_t84Restricted[] = { /* SEQUENCE */
+static const struct field_t _T84Profile_t84Restricted[] = { /* SEQUENCE */
 	{FNAME("qcif") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("cif") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("ccir601Seq") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -464,25 +464,25 @@ static field_t _T84Profile_t84Restricted[] = { /* SEQUENCE */
 	{FNAME("digPhotoHighProg") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _T84Profile[] = { /* CHOICE */
+static const struct field_t _T84Profile[] = { /* CHOICE */
 	{FNAME("t84Unrestricted") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("t84Restricted") SEQ, 0, 19, 19, SKIP | EXT, 0,
 	 _T84Profile_t84Restricted},
 };
 
-static field_t _DataApplicationCapability_application_t84[] = { /* SEQUENCE */
+static const struct field_t _DataApplicationCapability_application_t84[] = { /* SEQUENCE */
 	{FNAME("t84Protocol") CHOICE, 3, 7, 14, SKIP | EXT, 0,
 	 _DataProtocolCapability},
 	{FNAME("t84Profile") CHOICE, 1, 2, 2, SKIP, 0, _T84Profile},
 };
 
-static field_t _DataApplicationCapability_application_nlpid[] = { /* SEQUENCE */
+static const struct field_t _DataApplicationCapability_application_nlpid[] = { /* SEQUENCE */
 	{FNAME("nlpidProtocol") CHOICE, 3, 7, 14, SKIP | EXT, 0,
 	 _DataProtocolCapability},
 	{FNAME("nlpidData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _DataApplicationCapability_application[] = { /* CHOICE */
+static const struct field_t _DataApplicationCapability_application[] = { /* CHOICE */
 	{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
 	 _H245_NonStandardParameter},
 	{FNAME("t120") CHOICE, 3, 7, 14, DECODE | EXT,
@@ -509,20 +509,20 @@ static field_t _DataApplicationCapability_application[] = { /* CHOICE */
 	{FNAME("genericDataCapability") SEQ, 5, 6, 6, SKIP | EXT, 0, NULL},
 };
 
-static field_t _DataApplicationCapability[] = { /* SEQUENCE */
+static const struct field_t _DataApplicationCapability[] = { /* SEQUENCE */
 	{FNAME("application") CHOICE, 4, 10, 14, DECODE | EXT,
 	 offsetof(DataApplicationCapability, application),
 	 _DataApplicationCapability_application},
 	{FNAME("maxBitRate") INT, CONS, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _EncryptionMode[] = { /* CHOICE */
+static const struct field_t _EncryptionMode[] = { /* CHOICE */
 	{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
 	 _H245_NonStandardParameter},
 	{FNAME("h233Encryption") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _DataType[] = { /* CHOICE */
+static const struct field_t _DataType[] = { /* CHOICE */
 	{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
 	 _H245_NonStandardParameter},
 	{FNAME("nullData") NUL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -538,7 +538,7 @@ static field_t _DataType[] = { /* CHOICE */
 	{FNAME("multiplexedStream") SEQ, 0, 2, 2, SKIP | EXT, 0, NULL},
 };
 
-static field_t _H222LogicalChannelParameters[] = { /* SEQUENCE */
+static const struct field_t _H222LogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("resourceID") INT, WORD, 0, 0, SKIP, 0, NULL},
 	{FNAME("subChannelID") INT, WORD, 0, 0, SKIP, 0, NULL},
 	{FNAME("pcr-pid") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
@@ -546,12 +546,12 @@ static field_t _H222LogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("streamDescriptors") OCTSTR, SEMI, 0, 0, SKIP | OPT, 0, NULL},
 };
 
-static field_t _H223LogicalChannelParameters_adaptationLayerType_al3[] = { /* SEQUENCE */
+static const struct field_t _H223LogicalChannelParameters_adaptationLayerType_al3[] = { /* SEQUENCE */
 	{FNAME("controlFieldOctets") INT, 2, 0, 0, SKIP, 0, NULL},
 	{FNAME("sendBufferSize") INT, CONS, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _H223LogicalChannelParameters_adaptationLayerType[] = { /* CHOICE */
+static const struct field_t _H223LogicalChannelParameters_adaptationLayerType[] = { /* CHOICE */
 	{FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0,
 	 _H245_NonStandardParameter},
 	{FNAME("al1Framed") NUL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -565,53 +565,53 @@ static field_t _H223LogicalChannelParameters_adaptationLayerType[] = { /* CHOICE
 	{FNAME("al3M") SEQ, 0, 5, 6, SKIP | EXT, 0, NULL},
 };
 
-static field_t _H223LogicalChannelParameters[] = { /* SEQUENCE */
+static const struct field_t _H223LogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("adaptationLayerType") CHOICE, 3, 6, 9, SKIP | EXT, 0,
 	 _H223LogicalChannelParameters_adaptationLayerType},
 	{FNAME("segmentableFlag") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _CRCLength[] = { /* CHOICE */
+static const struct field_t _CRCLength[] = { /* CHOICE */
 	{FNAME("crc8bit") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("crc16bit") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("crc32bit") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _V76HDLCParameters[] = { /* SEQUENCE */
+static const struct field_t _V76HDLCParameters[] = { /* SEQUENCE */
 	{FNAME("crcLength") CHOICE, 2, 3, 3, SKIP | EXT, 0, _CRCLength},
 	{FNAME("n401") INT, WORD, 1, 0, SKIP, 0, NULL},
 	{FNAME("loopbackTestProcedure") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _V76LogicalChannelParameters_suspendResume[] = { /* CHOICE */
+static const struct field_t _V76LogicalChannelParameters_suspendResume[] = { /* CHOICE */
 	{FNAME("noSuspendResume") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("suspendResumewAddress") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("suspendResumewoAddress") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _V76LogicalChannelParameters_mode_eRM_recovery[] = { /* CHOICE */
+static const struct field_t _V76LogicalChannelParameters_mode_eRM_recovery[] = { /* CHOICE */
 	{FNAME("rej") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("sREJ") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("mSREJ") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _V76LogicalChannelParameters_mode_eRM[] = { /* SEQUENCE */
+static const struct field_t _V76LogicalChannelParameters_mode_eRM[] = { /* SEQUENCE */
 	{FNAME("windowSize") INT, 7, 1, 0, SKIP, 0, NULL},
 	{FNAME("recovery") CHOICE, 2, 3, 3, SKIP | EXT, 0,
 	 _V76LogicalChannelParameters_mode_eRM_recovery},
 };
 
-static field_t _V76LogicalChannelParameters_mode[] = { /* CHOICE */
+static const struct field_t _V76LogicalChannelParameters_mode[] = { /* CHOICE */
 	{FNAME("eRM") SEQ, 0, 2, 2, SKIP | EXT, 0,
 	 _V76LogicalChannelParameters_mode_eRM},
 	{FNAME("uNERM") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _V75Parameters[] = { /* SEQUENCE */
+static const struct field_t _V75Parameters[] = { /* SEQUENCE */
 	{FNAME("audioHeaderPresent") BOOL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _V76LogicalChannelParameters[] = { /* SEQUENCE */
+static const struct field_t _V76LogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("hdlcParameters") SEQ, 0, 3, 3, SKIP | EXT, 0,
 	 _V76HDLCParameters},
 	{FNAME("suspendResume") CHOICE, 2, 3, 3, SKIP | EXT, 0,
@@ -622,38 +622,38 @@ static field_t _V76LogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("v75Parameters") SEQ, 0, 1, 1, SKIP | EXT, 0, _V75Parameters},
 };
 
-static field_t _H2250LogicalChannelParameters_nonStandard[] = { /* SEQUENCE OF */
+static const struct field_t _H2250LogicalChannelParameters_nonStandard[] = { /* SEQUENCE OF */
 	{FNAME("item") SEQ, 0, 2, 2, SKIP, 0, _H245_NonStandardParameter},
 };
 
-static field_t _UnicastAddress_iPAddress[] = { /* SEQUENCE */
+static const struct field_t _UnicastAddress_iPAddress[] = { /* SEQUENCE */
 	{FNAME("network") OCTSTR, FIXD, 4, 0, DECODE,
 	 offsetof(UnicastAddress_iPAddress, network), NULL},
 	{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _UnicastAddress_iPXAddress[] = { /* SEQUENCE */
+static const struct field_t _UnicastAddress_iPXAddress[] = { /* SEQUENCE */
 	{FNAME("node") OCTSTR, FIXD, 6, 0, SKIP, 0, NULL},
 	{FNAME("netnum") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
 	{FNAME("tsapIdentifier") OCTSTR, FIXD, 2, 0, SKIP, 0, NULL},
 };
 
-static field_t _UnicastAddress_iP6Address[] = { /* SEQUENCE */
+static const struct field_t _UnicastAddress_iP6Address[] = { /* SEQUENCE */
 	{FNAME("network") OCTSTR, FIXD, 16, 0, DECODE,
 	 offsetof(UnicastAddress_iP6Address, network), NULL},
 	{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _UnicastAddress_iPSourceRouteAddress_routing[] = { /* CHOICE */
+static const struct field_t _UnicastAddress_iPSourceRouteAddress_routing[] = { /* CHOICE */
 	{FNAME("strict") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("loose") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _UnicastAddress_iPSourceRouteAddress_route[] = { /* SEQUENCE OF */
+static const struct field_t _UnicastAddress_iPSourceRouteAddress_route[] = { /* SEQUENCE OF */
 	{FNAME("item") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
 };
 
-static field_t _UnicastAddress_iPSourceRouteAddress[] = { /* SEQUENCE */
+static const struct field_t _UnicastAddress_iPSourceRouteAddress[] = { /* SEQUENCE */
 	{FNAME("routing") CHOICE, 1, 2, 2, SKIP, 0,
 	 _UnicastAddress_iPSourceRouteAddress_routing},
 	{FNAME("network") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
@@ -662,7 +662,7 @@ static field_t _UnicastAddress_iPSourceRouteAddress[] = { /* SEQUENCE */
 	 _UnicastAddress_iPSourceRouteAddress_route},
 };
 
-static field_t _UnicastAddress[] = { /* CHOICE */
+static const struct field_t _UnicastAddress[] = { /* CHOICE */
 	{FNAME("iPAddress") SEQ, 0, 2, 2, DECODE | EXT,
 	 offsetof(UnicastAddress, iPAddress), _UnicastAddress_iPAddress},
 	{FNAME("iPXAddress") SEQ, 0, 3, 3, SKIP | EXT, 0,
@@ -676,17 +676,17 @@ static field_t _UnicastAddress[] = { /* CHOICE */
 	{FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0, NULL},
 };
 
-static field_t _MulticastAddress_iPAddress[] = { /* SEQUENCE */
+static const struct field_t _MulticastAddress_iPAddress[] = { /* SEQUENCE */
 	{FNAME("network") OCTSTR, FIXD, 4, 0, SKIP, 0, NULL},
 	{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _MulticastAddress_iP6Address[] = { /* SEQUENCE */
+static const struct field_t _MulticastAddress_iP6Address[] = { /* SEQUENCE */
 	{FNAME("network") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
 	{FNAME("tsapIdentifier") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _MulticastAddress[] = { /* CHOICE */
+static const struct field_t _MulticastAddress[] = { /* CHOICE */
 	{FNAME("iPAddress") SEQ, 0, 2, 2, SKIP | EXT, 0,
 	 _MulticastAddress_iPAddress},
 	{FNAME("iP6Address") SEQ, 0, 2, 2, SKIP | EXT, 0,
@@ -695,14 +695,14 @@ static field_t _MulticastAddress[] = { /* CHOICE */
 	{FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0, NULL},
 };
 
-static field_t _H245_TransportAddress[] = { /* CHOICE */
+static const struct field_t _H245_TransportAddress[] = { /* CHOICE */
 	{FNAME("unicastAddress") CHOICE, 3, 5, 7, DECODE | EXT,
 	 offsetof(H245_TransportAddress, unicastAddress), _UnicastAddress},
 	{FNAME("multicastAddress") CHOICE, 1, 2, 4, SKIP | EXT, 0,
 	 _MulticastAddress},
 };
 
-static field_t _H2250LogicalChannelParameters[] = { /* SEQUENCE */
+static const struct field_t _H2250LogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("nonStandard") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
 	 _H2250LogicalChannelParameters_nonStandard},
 	{FNAME("sessionID") INT, BYTE, 0, 0, SKIP, 0, NULL},
@@ -728,7 +728,7 @@ static field_t _H2250LogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("source") SEQ, 0, 2, 2, SKIP | EXT | OPT, 0, NULL},
 };
 
-static field_t _OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
+static const struct field_t _OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
 	{FNAME("h222LogicalChannelParameters") SEQ, 3, 5, 5, SKIP | EXT, 0,
 	 _H222LogicalChannelParameters},
 	{FNAME("h223LogicalChannelParameters") SEQ, 0, 2, 2, SKIP | EXT, 0,
@@ -742,7 +742,7 @@ static field_t _OpenLogicalChannel_forwardLogicalChannelParameters_multiplexPara
 	{FNAME("none") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _OpenLogicalChannel_forwardLogicalChannelParameters[] = { /* SEQUENCE */
+static const struct field_t _OpenLogicalChannel_forwardLogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
 	{FNAME("dataType") CHOICE, 3, 6, 9, DECODE | EXT,
 	 offsetof(OpenLogicalChannel_forwardLogicalChannelParameters,
@@ -756,7 +756,7 @@ static field_t _OpenLogicalChannel_forwardLogicalChannelParameters[] = { /* SEQU
 	{FNAME("replacementFor") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
 };
 
-static field_t _OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
+static const struct field_t _OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
 	{FNAME("h223LogicalChannelParameters") SEQ, 0, 2, 2, SKIP | EXT, 0,
 	 _H223LogicalChannelParameters},
 	{FNAME("v76LogicalChannelParameters") SEQ, 0, 5, 5, SKIP | EXT, 0,
@@ -767,7 +767,7 @@ static field_t _OpenLogicalChannel_reverseLogicalChannelParameters_multiplexPara
 		  h2250LogicalChannelParameters), _H2250LogicalChannelParameters},
 };
 
-static field_t _OpenLogicalChannel_reverseLogicalChannelParameters[] = { /* SEQUENCE */
+static const struct field_t _OpenLogicalChannel_reverseLogicalChannelParameters[] = { /* SEQUENCE */
 	{FNAME("dataType") CHOICE, 3, 6, 9, SKIP | EXT, 0, _DataType},
 	{FNAME("multiplexParameters") CHOICE, 1, 2, 3, DECODE | EXT | OPT,
 	 offsetof(OpenLogicalChannel_reverseLogicalChannelParameters,
@@ -778,23 +778,23 @@ static field_t _OpenLogicalChannel_reverseLogicalChannelParameters[] = { /* SEQU
 	{FNAME("replacementFor") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
 };
 
-static field_t _NetworkAccessParameters_distribution[] = { /* CHOICE */
+static const struct field_t _NetworkAccessParameters_distribution[] = { /* CHOICE */
 	{FNAME("unicast") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("multicast") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 };
 
-static field_t _Q2931Address_address[] = { /* CHOICE */
+static const struct field_t _Q2931Address_address[] = { /* CHOICE */
 	{FNAME("internationalNumber") NUMSTR, 4, 1, 0, SKIP, 0, NULL},
 	{FNAME("nsapAddress") OCTSTR, 5, 1, 0, SKIP, 0, NULL},
 };
 
-static field_t _Q2931Address[] = { /* SEQUENCE */
+static const struct field_t _Q2931Address[] = { /* SEQUENCE */
 	{FNAME("address") CHOICE, 1, 2, 2, SKIP | EXT, 0,
 	 _Q2931Address_address},
 	{FNAME("subaddress") OCTSTR, 5, 1, 0, SKIP | OPT, 0, NULL},
 };
 
-static field_t _NetworkAccessParameters_networkAddress[] = { /* CHOICE */
+static const struct field_t _NetworkAccessParameters_networkAddress[] = { /* CHOICE */
 	{FNAME("q2931Address") SEQ, 1, 2, 2, SKIP | EXT, 0, _Q2931Address},
 	{FNAME("e164Address") NUMDGT, 7, 1, 0, SKIP, 0, NULL},
 	{FNAME("localAreaAddress") CHOICE, 1, 2, 2, DECODE | EXT,
@@ -802,7 +802,7 @@ static field_t _NetworkAccessParameters_networkAddress[] = { /* CHOICE */
 	 _H245_TransportAddress},
 };
 
-static field_t _NetworkAccessParameters[] = { /* SEQUENCE */
+static const struct field_t _NetworkAccessParameters[] = { /* SEQUENCE */
 	{FNAME("distribution") CHOICE, 1, 2, 2, SKIP | EXT | OPT, 0,
 	 _NetworkAccessParameters_distribution},
 	{FNAME("networkAddress") CHOICE, 2, 3, 3, DECODE | EXT,
@@ -814,7 +814,7 @@ static field_t _NetworkAccessParameters[] = { /* SEQUENCE */
 	 NULL},
 };
 
-static field_t _OpenLogicalChannel[] = { /* SEQUENCE */
+static const struct field_t _OpenLogicalChannel[] = { /* SEQUENCE */
 	{FNAME("forwardLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL},
 	{FNAME("forwardLogicalChannelParameters") SEQ, 1, 3, 5, DECODE | EXT,
 	 offsetof(OpenLogicalChannel, forwardLogicalChannelParameters),
@@ -829,13 +829,13 @@ static field_t _OpenLogicalChannel[] = { /* SEQUENCE */
 	{FNAME("encryptionSync") SEQ, 2, 4, 4, STOP | EXT | OPT, 0, NULL},
 };
 
-static field_t _Setup_UUIE_fastStart[] = { /* SEQUENCE OF */
+static const struct field_t _Setup_UUIE_fastStart[] = { /* SEQUENCE OF */
 	{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
 	 sizeof(OpenLogicalChannel), _OpenLogicalChannel}
 	,
 };
 
-static field_t _Setup_UUIE[] = { /* SEQUENCE */
+static const struct field_t _Setup_UUIE[] = { /* SEQUENCE */
 	{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
 	 offsetof(Setup_UUIE, h245Address), _TransportAddress},
@@ -894,13 +894,13 @@ static field_t _Setup_UUIE[] = { /* SEQUENCE */
 	 NULL},
 };
 
-static field_t _CallProceeding_UUIE_fastStart[] = { /* SEQUENCE OF */
+static const struct field_t _CallProceeding_UUIE_fastStart[] = { /* SEQUENCE OF */
 	{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
 	 sizeof(OpenLogicalChannel), _OpenLogicalChannel}
 	,
 };
 
-static field_t _CallProceeding_UUIE[] = { /* SEQUENCE */
+static const struct field_t _CallProceeding_UUIE[] = { /* SEQUENCE */
 	{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0,
 	 _EndpointType},
@@ -920,13 +920,13 @@ static field_t _CallProceeding_UUIE[] = { /* SEQUENCE */
 	{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
 };
 
-static field_t _Connect_UUIE_fastStart[] = { /* SEQUENCE OF */
+static const struct field_t _Connect_UUIE_fastStart[] = { /* SEQUENCE OF */
 	{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
 	 sizeof(OpenLogicalChannel), _OpenLogicalChannel}
 	,
 };
 
-static field_t _Connect_UUIE[] = { /* SEQUENCE */
+static const struct field_t _Connect_UUIE[] = { /* SEQUENCE */
 	{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("h245Address") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
 	 offsetof(Connect_UUIE, h245Address), _TransportAddress},
@@ -954,13 +954,13 @@ static field_t _Connect_UUIE[] = { /* SEQUENCE */
 	{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
 };
 
-static field_t _Alerting_UUIE_fastStart[] = { /* SEQUENCE OF */
+static const struct field_t _Alerting_UUIE_fastStart[] = { /* SEQUENCE OF */
 	{FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
 	 sizeof(OpenLogicalChannel), _OpenLogicalChannel}
 	,
 };
 
-static field_t _Alerting_UUIE[] = { /* SEQUENCE */
+static const struct field_t _Alerting_UUIE[] = { /* SEQUENCE */
 	{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0,
 	 _EndpointType},
@@ -986,7 +986,7 @@ static field_t _Alerting_UUIE[] = { /* SEQUENCE */
 	{FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
 };
 
-static field_t _Information_UUIE[] = { /* SEQUENCE */
+static const struct field_t _Information_UUIE[] = { /* SEQUENCE */
 	{FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
 	{FNAME("callIdentifier") SEQ, 0, 1, 1, SKIP | EXT, 0, NULL},
 	{FNAME("tokens") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, NULL},
@@ -996,7 +996,7 @@ static field_t _Information_UUIE[] = { /* SEQUENCE */
 	{FNAME("circuitInfo") SEQ, 3, 3, 3, SKIP | EXT | OPT, 0, NULL},
 };
 
-static field_t _ReleaseCompleteReason[] = { /* CHOICE */
+static const struct field_t _ReleaseCompleteReason[] = { /* CHOICE */
 	{FNAME("noBandwidth") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("gatekeeperResources") NUL, FIXD, 0, 0, SKIP, 0, NULL},
 	{FNAME("unreachableDestination") NUL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -1022,7 +1022,7 @@ static field_t _ReleaseCompleteReason[] = { /* CHOICE */
1022 {FNAME("tunnelledSignallingRejected") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1022 {FNAME("tunnelledSignallingRejected") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1023}; 1023};
1024 1024
1025static field_t _ReleaseComplete_UUIE[] = { /* SEQUENCE */ 1025static const struct field_t _ReleaseComplete_UUIE[] = { /* SEQUENCE */
1026 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1026 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1027 {FNAME("reason") CHOICE, 4, 12, 22, SKIP | EXT | OPT, 0, 1027 {FNAME("reason") CHOICE, 4, 12, 22, SKIP | EXT | OPT, 0,
1028 _ReleaseCompleteReason}, 1028 _ReleaseCompleteReason},
@@ -1039,11 +1039,11 @@ static field_t _ReleaseComplete_UUIE[] = { /* SEQUENCE */
1039 {FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL}, 1039 {FNAME("featureSet") SEQ, 3, 4, 4, SKIP | EXT | OPT, 0, NULL},
1040}; 1040};
1041 1041
1042static field_t _Facility_UUIE_alternativeAliasAddress[] = { /* SEQUENCE OF */ 1042static const struct field_t _Facility_UUIE_alternativeAliasAddress[] = { /* SEQUENCE OF */
1043 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1043 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1044}; 1044};
1045 1045
1046static field_t _FacilityReason[] = { /* CHOICE */ 1046static const struct field_t _FacilityReason[] = { /* CHOICE */
1047 {FNAME("routeCallToGatekeeper") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1047 {FNAME("routeCallToGatekeeper") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1048 {FNAME("callForwarded") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1048 {FNAME("callForwarded") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1049 {FNAME("routeCallToMC") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1049 {FNAME("routeCallToMC") NUL, FIXD, 0, 0, SKIP, 0, NULL},
@@ -1057,13 +1057,13 @@ static field_t _FacilityReason[] = { /* CHOICE */
1057 {FNAME("transportedInformation") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1057 {FNAME("transportedInformation") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1058}; 1058};
1059 1059
1060static field_t _Facility_UUIE_fastStart[] = { /* SEQUENCE OF */ 1060static const struct field_t _Facility_UUIE_fastStart[] = { /* SEQUENCE OF */
1061 {FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT, 1061 {FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
1062 sizeof(OpenLogicalChannel), _OpenLogicalChannel} 1062 sizeof(OpenLogicalChannel), _OpenLogicalChannel}
1063 , 1063 ,
1064}; 1064};
1065 1065
1066static field_t _Facility_UUIE[] = { /* SEQUENCE */ 1066static const struct field_t _Facility_UUIE[] = { /* SEQUENCE */
1067 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1067 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1068 {FNAME("alternativeAddress") CHOICE, 3, 7, 7, DECODE | EXT | OPT, 1068 {FNAME("alternativeAddress") CHOICE, 3, 7, 7, DECODE | EXT | OPT,
1069 offsetof(Facility_UUIE, alternativeAddress), _TransportAddress}, 1069 offsetof(Facility_UUIE, alternativeAddress), _TransportAddress},
@@ -1094,17 +1094,17 @@ static field_t _Facility_UUIE[] = { /* SEQUENCE */
1094 NULL}, 1094 NULL},
1095}; 1095};
1096 1096
1097static field_t _CallIdentifier[] = { /* SEQUENCE */ 1097static const struct field_t _CallIdentifier[] = { /* SEQUENCE */
1098 {FNAME("guid") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL}, 1098 {FNAME("guid") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
1099}; 1099};
1100 1100
1101static field_t _SecurityServiceMode[] = { /* CHOICE */ 1101static const struct field_t _SecurityServiceMode[] = { /* CHOICE */
1102 {FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0, _NonStandardParameter}, 1102 {FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0, _NonStandardParameter},
1103 {FNAME("none") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1103 {FNAME("none") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1104 {FNAME("default") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1104 {FNAME("default") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1105}; 1105};
1106 1106
1107static field_t _SecurityCapabilities[] = { /* SEQUENCE */ 1107static const struct field_t _SecurityCapabilities[] = { /* SEQUENCE */
1108 {FNAME("nonStandard") SEQ, 0, 2, 2, SKIP | OPT, 0, 1108 {FNAME("nonStandard") SEQ, 0, 2, 2, SKIP | OPT, 0,
1109 _NonStandardParameter}, 1109 _NonStandardParameter},
1110 {FNAME("encryption") CHOICE, 2, 3, 3, SKIP | EXT, 0, 1110 {FNAME("encryption") CHOICE, 2, 3, 3, SKIP | EXT, 0,
@@ -1115,30 +1115,30 @@ static field_t _SecurityCapabilities[] = { /* SEQUENCE */
1115 _SecurityServiceMode}, 1115 _SecurityServiceMode},
1116}; 1116};
1117 1117
1118static field_t _H245Security[] = { /* CHOICE */ 1118static const struct field_t _H245Security[] = { /* CHOICE */
1119 {FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0, _NonStandardParameter}, 1119 {FNAME("nonStandard") SEQ, 0, 2, 2, SKIP, 0, _NonStandardParameter},
1120 {FNAME("noSecurity") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1120 {FNAME("noSecurity") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1121 {FNAME("tls") SEQ, 1, 4, 4, SKIP | EXT, 0, _SecurityCapabilities}, 1121 {FNAME("tls") SEQ, 1, 4, 4, SKIP | EXT, 0, _SecurityCapabilities},
1122 {FNAME("ipsec") SEQ, 1, 4, 4, SKIP | EXT, 0, _SecurityCapabilities}, 1122 {FNAME("ipsec") SEQ, 1, 4, 4, SKIP | EXT, 0, _SecurityCapabilities},
1123}; 1123};
1124 1124
1125static field_t _DHset[] = { /* SEQUENCE */ 1125static const struct field_t _DHset[] = { /* SEQUENCE */
1126 {FNAME("halfkey") BITSTR, WORD, 0, 0, SKIP, 0, NULL}, 1126 {FNAME("halfkey") BITSTR, WORD, 0, 0, SKIP, 0, NULL},
1127 {FNAME("modSize") BITSTR, WORD, 0, 0, SKIP, 0, NULL}, 1127 {FNAME("modSize") BITSTR, WORD, 0, 0, SKIP, 0, NULL},
1128 {FNAME("generator") BITSTR, WORD, 0, 0, SKIP, 0, NULL}, 1128 {FNAME("generator") BITSTR, WORD, 0, 0, SKIP, 0, NULL},
1129}; 1129};
1130 1130
1131static field_t _TypedCertificate[] = { /* SEQUENCE */ 1131static const struct field_t _TypedCertificate[] = { /* SEQUENCE */
1132 {FNAME("type") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1132 {FNAME("type") OID, BYTE, 0, 0, SKIP, 0, NULL},
1133 {FNAME("certificate") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1133 {FNAME("certificate") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
1134}; 1134};
1135 1135
1136static field_t _H235_NonStandardParameter[] = { /* SEQUENCE */ 1136static const struct field_t _H235_NonStandardParameter[] = { /* SEQUENCE */
1137 {FNAME("nonStandardIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1137 {FNAME("nonStandardIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1138 {FNAME("data") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1138 {FNAME("data") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
1139}; 1139};
1140 1140
1141static field_t _ClearToken[] = { /* SEQUENCE */ 1141static const struct field_t _ClearToken[] = { /* SEQUENCE */
1142 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1142 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1143 {FNAME("timeStamp") INT, CONS, 1, 0, SKIP | OPT, 0, NULL}, 1143 {FNAME("timeStamp") INT, CONS, 1, 0, SKIP | OPT, 0, NULL},
1144 {FNAME("password") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL}, 1144 {FNAME("password") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
@@ -1154,120 +1154,120 @@ static field_t _ClearToken[] = { /* SEQUENCE */
1154 {FNAME("sendersID") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL}, 1154 {FNAME("sendersID") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
1155}; 1155};
1156 1156
1157static field_t _Progress_UUIE_tokens[] = { /* SEQUENCE OF */ 1157static const struct field_t _Progress_UUIE_tokens[] = { /* SEQUENCE OF */
1158 {FNAME("item") SEQ, 8, 9, 11, SKIP | EXT, 0, _ClearToken}, 1158 {FNAME("item") SEQ, 8, 9, 11, SKIP | EXT, 0, _ClearToken},
1159}; 1159};
1160 1160
1161static field_t _Params[] = { /* SEQUENCE */ 1161static const struct field_t _Params[] = { /* SEQUENCE */
1162 {FNAME("ranInt") INT, UNCO, 0, 0, SKIP | OPT, 0, NULL}, 1162 {FNAME("ranInt") INT, UNCO, 0, 0, SKIP | OPT, 0, NULL},
1163 {FNAME("iv8") OCTSTR, FIXD, 8, 0, SKIP | OPT, 0, NULL}, 1163 {FNAME("iv8") OCTSTR, FIXD, 8, 0, SKIP | OPT, 0, NULL},
1164 {FNAME("iv16") OCTSTR, FIXD, 16, 0, SKIP | OPT, 0, NULL}, 1164 {FNAME("iv16") OCTSTR, FIXD, 16, 0, SKIP | OPT, 0, NULL},
1165}; 1165};
1166 1166
1167static field_t _CryptoH323Token_cryptoEPPwdHash_token[] = { /* SEQUENCE */ 1167static const struct field_t _CryptoH323Token_cryptoEPPwdHash_token[] = { /* SEQUENCE */
1168 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1168 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1169 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1169 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1170 {FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1170 {FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
1171}; 1171};
1172 1172
1173static field_t _CryptoH323Token_cryptoEPPwdHash[] = { /* SEQUENCE */ 1173static const struct field_t _CryptoH323Token_cryptoEPPwdHash[] = { /* SEQUENCE */
1174 {FNAME("alias") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1174 {FNAME("alias") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1175 {FNAME("timeStamp") INT, CONS, 1, 0, SKIP, 0, NULL}, 1175 {FNAME("timeStamp") INT, CONS, 1, 0, SKIP, 0, NULL},
1176 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0, 1176 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
1177 _CryptoH323Token_cryptoEPPwdHash_token}, 1177 _CryptoH323Token_cryptoEPPwdHash_token},
1178}; 1178};
1179 1179
1180static field_t _CryptoH323Token_cryptoGKPwdHash_token[] = { /* SEQUENCE */ 1180static const struct field_t _CryptoH323Token_cryptoGKPwdHash_token[] = { /* SEQUENCE */
1181 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1181 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1182 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1182 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1183 {FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1183 {FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
1184}; 1184};
1185 1185
1186static field_t _CryptoH323Token_cryptoGKPwdHash[] = { /* SEQUENCE */ 1186static const struct field_t _CryptoH323Token_cryptoGKPwdHash[] = { /* SEQUENCE */
1187 {FNAME("gatekeeperId") BMPSTR, 7, 1, 0, SKIP, 0, NULL}, 1187 {FNAME("gatekeeperId") BMPSTR, 7, 1, 0, SKIP, 0, NULL},
1188 {FNAME("timeStamp") INT, CONS, 1, 0, SKIP, 0, NULL}, 1188 {FNAME("timeStamp") INT, CONS, 1, 0, SKIP, 0, NULL},
1189 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0, 1189 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
1190 _CryptoH323Token_cryptoGKPwdHash_token}, 1190 _CryptoH323Token_cryptoGKPwdHash_token},
1191}; 1191};
1192 1192
1193static field_t _CryptoH323Token_cryptoEPPwdEncr[] = { /* SEQUENCE */ 1193static const struct field_t _CryptoH323Token_cryptoEPPwdEncr[] = { /* SEQUENCE */
1194 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1194 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1195 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1195 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1196 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1196 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
1197}; 1197};
1198 1198
1199static field_t _CryptoH323Token_cryptoGKPwdEncr[] = { /* SEQUENCE */ 1199static const struct field_t _CryptoH323Token_cryptoGKPwdEncr[] = { /* SEQUENCE */
1200 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1200 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1201 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1201 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1202 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1202 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
1203}; 1203};
1204 1204
1205static field_t _CryptoH323Token_cryptoEPCert[] = { /* SEQUENCE */ 1205static const struct field_t _CryptoH323Token_cryptoEPCert[] = { /* SEQUENCE */
1206 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL}, 1206 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
1207 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1207 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1208 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1208 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1209 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1209 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
1210}; 1210};
1211 1211
1212static field_t _CryptoH323Token_cryptoGKCert[] = { /* SEQUENCE */ 1212static const struct field_t _CryptoH323Token_cryptoGKCert[] = { /* SEQUENCE */
1213 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL}, 1213 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
1214 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1214 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1215 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1215 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1216 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1216 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
1217}; 1217};
1218 1218
1219static field_t _CryptoH323Token_cryptoFastStart[] = { /* SEQUENCE */ 1219static const struct field_t _CryptoH323Token_cryptoFastStart[] = { /* SEQUENCE */
1220 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL}, 1220 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
1221 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1221 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1222 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1222 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1223 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1223 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
1224}; 1224};
1225 1225
1226static field_t _CryptoToken_cryptoEncryptedToken_token[] = { /* SEQUENCE */ 1226static const struct field_t _CryptoToken_cryptoEncryptedToken_token[] = { /* SEQUENCE */
1227 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1227 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1228 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1228 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1229 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1229 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
1230}; 1230};
1231 1231
1232static field_t _CryptoToken_cryptoEncryptedToken[] = { /* SEQUENCE */ 1232static const struct field_t _CryptoToken_cryptoEncryptedToken[] = { /* SEQUENCE */
1233 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1233 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1234 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0, 1234 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
1235 _CryptoToken_cryptoEncryptedToken_token}, 1235 _CryptoToken_cryptoEncryptedToken_token},
1236}; 1236};
1237 1237
1238static field_t _CryptoToken_cryptoSignedToken_token[] = { /* SEQUENCE */ 1238static const struct field_t _CryptoToken_cryptoSignedToken_token[] = { /* SEQUENCE */
1239 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL}, 1239 {FNAME("toBeSigned") SEQ, 8, 9, 11, SKIP | OPEN | EXT, 0, NULL},
1240 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1240 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1241 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1241 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1242 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1242 {FNAME("signature") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
1243}; 1243};
1244 1244
1245static field_t _CryptoToken_cryptoSignedToken[] = { /* SEQUENCE */ 1245static const struct field_t _CryptoToken_cryptoSignedToken[] = { /* SEQUENCE */
1246 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1246 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1247 {FNAME("token") SEQ, 0, 4, 4, SKIP, 0, 1247 {FNAME("token") SEQ, 0, 4, 4, SKIP, 0,
1248 _CryptoToken_cryptoSignedToken_token}, 1248 _CryptoToken_cryptoSignedToken_token},
1249}; 1249};
1250 1250
1251static field_t _CryptoToken_cryptoHashedToken_token[] = { /* SEQUENCE */ 1251static const struct field_t _CryptoToken_cryptoHashedToken_token[] = { /* SEQUENCE */
1252 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1252 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1253 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1253 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1254 {FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1254 {FNAME("hash") BITSTR, SEMI, 0, 0, SKIP, 0, NULL},
1255}; 1255};
1256 1256
1257static field_t _CryptoToken_cryptoHashedToken[] = { /* SEQUENCE */ 1257static const struct field_t _CryptoToken_cryptoHashedToken[] = { /* SEQUENCE */
1258 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1258 {FNAME("tokenOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1259 {FNAME("hashedVals") SEQ, 8, 9, 11, SKIP | EXT, 0, _ClearToken}, 1259 {FNAME("hashedVals") SEQ, 8, 9, 11, SKIP | EXT, 0, _ClearToken},
1260 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0, 1260 {FNAME("token") SEQ, 0, 3, 3, SKIP, 0,
1261 _CryptoToken_cryptoHashedToken_token}, 1261 _CryptoToken_cryptoHashedToken_token},
1262}; 1262};
1263 1263
1264static field_t _CryptoToken_cryptoPwdEncr[] = { /* SEQUENCE */ 1264static const struct field_t _CryptoToken_cryptoPwdEncr[] = { /* SEQUENCE */
1265 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1265 {FNAME("algorithmOID") OID, BYTE, 0, 0, SKIP, 0, NULL},
1266 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params}, 1266 {FNAME("paramS") SEQ, 2, 2, 3, SKIP | EXT, 0, _Params},
1267 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL}, 1267 {FNAME("encryptedData") OCTSTR, SEMI, 0, 0, SKIP, 0, NULL},
1268}; 1268};
1269 1269
1270static field_t _CryptoToken[] = { /* CHOICE */ 1270static const struct field_t _CryptoToken[] = { /* CHOICE */
1271 {FNAME("cryptoEncryptedToken") SEQ, 0, 2, 2, SKIP, 0, 1271 {FNAME("cryptoEncryptedToken") SEQ, 0, 2, 2, SKIP, 0,
1272 _CryptoToken_cryptoEncryptedToken}, 1272 _CryptoToken_cryptoEncryptedToken},
1273 {FNAME("cryptoSignedToken") SEQ, 0, 2, 2, SKIP, 0, 1273 {FNAME("cryptoSignedToken") SEQ, 0, 2, 2, SKIP, 0,
@@ -1278,7 +1278,7 @@ static field_t _CryptoToken[] = { /* CHOICE */
1278 _CryptoToken_cryptoPwdEncr}, 1278 _CryptoToken_cryptoPwdEncr},
1279}; 1279};
1280 1280
1281static field_t _CryptoH323Token[] = { /* CHOICE */ 1281static const struct field_t _CryptoH323Token[] = { /* CHOICE */
1282 {FNAME("cryptoEPPwdHash") SEQ, 0, 3, 3, SKIP, 0, 1282 {FNAME("cryptoEPPwdHash") SEQ, 0, 3, 3, SKIP, 0,
1283 _CryptoH323Token_cryptoEPPwdHash}, 1283 _CryptoH323Token_cryptoEPPwdHash},
1284 {FNAME("cryptoGKPwdHash") SEQ, 0, 3, 3, SKIP, 0, 1284 {FNAME("cryptoGKPwdHash") SEQ, 0, 3, 3, SKIP, 0,
@@ -1297,17 +1297,17 @@ static field_t _CryptoH323Token[] = { /* CHOICE */
1297 _CryptoToken}, 1297 _CryptoToken},
1298}; 1298};
1299 1299
1300static field_t _Progress_UUIE_cryptoTokens[] = { /* SEQUENCE OF */ 1300static const struct field_t _Progress_UUIE_cryptoTokens[] = { /* SEQUENCE OF */
1301 {FNAME("item") CHOICE, 3, 8, 8, SKIP | EXT, 0, _CryptoH323Token}, 1301 {FNAME("item") CHOICE, 3, 8, 8, SKIP | EXT, 0, _CryptoH323Token},
1302}; 1302};
1303 1303
1304static field_t _Progress_UUIE_fastStart[] = { /* SEQUENCE OF */ 1304static const struct field_t _Progress_UUIE_fastStart[] = { /* SEQUENCE OF */
1305 {FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT, 1305 {FNAME("item") SEQ, 1, 3, 5, DECODE | OPEN | EXT,
1306 sizeof(OpenLogicalChannel), _OpenLogicalChannel} 1306 sizeof(OpenLogicalChannel), _OpenLogicalChannel}
1307 , 1307 ,
1308}; 1308};
1309 1309
1310static field_t _Progress_UUIE[] = { /* SEQUENCE */ 1310static const struct field_t _Progress_UUIE[] = { /* SEQUENCE */
1311 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1311 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1312 {FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0, 1312 {FNAME("destinationInfo") SEQ, 6, 8, 10, SKIP | EXT, 0,
1313 _EndpointType}, 1313 _EndpointType},
@@ -1328,7 +1328,7 @@ static field_t _Progress_UUIE[] = { /* SEQUENCE */
1328 {FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL}, 1328 {FNAME("fastConnectRefused") NUL, FIXD, 0, 0, SKIP | OPT, 0, NULL},
1329}; 1329};
1330 1330
1331static field_t _H323_UU_PDU_h323_message_body[] = { /* CHOICE */ 1331static const struct field_t _H323_UU_PDU_h323_message_body[] = { /* CHOICE */
1332 {FNAME("setup") SEQ, 7, 13, 39, DECODE | EXT, 1332 {FNAME("setup") SEQ, 7, 13, 39, DECODE | EXT,
1333 offsetof(H323_UU_PDU_h323_message_body, setup), _Setup_UUIE}, 1333 offsetof(H323_UU_PDU_h323_message_body, setup), _Setup_UUIE},
1334 {FNAME("callProceeding") SEQ, 1, 3, 12, DECODE | EXT, 1334 {FNAME("callProceeding") SEQ, 1, 3, 12, DECODE | EXT,
@@ -1352,7 +1352,7 @@ static field_t _H323_UU_PDU_h323_message_body[] = { /* CHOICE */
1352 {FNAME("notify") SEQ, 2, 4, 4, SKIP | EXT, 0, NULL}, 1352 {FNAME("notify") SEQ, 2, 4, 4, SKIP | EXT, 0, NULL},
1353}; 1353};
1354 1354
1355static field_t _RequestMessage[] = { /* CHOICE */ 1355static const struct field_t _RequestMessage[] = { /* CHOICE */
1356 {FNAME("nonStandard") SEQ, 0, 1, 1, STOP | EXT, 0, NULL}, 1356 {FNAME("nonStandard") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
1357 {FNAME("masterSlaveDetermination") SEQ, 0, 2, 2, STOP | EXT, 0, NULL}, 1357 {FNAME("masterSlaveDetermination") SEQ, 0, 2, 2, STOP | EXT, 0, NULL},
1358 {FNAME("terminalCapabilitySet") SEQ, 3, 5, 5, STOP | EXT, 0, NULL}, 1358 {FNAME("terminalCapabilitySet") SEQ, 3, 5, 5, STOP | EXT, 0, NULL},
@@ -1372,7 +1372,7 @@ static field_t _RequestMessage[] = { /* CHOICE */
1372 NULL}, 1372 NULL},
1373}; 1373};
1374 1374
1375static field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */ 1375static const struct field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters[] = { /* CHOICE */
1376 {FNAME("h222LogicalChannelParameters") SEQ, 3, 5, 5, SKIP | EXT, 0, 1376 {FNAME("h222LogicalChannelParameters") SEQ, 3, 5, 5, SKIP | EXT, 0,
1377 _H222LogicalChannelParameters}, 1377 _H222LogicalChannelParameters},
1378 {FNAME("h2250LogicalChannelParameters") SEQ, 10, 11, 14, DECODE | EXT, 1378 {FNAME("h2250LogicalChannelParameters") SEQ, 10, 11, 14, DECODE | EXT,
@@ -1381,7 +1381,7 @@ static field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexP
1381 h2250LogicalChannelParameters), _H2250LogicalChannelParameters}, 1381 h2250LogicalChannelParameters), _H2250LogicalChannelParameters},
1382}; 1382};
1383 1383
1384static field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters[] = { /* SEQUENCE */ 1384static const struct field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters[] = { /* SEQUENCE */
1385 {FNAME("reverseLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL}, 1385 {FNAME("reverseLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL},
1386 {FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL}, 1386 {FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
1387 {FNAME("multiplexParameters") CHOICE, 0, 1, 2, DECODE | EXT | OPT, 1387 {FNAME("multiplexParameters") CHOICE, 0, 1, 2, DECODE | EXT | OPT,
@@ -1391,11 +1391,11 @@ static field_t _OpenLogicalChannelAck_reverseLogicalChannelParameters[] = { /* S
1391 {FNAME("replacementFor") INT, WORD, 1, 0, SKIP | OPT, 0, NULL}, 1391 {FNAME("replacementFor") INT, WORD, 1, 0, SKIP | OPT, 0, NULL},
1392}; 1392};
1393 1393
1394static field_t _H2250LogicalChannelAckParameters_nonStandard[] = { /* SEQUENCE OF */ 1394static const struct field_t _H2250LogicalChannelAckParameters_nonStandard[] = { /* SEQUENCE OF */
1395 {FNAME("item") SEQ, 0, 2, 2, SKIP, 0, _H245_NonStandardParameter}, 1395 {FNAME("item") SEQ, 0, 2, 2, SKIP, 0, _H245_NonStandardParameter},
1396}; 1396};
1397 1397
1398static field_t _H2250LogicalChannelAckParameters[] = { /* SEQUENCE */ 1398static const struct field_t _H2250LogicalChannelAckParameters[] = { /* SEQUENCE */
1399 {FNAME("nonStandard") SEQOF, SEMI, 0, 0, SKIP | OPT, 0, 1399 {FNAME("nonStandard") SEQOF, SEMI, 0, 0, SKIP | OPT, 0,
1400 _H2250LogicalChannelAckParameters_nonStandard}, 1400 _H2250LogicalChannelAckParameters_nonStandard},
1401 {FNAME("sessionID") INT, 8, 1, 0, SKIP | OPT, 0, NULL}, 1401 {FNAME("sessionID") INT, 8, 1, 0, SKIP | OPT, 0, NULL},
@@ -1410,14 +1410,14 @@ static field_t _H2250LogicalChannelAckParameters[] = { /* SEQUENCE */
1410 {FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL}, 1410 {FNAME("portNumber") INT, WORD, 0, 0, SKIP | OPT, 0, NULL},
1411}; 1411};
1412 1412
1413static field_t _OpenLogicalChannelAck_forwardMultiplexAckParameters[] = { /* CHOICE */ 1413static const struct field_t _OpenLogicalChannelAck_forwardMultiplexAckParameters[] = { /* CHOICE */
1414 {FNAME("h2250LogicalChannelAckParameters") SEQ, 5, 5, 7, DECODE | EXT, 1414 {FNAME("h2250LogicalChannelAckParameters") SEQ, 5, 5, 7, DECODE | EXT,
1415 offsetof(OpenLogicalChannelAck_forwardMultiplexAckParameters, 1415 offsetof(OpenLogicalChannelAck_forwardMultiplexAckParameters,
1416 h2250LogicalChannelAckParameters), 1416 h2250LogicalChannelAckParameters),
1417 _H2250LogicalChannelAckParameters}, 1417 _H2250LogicalChannelAckParameters},
1418}; 1418};
1419 1419
1420static field_t _OpenLogicalChannelAck[] = { /* SEQUENCE */ 1420static const struct field_t _OpenLogicalChannelAck[] = { /* SEQUENCE */
1421 {FNAME("forwardLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL}, 1421 {FNAME("forwardLogicalChannelNumber") INT, WORD, 1, 0, SKIP, 0, NULL},
1422 {FNAME("reverseLogicalChannelParameters") SEQ, 2, 3, 4, 1422 {FNAME("reverseLogicalChannelParameters") SEQ, 2, 3, 4,
1423 DECODE | EXT | OPT, offsetof(OpenLogicalChannelAck, 1423 DECODE | EXT | OPT, offsetof(OpenLogicalChannelAck,
@@ -1433,7 +1433,7 @@ static field_t _OpenLogicalChannelAck[] = { /* SEQUENCE */
1433 {FNAME("encryptionSync") SEQ, 2, 4, 4, STOP | EXT | OPT, 0, NULL}, 1433 {FNAME("encryptionSync") SEQ, 2, 4, 4, STOP | EXT | OPT, 0, NULL},
1434}; 1434};
1435 1435
1436static field_t _ResponseMessage[] = { /* CHOICE */ 1436static const struct field_t _ResponseMessage[] = { /* CHOICE */
1437 {FNAME("nonStandard") SEQ, 0, 1, 1, STOP | EXT, 0, NULL}, 1437 {FNAME("nonStandard") SEQ, 0, 1, 1, STOP | EXT, 0, NULL},
1438 {FNAME("masterSlaveDeterminationAck") SEQ, 0, 1, 1, STOP | EXT, 0, 1438 {FNAME("masterSlaveDeterminationAck") SEQ, 0, 1, 1, STOP | EXT, 0,
1439 NULL}, 1439 NULL},
@@ -1469,7 +1469,7 @@ static field_t _ResponseMessage[] = { /* CHOICE */
1469 {FNAME("logicalChannelRateReject") SEQ, 1, 4, 4, STOP | EXT, 0, NULL}, 1469 {FNAME("logicalChannelRateReject") SEQ, 1, 4, 4, STOP | EXT, 0, NULL},
1470}; 1470};
1471 1471
1472static field_t _MultimediaSystemControlMessage[] = { /* CHOICE */ 1472static const struct field_t _MultimediaSystemControlMessage[] = { /* CHOICE */
1473 {FNAME("request") CHOICE, 4, 11, 15, DECODE | EXT, 1473 {FNAME("request") CHOICE, 4, 11, 15, DECODE | EXT,
1474 offsetof(MultimediaSystemControlMessage, request), _RequestMessage}, 1474 offsetof(MultimediaSystemControlMessage, request), _RequestMessage},
1475 {FNAME("response") CHOICE, 5, 19, 24, DECODE | EXT, 1475 {FNAME("response") CHOICE, 5, 19, 24, DECODE | EXT,
@@ -1479,14 +1479,14 @@ static field_t _MultimediaSystemControlMessage[] = { /* CHOICE */
1479 {FNAME("indication") CHOICE, 4, 14, 23, STOP | EXT, 0, NULL}, 1479 {FNAME("indication") CHOICE, 4, 14, 23, STOP | EXT, 0, NULL},
1480}; 1480};
1481 1481
1482static field_t _H323_UU_PDU_h245Control[] = { /* SEQUENCE OF */ 1482static const struct field_t _H323_UU_PDU_h245Control[] = { /* SEQUENCE OF */
1483 {FNAME("item") CHOICE, 2, 4, 4, DECODE | OPEN | EXT, 1483 {FNAME("item") CHOICE, 2, 4, 4, DECODE | OPEN | EXT,
1484 sizeof(MultimediaSystemControlMessage), 1484 sizeof(MultimediaSystemControlMessage),
1485 _MultimediaSystemControlMessage} 1485 _MultimediaSystemControlMessage}
1486 , 1486 ,
1487}; 1487};
1488 1488
1489static field_t _H323_UU_PDU[] = { /* SEQUENCE */ 1489static const struct field_t _H323_UU_PDU[] = { /* SEQUENCE */
1490 {FNAME("h323-message-body") CHOICE, 3, 7, 13, DECODE | EXT, 1490 {FNAME("h323-message-body") CHOICE, 3, 7, 13, DECODE | EXT,
1491 offsetof(H323_UU_PDU, h323_message_body), 1491 offsetof(H323_UU_PDU, h323_message_body),
1492 _H323_UU_PDU_h323_message_body}, 1492 _H323_UU_PDU_h323_message_body},
@@ -1507,13 +1507,13 @@ static field_t _H323_UU_PDU[] = { /* SEQUENCE */
1507 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1507 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1508}; 1508};
1509 1509
1510static field_t _H323_UserInformation[] = { /* SEQUENCE */ 1510static const struct field_t _H323_UserInformation[] = { /* SEQUENCE */
1511 {FNAME("h323-uu-pdu") SEQ, 1, 2, 11, DECODE | EXT, 1511 {FNAME("h323-uu-pdu") SEQ, 1, 2, 11, DECODE | EXT,
1512 offsetof(H323_UserInformation, h323_uu_pdu), _H323_UU_PDU}, 1512 offsetof(H323_UserInformation, h323_uu_pdu), _H323_UU_PDU},
1513 {FNAME("user-data") SEQ, 0, 2, 2, STOP | EXT | OPT, 0, NULL}, 1513 {FNAME("user-data") SEQ, 0, 2, 2, STOP | EXT | OPT, 0, NULL},
1514}; 1514};
1515 1515
1516static field_t _GatekeeperRequest[] = { /* SEQUENCE */ 1516static const struct field_t _GatekeeperRequest[] = { /* SEQUENCE */
1517 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1517 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1518 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1518 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1519 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0, 1519 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
@@ -1537,7 +1537,7 @@ static field_t _GatekeeperRequest[] = { /* SEQUENCE */
1537 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1537 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1538}; 1538};
1539 1539
1540static field_t _GatekeeperConfirm[] = { /* SEQUENCE */ 1540static const struct field_t _GatekeeperConfirm[] = { /* SEQUENCE */
1541 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1541 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1542 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1542 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1543 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0, 1543 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
@@ -1557,23 +1557,23 @@ static field_t _GatekeeperConfirm[] = { /* SEQUENCE */
1557 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1557 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1558}; 1558};
1559 1559
1560static field_t _RegistrationRequest_callSignalAddress[] = { /* SEQUENCE OF */ 1560static const struct field_t _RegistrationRequest_callSignalAddress[] = { /* SEQUENCE OF */
1561 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT, 1561 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
1562 sizeof(TransportAddress), _TransportAddress} 1562 sizeof(TransportAddress), _TransportAddress}
1563 , 1563 ,
1564}; 1564};
1565 1565
1566static field_t _RegistrationRequest_rasAddress[] = { /* SEQUENCE OF */ 1566static const struct field_t _RegistrationRequest_rasAddress[] = { /* SEQUENCE OF */
1567 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT, 1567 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
1568 sizeof(TransportAddress), _TransportAddress} 1568 sizeof(TransportAddress), _TransportAddress}
1569 , 1569 ,
1570}; 1570};
1571 1571
1572static field_t _RegistrationRequest_terminalAlias[] = { /* SEQUENCE OF */ 1572static const struct field_t _RegistrationRequest_terminalAlias[] = { /* SEQUENCE OF */
1573 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1573 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1574}; 1574};
1575 1575
1576static field_t _RegistrationRequest[] = { /* SEQUENCE */ 1576static const struct field_t _RegistrationRequest[] = { /* SEQUENCE */
1577 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1577 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1578 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1578 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1579 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0, 1579 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
@@ -1621,17 +1621,17 @@ static field_t _RegistrationRequest[] = { /* SEQUENCE */
1621 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1621 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1622}; 1622};
1623 1623
1624static field_t _RegistrationConfirm_callSignalAddress[] = { /* SEQUENCE OF */ 1624static const struct field_t _RegistrationConfirm_callSignalAddress[] = { /* SEQUENCE OF */
1625 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT, 1625 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
1626 sizeof(TransportAddress), _TransportAddress} 1626 sizeof(TransportAddress), _TransportAddress}
1627 , 1627 ,
1628}; 1628};
1629 1629
1630static field_t _RegistrationConfirm_terminalAlias[] = { /* SEQUENCE OF */ 1630static const struct field_t _RegistrationConfirm_terminalAlias[] = { /* SEQUENCE OF */
1631 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1631 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1632}; 1632};
1633 1633
1634static field_t _RegistrationConfirm[] = { /* SEQUENCE */ 1634static const struct field_t _RegistrationConfirm[] = { /* SEQUENCE */
1635 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1635 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1636 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL}, 1636 {FNAME("protocolIdentifier") OID, BYTE, 0, 0, SKIP, 0, NULL},
1637 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0, 1637 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
@@ -1667,13 +1667,13 @@ static field_t _RegistrationConfirm[] = { /* SEQUENCE */
1667 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1667 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1668}; 1668};
1669 1669
1670static field_t _UnregistrationRequest_callSignalAddress[] = { /* SEQUENCE OF */ 1670static const struct field_t _UnregistrationRequest_callSignalAddress[] = { /* SEQUENCE OF */
1671 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT, 1671 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
1672 sizeof(TransportAddress), _TransportAddress} 1672 sizeof(TransportAddress), _TransportAddress}
1673 , 1673 ,
1674}; 1674};
1675 1675
1676static field_t _UnregistrationRequest[] = { /* SEQUENCE */ 1676static const struct field_t _UnregistrationRequest[] = { /* SEQUENCE */
1677 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1677 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1678 {FNAME("callSignalAddress") SEQOF, SEMI, 0, 10, DECODE, 1678 {FNAME("callSignalAddress") SEQOF, SEMI, 0, 10, DECODE,
1679 offsetof(UnregistrationRequest, callSignalAddress), 1679 offsetof(UnregistrationRequest, callSignalAddress),
@@ -1694,24 +1694,24 @@ static field_t _UnregistrationRequest[] = { /* SEQUENCE */
1694 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1694 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1695}; 1695};
1696 1696
1697static field_t _CallModel[] = { /* CHOICE */ 1697static const struct field_t _CallModel[] = { /* CHOICE */
1698 {FNAME("direct") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1698 {FNAME("direct") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1699 {FNAME("gatekeeperRouted") NUL, FIXD, 0, 0, SKIP, 0, NULL}, 1699 {FNAME("gatekeeperRouted") NUL, FIXD, 0, 0, SKIP, 0, NULL},
1700}; 1700};
1701 1701
1702static field_t _AdmissionRequest_destinationInfo[] = { /* SEQUENCE OF */ 1702static const struct field_t _AdmissionRequest_destinationInfo[] = { /* SEQUENCE OF */
1703 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1703 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1704}; 1704};
1705 1705
1706static field_t _AdmissionRequest_destExtraCallInfo[] = { /* SEQUENCE OF */ 1706static const struct field_t _AdmissionRequest_destExtraCallInfo[] = { /* SEQUENCE OF */
1707 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1707 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1708}; 1708};
1709 1709
1710static field_t _AdmissionRequest_srcInfo[] = { /* SEQUENCE OF */ 1710static const struct field_t _AdmissionRequest_srcInfo[] = { /* SEQUENCE OF */
1711 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1711 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1712}; 1712};
1713 1713
1714static field_t _AdmissionRequest[] = { /* SEQUENCE */ 1714static const struct field_t _AdmissionRequest[] = { /* SEQUENCE */
1715 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1715 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1716 {FNAME("callType") CHOICE, 2, 4, 4, SKIP | EXT, 0, _CallType}, 1716 {FNAME("callType") CHOICE, 2, 4, 4, SKIP | EXT, 0, _CallType},
1717 {FNAME("callModel") CHOICE, 1, 2, 2, SKIP | EXT | OPT, 0, _CallModel}, 1717 {FNAME("callModel") CHOICE, 1, 2, 2, SKIP | EXT | OPT, 0, _CallModel},
@@ -1755,7 +1755,7 @@ static field_t _AdmissionRequest[] = { /* SEQUENCE */
1755 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1755 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1756}; 1756};
1757 1757
1758static field_t _AdmissionConfirm[] = { /* SEQUENCE */ 1758static const struct field_t _AdmissionConfirm[] = { /* SEQUENCE */
1759 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1759 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1760 {FNAME("bandWidth") INT, CONS, 0, 0, SKIP, 0, NULL}, 1760 {FNAME("bandWidth") INT, CONS, 0, 0, SKIP, 0, NULL},
1761 {FNAME("callModel") CHOICE, 1, 2, 2, SKIP | EXT, 0, _CallModel}, 1761 {FNAME("callModel") CHOICE, 1, 2, 2, SKIP | EXT, 0, _CallModel},
@@ -1790,11 +1790,11 @@ static field_t _AdmissionConfirm[] = { /* SEQUENCE */
1790 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1790 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1791}; 1791};
1792 1792
1793static field_t _LocationRequest_destinationInfo[] = { /* SEQUENCE OF */ 1793static const struct field_t _LocationRequest_destinationInfo[] = { /* SEQUENCE OF */
1794 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress}, 1794 {FNAME("item") CHOICE, 1, 2, 7, SKIP | EXT, 0, _AliasAddress},
1795}; 1795};
1796 1796
1797static field_t _LocationRequest[] = { /* SEQUENCE */ 1797static const struct field_t _LocationRequest[] = { /* SEQUENCE */
1798 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1798 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1799 {FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL}, 1799 {FNAME("endpointIdentifier") BMPSTR, 7, 1, 0, SKIP | OPT, 0, NULL},
1800 {FNAME("destinationInfo") SEQOF, SEMI, 0, 0, SKIP, 0, 1800 {FNAME("destinationInfo") SEQOF, SEMI, 0, 0, SKIP, 0,
@@ -1818,7 +1818,7 @@ static field_t _LocationRequest[] = { /* SEQUENCE */
1818 {FNAME("circuitInfo") SEQ, 3, 3, 3, STOP | EXT | OPT, 0, NULL}, 1818 {FNAME("circuitInfo") SEQ, 3, 3, 3, STOP | EXT | OPT, 0, NULL},
1819}; 1819};
1820 1820
1821static field_t _LocationConfirm[] = { /* SEQUENCE */ 1821static const struct field_t _LocationConfirm[] = { /* SEQUENCE */
1822 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1822 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
1823 {FNAME("callSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT, 1823 {FNAME("callSignalAddress") CHOICE, 3, 7, 7, DECODE | EXT,
1824 offsetof(LocationConfirm, callSignalAddress), _TransportAddress}, 1824 offsetof(LocationConfirm, callSignalAddress), _TransportAddress},
@@ -1844,13 +1844,13 @@ static field_t _LocationConfirm[] = { /* SEQUENCE */
1844 {FNAME("serviceControl") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1844 {FNAME("serviceControl") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1845}; 1845};
1846 1846
1847static field_t _InfoRequestResponse_callSignalAddress[] = { /* SEQUENCE OF */ 1847static const struct field_t _InfoRequestResponse_callSignalAddress[] = { /* SEQUENCE OF */
1848 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT, 1848 {FNAME("item") CHOICE, 3, 7, 7, DECODE | EXT,
1849 sizeof(TransportAddress), _TransportAddress} 1849 sizeof(TransportAddress), _TransportAddress}
1850 , 1850 ,
1851}; 1851};
1852 1852
1853static field_t _InfoRequestResponse[] = { /* SEQUENCE */ 1853static const struct field_t _InfoRequestResponse[] = { /* SEQUENCE */
1854 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0, 1854 {FNAME("nonStandardData") SEQ, 0, 2, 2, SKIP | OPT, 0,
1855 _NonStandardParameter}, 1855 _NonStandardParameter},
1856 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL}, 1856 {FNAME("requestSeqNum") INT, WORD, 1, 0, SKIP, 0, NULL},
@@ -1873,7 +1873,7 @@ static field_t _InfoRequestResponse[] = { /* SEQUENCE */
1873 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL}, 1873 {FNAME("genericData") SEQOF, SEMI, 0, 0, STOP | OPT, 0, NULL},
1874}; 1874};
1875 1875
1876static field_t _RasMessage[] = { /* CHOICE */ 1876static const struct field_t _RasMessage[] = { /* CHOICE */
1877 {FNAME("gatekeeperRequest") SEQ, 4, 8, 18, DECODE | EXT, 1877 {FNAME("gatekeeperRequest") SEQ, 4, 8, 18, DECODE | EXT,
1878 offsetof(RasMessage, gatekeeperRequest), _GatekeeperRequest}, 1878 offsetof(RasMessage, gatekeeperRequest), _GatekeeperRequest},
1879 {FNAME("gatekeeperConfirm") SEQ, 2, 5, 14, DECODE | EXT, 1879 {FNAME("gatekeeperConfirm") SEQ, 2, 5, 14, DECODE | EXT,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 96aa637c0932..b1fd21cc1dbc 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack_core.h> 28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/nf_conntrack_extend.h> 29#include <net/netfilter/nf_conntrack_extend.h>
30 30
31static DEFINE_MUTEX(nf_ct_helper_mutex);
31static struct hlist_head *nf_ct_helper_hash __read_mostly; 32static struct hlist_head *nf_ct_helper_hash __read_mostly;
32static unsigned int nf_ct_helper_hsize __read_mostly; 33static unsigned int nf_ct_helper_hsize __read_mostly;
33static unsigned int nf_ct_helper_count __read_mostly; 34static unsigned int nf_ct_helper_count __read_mostly;
@@ -54,42 +55,13 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
54 return NULL; 55 return NULL;
55 56
56 h = helper_hash(tuple); 57 h = helper_hash(tuple);
57 hlist_for_each_entry(helper, n, &nf_ct_helper_hash[h], hnode) { 58 hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) {
58 if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) 59 if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
59 return helper; 60 return helper;
60 } 61 }
61 return NULL; 62 return NULL;
62} 63}
63 64EXPORT_SYMBOL_GPL(__nf_ct_helper_find);
64struct nf_conntrack_helper *
65nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple)
66{
67 struct nf_conntrack_helper *helper;
68
69 /* need nf_conntrack_lock to assure that helper exists until
70 * try_module_get() is called */
71 read_lock_bh(&nf_conntrack_lock);
72
73 helper = __nf_ct_helper_find(tuple);
74 if (helper) {
75 /* need to increase module usage count to assure helper will
76 * not go away while the caller is e.g. busy putting a
77 * conntrack in the hash that uses the helper */
78 if (!try_module_get(helper->me))
79 helper = NULL;
80 }
81
82 read_unlock_bh(&nf_conntrack_lock);
83
84 return helper;
85}
86EXPORT_SYMBOL_GPL(nf_ct_helper_find_get);
87
88void nf_ct_helper_put(struct nf_conntrack_helper *helper)
89{
90 module_put(helper->me);
91}
92EXPORT_SYMBOL_GPL(nf_ct_helper_put);
93 65
94struct nf_conntrack_helper * 66struct nf_conntrack_helper *
95__nf_conntrack_helper_find_byname(const char *name) 67__nf_conntrack_helper_find_byname(const char *name)
@@ -99,7 +71,7 @@ __nf_conntrack_helper_find_byname(const char *name)
99 unsigned int i; 71 unsigned int i;
100 72
101 for (i = 0; i < nf_ct_helper_hsize; i++) { 73 for (i = 0; i < nf_ct_helper_hsize; i++) {
102 hlist_for_each_entry(h, n, &nf_ct_helper_hash[i], hnode) { 74 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
103 if (!strcmp(h->name, name)) 75 if (!strcmp(h->name, name))
104 return h; 76 return h;
105 } 77 }
@@ -140,10 +112,10 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
140 112
141 BUG_ON(me->timeout == 0); 113 BUG_ON(me->timeout == 0);
142 114
143 write_lock_bh(&nf_conntrack_lock); 115 mutex_lock(&nf_ct_helper_mutex);
144 hlist_add_head(&me->hnode, &nf_ct_helper_hash[h]); 116 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
145 nf_ct_helper_count++; 117 nf_ct_helper_count++;
146 write_unlock_bh(&nf_conntrack_lock); 118 mutex_unlock(&nf_ct_helper_mutex);
147 119
148 return 0; 120 return 0;
149} 121}
@@ -156,10 +128,17 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
156 struct hlist_node *n, *next; 128 struct hlist_node *n, *next;
157 unsigned int i; 129 unsigned int i;
158 130
159 /* Need write lock here, to delete helper. */ 131 mutex_lock(&nf_ct_helper_mutex);
160 write_lock_bh(&nf_conntrack_lock); 132 hlist_del_rcu(&me->hnode);
161 hlist_del(&me->hnode);
162 nf_ct_helper_count--; 133 nf_ct_helper_count--;
134 mutex_unlock(&nf_ct_helper_mutex);
135
 136 /* Make sure nothing is still using the helper unless it's a
137 * connection in the hash.
138 */
139 synchronize_rcu();
140
141 spin_lock_bh(&nf_conntrack_lock);
163 142
164 /* Get rid of expectations */ 143 /* Get rid of expectations */
165 for (i = 0; i < nf_ct_expect_hsize; i++) { 144 for (i = 0; i < nf_ct_expect_hsize; i++) {
@@ -181,10 +160,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
181 hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode) 160 hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode)
182 unhelp(h, me); 161 unhelp(h, me);
183 } 162 }
184 write_unlock_bh(&nf_conntrack_lock); 163 spin_unlock_bh(&nf_conntrack_lock);
185
186 /* Someone could be still looking at the helper in a bh. */
187 synchronize_net();
188} 164}
189EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); 165EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
190 166
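The nf_conntrack_helper.c hunk above converts helper lookup from the global nf_conntrack rwlock to RCU: writers now serialize on the new nf_ct_helper_mutex and publish entries with hlist_add_head_rcu(), while readers walk the hash locklessly with hlist_for_each_entry_rcu() and unregistration waits for a grace period with synchronize_rcu(). The following is only a minimal userspace sketch of that publish/lookup shape, using C11 atomics and a plain singly linked list in place of the kernel's RCU hlist primitives; names and structure are illustrative, not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct helper {
	const char *name;
	struct helper *_Atomic next;	/* link seen by lockless readers */
};

static struct helper *_Atomic helper_list;	/* list head */
static pthread_mutex_t helper_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Writer side: serialize against other writers, then publish the node with
 * a release store so its fields are visible before the head points at it. */
static void helper_register(struct helper *h)
{
	pthread_mutex_lock(&helper_mutex);
	atomic_store_explicit(&h->next,
			atomic_load_explicit(&helper_list, memory_order_relaxed),
			memory_order_relaxed);
	atomic_store_explicit(&helper_list, h, memory_order_release);
	pthread_mutex_unlock(&helper_mutex);
}

/* Reader side: lock-free walk, the role hlist_for_each_entry_rcu() plays in
 * __nf_ct_helper_find().  Real RCU additionally defers freeing of unlinked
 * nodes until after a grace period; this sketch sidesteps that by never
 * freeing anything. */
static struct helper *helper_find(const char *name)
{
	struct helper *h;

	for (h = atomic_load_explicit(&helper_list, memory_order_acquire);
	     h != NULL;
	     h = atomic_load_explicit(&h->next, memory_order_acquire))
		if (strcmp(h->name, name) == 0)
			return h;
	return NULL;
}

int main(void)
{
	static struct helper ftp = { .name = "ftp" };
	static struct helper irc = { .name = "irc" };

	helper_register(&ftp);
	helper_register(&irc);
	printf("lookup irc: %s\n", helper_find("irc") ? "found" : "missing");
	return 0;
}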
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index dfaed4ba83cd..c336b07a0d4c 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -23,7 +23,7 @@
23 23
24#define MAX_PORTS 8 24#define MAX_PORTS 8
25static unsigned short ports[MAX_PORTS]; 25static unsigned short ports[MAX_PORTS];
26static int ports_c; 26static unsigned int ports_c;
27static unsigned int max_dcc_channels = 8; 27static unsigned int max_dcc_channels = 8;
28static unsigned int dcc_timeout __read_mostly = 300; 28static unsigned int dcc_timeout __read_mostly = 300;
29/* This is slow, but it's simple. --RR */ 29/* This is slow, but it's simple. --RR */
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 38141f104db7..4a1b42b2b7a5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -491,11 +491,6 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
491 && ctnetlink_dump_helpinfo(skb, ct) < 0) 491 && ctnetlink_dump_helpinfo(skb, ct) < 0)
492 goto nla_put_failure; 492 goto nla_put_failure;
493 493
494#ifdef CONFIG_NF_CONNTRACK_MARK
495 if ((events & IPCT_MARK || ct->mark)
496 && ctnetlink_dump_mark(skb, ct) < 0)
497 goto nla_put_failure;
498#endif
499#ifdef CONFIG_NF_CONNTRACK_SECMARK 494#ifdef CONFIG_NF_CONNTRACK_SECMARK
500 if ((events & IPCT_SECMARK || ct->secmark) 495 if ((events & IPCT_SECMARK || ct->secmark)
501 && ctnetlink_dump_secmark(skb, ct) < 0) 496 && ctnetlink_dump_secmark(skb, ct) < 0)
@@ -516,6 +511,12 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
516 goto nla_put_failure; 511 goto nla_put_failure;
517 } 512 }
518 513
514#ifdef CONFIG_NF_CONNTRACK_MARK
515 if ((events & IPCT_MARK || ct->mark)
516 && ctnetlink_dump_mark(skb, ct) < 0)
517 goto nla_put_failure;
518#endif
519
519 nlh->nlmsg_len = skb->tail - b; 520 nlh->nlmsg_len = skb->tail - b;
520 nfnetlink_send(skb, 0, group, 0); 521 nfnetlink_send(skb, 0, group, 0);
521 return NOTIFY_DONE; 522 return NOTIFY_DONE;
@@ -545,12 +546,12 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
545 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 546 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
546 u_int8_t l3proto = nfmsg->nfgen_family; 547 u_int8_t l3proto = nfmsg->nfgen_family;
547 548
548 read_lock_bh(&nf_conntrack_lock); 549 rcu_read_lock();
549 last = (struct nf_conn *)cb->args[1]; 550 last = (struct nf_conn *)cb->args[1];
550 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 551 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
551restart: 552restart:
552 hlist_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]], 553 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[cb->args[0]],
553 hnode) { 554 hnode) {
554 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 555 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
555 continue; 556 continue;
556 ct = nf_ct_tuplehash_to_ctrack(h); 557 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -568,7 +569,8 @@ restart:
568 cb->nlh->nlmsg_seq, 569 cb->nlh->nlmsg_seq,
569 IPCTNL_MSG_CT_NEW, 570 IPCTNL_MSG_CT_NEW,
570 1, ct) < 0) { 571 1, ct) < 0) {
571 nf_conntrack_get(&ct->ct_general); 572 if (!atomic_inc_not_zero(&ct->ct_general.use))
573 continue;
572 cb->args[1] = (unsigned long)ct; 574 cb->args[1] = (unsigned long)ct;
573 goto out; 575 goto out;
574 } 576 }
@@ -584,7 +586,7 @@ restart:
584 } 586 }
585 } 587 }
586out: 588out:
587 read_unlock_bh(&nf_conntrack_lock); 589 rcu_read_unlock();
588 if (last) 590 if (last)
589 nf_ct_put(last); 591 nf_ct_put(last);
590 592
@@ -1167,11 +1169,12 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1167 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 1169 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1168#endif 1170#endif
1169 1171
1170 helper = nf_ct_helper_find_get(rtuple); 1172 rcu_read_lock();
1173 helper = __nf_ct_helper_find(rtuple);
1171 if (helper) { 1174 if (helper) {
1172 help = nf_ct_helper_ext_add(ct, GFP_KERNEL); 1175 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
1173 if (help == NULL) { 1176 if (help == NULL) {
1174 nf_ct_helper_put(helper); 1177 rcu_read_unlock();
1175 err = -ENOMEM; 1178 err = -ENOMEM;
1176 goto err; 1179 goto err;
1177 } 1180 }
@@ -1187,9 +1190,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1187 1190
1188 add_timer(&ct->timeout); 1191 add_timer(&ct->timeout);
1189 nf_conntrack_hash_insert(ct); 1192 nf_conntrack_hash_insert(ct);
1190 1193 rcu_read_unlock();
1191 if (helper)
1192 nf_ct_helper_put(helper);
1193 1194
1194 return 0; 1195 return 0;
1195 1196
@@ -1220,11 +1221,11 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1220 return err; 1221 return err;
1221 } 1222 }
1222 1223
1223 write_lock_bh(&nf_conntrack_lock); 1224 spin_lock_bh(&nf_conntrack_lock);
1224 if (cda[CTA_TUPLE_ORIG]) 1225 if (cda[CTA_TUPLE_ORIG])
1225 h = __nf_conntrack_find(&otuple, NULL); 1226 h = __nf_conntrack_find(&otuple);
1226 else if (cda[CTA_TUPLE_REPLY]) 1227 else if (cda[CTA_TUPLE_REPLY])
1227 h = __nf_conntrack_find(&rtuple, NULL); 1228 h = __nf_conntrack_find(&rtuple);
1228 1229
1229 if (h == NULL) { 1230 if (h == NULL) {
1230 struct nf_conntrack_tuple master; 1231 struct nf_conntrack_tuple master;
@@ -1237,9 +1238,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1237 CTA_TUPLE_MASTER, 1238 CTA_TUPLE_MASTER,
1238 u3); 1239 u3);
1239 if (err < 0) 1240 if (err < 0)
1240 return err; 1241 goto out_unlock;
1241 1242
1242 master_h = __nf_conntrack_find(&master, NULL); 1243 master_h = __nf_conntrack_find(&master);
1243 if (master_h == NULL) { 1244 if (master_h == NULL) {
1244 err = -ENOENT; 1245 err = -ENOENT;
1245 goto out_unlock; 1246 goto out_unlock;
@@ -1248,7 +1249,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1248 atomic_inc(&master_ct->ct_general.use); 1249 atomic_inc(&master_ct->ct_general.use);
1249 } 1250 }
1250 1251
1251 write_unlock_bh(&nf_conntrack_lock); 1252 spin_unlock_bh(&nf_conntrack_lock);
1252 err = -ENOENT; 1253 err = -ENOENT;
1253 if (nlh->nlmsg_flags & NLM_F_CREATE) 1254 if (nlh->nlmsg_flags & NLM_F_CREATE)
1254 err = ctnetlink_create_conntrack(cda, 1255 err = ctnetlink_create_conntrack(cda,
@@ -1281,7 +1282,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1281 } 1282 }
1282 1283
1283out_unlock: 1284out_unlock:
1284 write_unlock_bh(&nf_conntrack_lock); 1285 spin_unlock_bh(&nf_conntrack_lock);
1285 return err; 1286 return err;
1286} 1287}
1287 1288
@@ -1472,7 +1473,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1472 struct hlist_node *n; 1473 struct hlist_node *n;
1473 u_int8_t l3proto = nfmsg->nfgen_family; 1474 u_int8_t l3proto = nfmsg->nfgen_family;
1474 1475
1475 read_lock_bh(&nf_conntrack_lock); 1476 rcu_read_lock();
1476 last = (struct nf_conntrack_expect *)cb->args[1]; 1477 last = (struct nf_conntrack_expect *)cb->args[1];
1477 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 1478 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
1478restart: 1479restart:
@@ -1489,7 +1490,8 @@ restart:
1489 cb->nlh->nlmsg_seq, 1490 cb->nlh->nlmsg_seq,
1490 IPCTNL_MSG_EXP_NEW, 1491 IPCTNL_MSG_EXP_NEW,
1491 1, exp) < 0) { 1492 1, exp) < 0) {
1492 atomic_inc(&exp->use); 1493 if (!atomic_inc_not_zero(&exp->use))
1494 continue;
1493 cb->args[1] = (unsigned long)exp; 1495 cb->args[1] = (unsigned long)exp;
1494 goto out; 1496 goto out;
1495 } 1497 }
@@ -1500,7 +1502,7 @@ restart:
1500 } 1502 }
1501 } 1503 }
1502out: 1504out:
1503 read_unlock_bh(&nf_conntrack_lock); 1505 rcu_read_unlock();
1504 if (last) 1506 if (last)
1505 nf_ct_expect_put(last); 1507 nf_ct_expect_put(last);
1506 1508
@@ -1613,10 +1615,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1613 struct nf_conn_help *m_help; 1615 struct nf_conn_help *m_help;
1614 1616
1615 /* delete all expectations for this helper */ 1617 /* delete all expectations for this helper */
1616 write_lock_bh(&nf_conntrack_lock); 1618 spin_lock_bh(&nf_conntrack_lock);
1617 h = __nf_conntrack_helper_find_byname(name); 1619 h = __nf_conntrack_helper_find_byname(name);
1618 if (!h) { 1620 if (!h) {
1619 write_unlock_bh(&nf_conntrack_lock); 1621 spin_unlock_bh(&nf_conntrack_lock);
1620 return -EINVAL; 1622 return -EINVAL;
1621 } 1623 }
1622 for (i = 0; i < nf_ct_expect_hsize; i++) { 1624 for (i = 0; i < nf_ct_expect_hsize; i++) {
@@ -1631,10 +1633,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1631 } 1633 }
1632 } 1634 }
1633 } 1635 }
1634 write_unlock_bh(&nf_conntrack_lock); 1636 spin_unlock_bh(&nf_conntrack_lock);
1635 } else { 1637 } else {
1636 /* This basically means we have to flush everything*/ 1638 /* This basically means we have to flush everything*/
1637 write_lock_bh(&nf_conntrack_lock); 1639 spin_lock_bh(&nf_conntrack_lock);
1638 for (i = 0; i < nf_ct_expect_hsize; i++) { 1640 for (i = 0; i < nf_ct_expect_hsize; i++) {
1639 hlist_for_each_entry_safe(exp, n, next, 1641 hlist_for_each_entry_safe(exp, n, next,
1640 &nf_ct_expect_hash[i], 1642 &nf_ct_expect_hash[i],
@@ -1645,7 +1647,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1645 } 1647 }
1646 } 1648 }
1647 } 1649 }
1648 write_unlock_bh(&nf_conntrack_lock); 1650 spin_unlock_bh(&nf_conntrack_lock);
1649 } 1651 }
1650 1652
1651 return 0; 1653 return 0;
@@ -1731,11 +1733,11 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1731 if (err < 0) 1733 if (err < 0)
1732 return err; 1734 return err;
1733 1735
1734 write_lock_bh(&nf_conntrack_lock); 1736 spin_lock_bh(&nf_conntrack_lock);
1735 exp = __nf_ct_expect_find(&tuple); 1737 exp = __nf_ct_expect_find(&tuple);
1736 1738
1737 if (!exp) { 1739 if (!exp) {
1738 write_unlock_bh(&nf_conntrack_lock); 1740 spin_unlock_bh(&nf_conntrack_lock);
1739 err = -ENOENT; 1741 err = -ENOENT;
1740 if (nlh->nlmsg_flags & NLM_F_CREATE) 1742 if (nlh->nlmsg_flags & NLM_F_CREATE)
1741 err = ctnetlink_create_expect(cda, u3); 1743 err = ctnetlink_create_expect(cda, u3);
@@ -1745,7 +1747,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1745 err = -EEXIST; 1747 err = -EEXIST;
1746 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) 1748 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
1747 err = ctnetlink_change_expect(exp, cda); 1749 err = ctnetlink_change_expect(exp, cda);
1748 write_unlock_bh(&nf_conntrack_lock); 1750 spin_unlock_bh(&nf_conntrack_lock);
1749 1751
1750 return err; 1752 return err;
1751} 1753}
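The ctnetlink dump paths in the hunk above drop the read-side nf_conntrack_lock in favour of rcu_read_lock(), so a dump can now race with the final nf_ct_put()/nf_ct_expect_put() on an entry it wants to hold across netlink messages. That is why the unconditional refcount increments become atomic_inc_not_zero(): a reference is taken only while the count is still non-zero, otherwise the entry is skipped. A hedged userspace approximation of that primitive with C11 atomics follows; it is not the kernel's implementation, just the compare-and-swap loop that gives the same semantics.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is not already on its way to being
 * freed, i.e. only while the count is still non-zero. */
static bool refcount_inc_not_zero(atomic_uint *count)
{
	unsigned int old = atomic_load_explicit(count, memory_order_relaxed);

	do {
		if (old == 0)
			return false;	/* lost the race with the last put */
	} while (!atomic_compare_exchange_weak_explicit(count, &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

int main(void)
{
	atomic_uint live = 2;	/* entry still referenced elsewhere */
	atomic_uint dying = 0;	/* last reference already dropped   */

	printf("live : %s\n", refcount_inc_not_zero(&live) ? "ref taken" : "skipped");
	printf("dying: %s\n", refcount_inc_not_zero(&dying) ? "ref taken" : "skipped");
	return 0;
}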
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 099b6df3e2b5..b5cb8e831230 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);
67 67
68#ifdef DEBUG 68#ifdef DEBUG
69/* PptpControlMessageType names */ 69/* PptpControlMessageType names */
70const char *pptp_msg_name[] = { 70const char *const pptp_msg_name[] = {
71 "UNKNOWN_MESSAGE", 71 "UNKNOWN_MESSAGE",
72 "START_SESSION_REQUEST", 72 "START_SESSION_REQUEST",
73 "START_SESSION_REPLY", 73 "START_SESSION_REPLY",
@@ -136,7 +136,7 @@ static void pptp_expectfn(struct nf_conn *ct,
136 136
137static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t) 137static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
138{ 138{
139 struct nf_conntrack_tuple_hash *h; 139 const struct nf_conntrack_tuple_hash *h;
140 struct nf_conntrack_expect *exp; 140 struct nf_conntrack_expect *exp;
141 struct nf_conn *sibling; 141 struct nf_conn *sibling;
142 142
@@ -168,7 +168,7 @@ static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
168/* timeout GRE data connections */ 168/* timeout GRE data connections */
169static void pptp_destroy_siblings(struct nf_conn *ct) 169static void pptp_destroy_siblings(struct nf_conn *ct)
170{ 170{
171 struct nf_conn_help *help = nfct_help(ct); 171 const struct nf_conn_help *help = nfct_help(ct);
172 struct nf_conntrack_tuple t; 172 struct nf_conntrack_tuple t;
173 173
174 nf_ct_gre_keymap_destroy(ct); 174 nf_ct_gre_keymap_destroy(ct);
@@ -497,9 +497,11 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
497 497
498{ 498{
499 int dir = CTINFO2DIR(ctinfo); 499 int dir = CTINFO2DIR(ctinfo);
500 struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; 500 const struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
501 struct tcphdr _tcph, *tcph; 501 const struct tcphdr *tcph;
502 struct pptp_pkt_hdr _pptph, *pptph; 502 struct tcphdr _tcph;
503 const struct pptp_pkt_hdr *pptph;
504 struct pptp_pkt_hdr _pptph;
503 struct PptpControlHeader _ctlh, *ctlh; 505 struct PptpControlHeader _ctlh, *ctlh;
504 union pptp_ctrl_union _pptpReq, *pptpReq; 506 union pptp_ctrl_union _pptpReq, *pptpReq;
505 unsigned int tcplen = skb->len - protoff; 507 unsigned int tcplen = skb->len - protoff;
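
Many of the constifications in this file (and in the l4proto trackers below) follow the same shape: the combined declaration "struct foo _foo, *foo;" becomes a const pointer plus a separate on-stack copy, because skb_header_pointer() returns a pointer either into the skb or into that local buffer, and the caller only reads through it. A small self-contained sketch of the idiom, with a hypothetical helper name:

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Sketch: read-only access to a possibly non-linear header.  The returned
 * pointer aliases either the skb data or the local _th copy, so const fits. */
static int example_peek_ports(const struct sk_buff *skb, unsigned int dataoff,
                              __be16 *sport, __be16 *dport)
{
        const struct tcphdr *th;
        struct tcphdr _th;

        th = skb_header_pointer(skb, dataoff, sizeof(_th), &_th);
        if (th == NULL)
                return -1;

        *sport = th->source;
        *dport = th->dest;
        return 0;
}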
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 22c5dcb6306a..55458915575f 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -41,19 +41,19 @@ static int generic_print_tuple(struct seq_file *s,
41} 41}
42 42
43/* Returns verdict for packet, or -1 for invalid. */ 43/* Returns verdict for packet, or -1 for invalid. */
44static int packet(struct nf_conn *conntrack, 44static int packet(struct nf_conn *ct,
45 const struct sk_buff *skb, 45 const struct sk_buff *skb,
46 unsigned int dataoff, 46 unsigned int dataoff,
47 enum ip_conntrack_info ctinfo, 47 enum ip_conntrack_info ctinfo,
48 int pf, 48 int pf,
49 unsigned int hooknum) 49 unsigned int hooknum)
50{ 50{
51 nf_ct_refresh_acct(conntrack, ctinfo, skb, nf_ct_generic_timeout); 51 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_generic_timeout);
52 return NF_ACCEPT; 52 return NF_ACCEPT;
53} 53}
54 54
55/* Called when a new connection for this protocol found. */ 55/* Called when a new connection for this protocol found. */
56static int new(struct nf_conn *conntrack, const struct sk_buff *skb, 56static int new(struct nf_conn *ct, const struct sk_buff *skb,
57 unsigned int dataoff) 57 unsigned int dataoff)
58{ 58{
59 return 1; 59 return 1;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 4a185f6aa65a..e10024a1b666 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -161,9 +161,11 @@ static int gre_pkt_to_tuple(const struct sk_buff *skb,
161 unsigned int dataoff, 161 unsigned int dataoff,
162 struct nf_conntrack_tuple *tuple) 162 struct nf_conntrack_tuple *tuple)
163{ 163{
164 struct gre_hdr_pptp _pgrehdr, *pgrehdr; 164 const struct gre_hdr_pptp *pgrehdr;
165 struct gre_hdr_pptp _pgrehdr;
165 __be16 srckey; 166 __be16 srckey;
166 struct gre_hdr _grehdr, *grehdr; 167 const struct gre_hdr *grehdr;
168 struct gre_hdr _grehdr;
167 169
168 /* first only delinearize old RFC1701 GRE header */ 170 /* first only delinearize old RFC1701 GRE header */
169 grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr); 171 grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 21d29e782baf..f9a08370dbb3 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -25,7 +25,7 @@
25#include <net/netfilter/nf_conntrack_l4proto.h> 25#include <net/netfilter/nf_conntrack_l4proto.h>
26#include <net/netfilter/nf_conntrack_ecache.h> 26#include <net/netfilter/nf_conntrack_ecache.h>
27 27
28/* Protects conntrack->proto.sctp */ 28/* Protects ct->proto.sctp */
29static DEFINE_RWLOCK(sctp_lock); 29static DEFINE_RWLOCK(sctp_lock);
30 30
31/* FIXME: Examine ipfilter's timeouts and conntrack transitions more 31/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
@@ -624,7 +624,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
624#endif 624#endif
625}; 625};
626 626
627int __init nf_conntrack_proto_sctp_init(void) 627static int __init nf_conntrack_proto_sctp_init(void)
628{ 628{
629 int ret; 629 int ret;
630 630
@@ -647,7 +647,7 @@ int __init nf_conntrack_proto_sctp_init(void)
647 return ret; 647 return ret;
648} 648}
649 649
650void __exit nf_conntrack_proto_sctp_fini(void) 650static void __exit nf_conntrack_proto_sctp_fini(void)
651{ 651{
652 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6); 652 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
653 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); 653 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 64c9b910419c..3e0cccae5636 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -26,7 +26,7 @@
26#include <net/netfilter/nf_conntrack_ecache.h> 26#include <net/netfilter/nf_conntrack_ecache.h>
27#include <net/netfilter/nf_log.h> 27#include <net/netfilter/nf_log.h>
28 28
29/* Protects conntrack->proto.tcp */ 29/* Protects ct->proto.tcp */
30static DEFINE_RWLOCK(tcp_lock); 30static DEFINE_RWLOCK(tcp_lock);
31 31
32/* "Be conservative in what you do, 32/* "Be conservative in what you do,
@@ -46,7 +46,7 @@ static int nf_ct_tcp_max_retrans __read_mostly = 3;
46 /* FIXME: Examine ipfilter's timeouts and conntrack transitions more 46 /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
47 closely. They're more complex. --RR */ 47 closely. They're more complex. --RR */
48 48
49static const char *tcp_conntrack_names[] = { 49static const char *const tcp_conntrack_names[] = {
50 "NONE", 50 "NONE",
51 "SYN_SENT", 51 "SYN_SENT",
52 "SYN_RECV", 52 "SYN_RECV",
@@ -261,7 +261,8 @@ static int tcp_pkt_to_tuple(const struct sk_buff *skb,
261 unsigned int dataoff, 261 unsigned int dataoff,
262 struct nf_conntrack_tuple *tuple) 262 struct nf_conntrack_tuple *tuple)
263{ 263{
264 struct tcphdr _hdr, *hp; 264 const struct tcphdr *hp;
265 struct tcphdr _hdr;
265 266
266 /* Actually only need first 8 bytes. */ 267 /* Actually only need first 8 bytes. */
267 hp = skb_header_pointer(skb, dataoff, 8, &_hdr); 268 hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
@@ -292,13 +293,12 @@ static int tcp_print_tuple(struct seq_file *s,
292} 293}
293 294
294/* Print out the private part of the conntrack. */ 295/* Print out the private part of the conntrack. */
295static int tcp_print_conntrack(struct seq_file *s, 296static int tcp_print_conntrack(struct seq_file *s, const struct nf_conn *ct)
296 const struct nf_conn *conntrack)
297{ 297{
298 enum tcp_conntrack state; 298 enum tcp_conntrack state;
299 299
300 read_lock_bh(&tcp_lock); 300 read_lock_bh(&tcp_lock);
301 state = conntrack->proto.tcp.state; 301 state = ct->proto.tcp.state;
302 read_unlock_bh(&tcp_lock); 302 read_unlock_bh(&tcp_lock);
303 303
304 return seq_printf(s, "%s ", tcp_conntrack_names[state]); 304 return seq_printf(s, "%s ", tcp_conntrack_names[state]);
@@ -344,7 +344,7 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph)
344static inline __u32 segment_seq_plus_len(__u32 seq, 344static inline __u32 segment_seq_plus_len(__u32 seq,
345 size_t len, 345 size_t len,
346 unsigned int dataoff, 346 unsigned int dataoff,
347 struct tcphdr *tcph) 347 const struct tcphdr *tcph)
348{ 348{
349 /* XXX Should I use payload length field in IP/IPv6 header ? 349 /* XXX Should I use payload length field in IP/IPv6 header ?
350 * - YK */ 350 * - YK */
@@ -363,11 +363,11 @@ static inline __u32 segment_seq_plus_len(__u32 seq,
363 */ 363 */
364static void tcp_options(const struct sk_buff *skb, 364static void tcp_options(const struct sk_buff *skb,
365 unsigned int dataoff, 365 unsigned int dataoff,
366 struct tcphdr *tcph, 366 const struct tcphdr *tcph,
367 struct ip_ct_tcp_state *state) 367 struct ip_ct_tcp_state *state)
368{ 368{
369 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; 369 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
370 unsigned char *ptr; 370 const unsigned char *ptr;
371 int length = (tcph->doff*4) - sizeof(struct tcphdr); 371 int length = (tcph->doff*4) - sizeof(struct tcphdr);
372 372
373 if (!length) 373 if (!length)
@@ -418,10 +418,10 @@ static void tcp_options(const struct sk_buff *skb,
418} 418}
419 419
420static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, 420static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
421 struct tcphdr *tcph, __u32 *sack) 421 const struct tcphdr *tcph, __u32 *sack)
422{ 422{
423 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; 423 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
424 unsigned char *ptr; 424 const unsigned char *ptr;
425 int length = (tcph->doff*4) - sizeof(struct tcphdr); 425 int length = (tcph->doff*4) - sizeof(struct tcphdr);
426 __u32 tmp; 426 __u32 tmp;
427 427
@@ -478,18 +478,18 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
478 } 478 }
479} 479}
480 480
481static int tcp_in_window(struct nf_conn *ct, 481static int tcp_in_window(const struct nf_conn *ct,
482 struct ip_ct_tcp *state, 482 struct ip_ct_tcp *state,
483 enum ip_conntrack_dir dir, 483 enum ip_conntrack_dir dir,
484 unsigned int index, 484 unsigned int index,
485 const struct sk_buff *skb, 485 const struct sk_buff *skb,
486 unsigned int dataoff, 486 unsigned int dataoff,
487 struct tcphdr *tcph, 487 const struct tcphdr *tcph,
488 int pf) 488 int pf)
489{ 489{
490 struct ip_ct_tcp_state *sender = &state->seen[dir]; 490 struct ip_ct_tcp_state *sender = &state->seen[dir];
491 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 491 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
492 struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 492 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
493 __u32 seq, ack, sack, end, win, swin; 493 __u32 seq, ack, sack, end, win, swin;
494 int res; 494 int res;
495 495
@@ -687,14 +687,14 @@ static int tcp_in_window(struct nf_conn *ct,
687#ifdef CONFIG_NF_NAT_NEEDED 687#ifdef CONFIG_NF_NAT_NEEDED
688/* Update sender->td_end after NAT successfully mangled the packet */ 688/* Update sender->td_end after NAT successfully mangled the packet */
689/* Caller must linearize skb at tcp header. */ 689/* Caller must linearize skb at tcp header. */
690void nf_conntrack_tcp_update(struct sk_buff *skb, 690void nf_conntrack_tcp_update(const struct sk_buff *skb,
691 unsigned int dataoff, 691 unsigned int dataoff,
692 struct nf_conn *conntrack, 692 struct nf_conn *ct,
693 int dir) 693 int dir)
694{ 694{
695 struct tcphdr *tcph = (void *)skb->data + dataoff; 695 const struct tcphdr *tcph = (const void *)skb->data + dataoff;
696 struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir]; 696 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
697 struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir]; 697 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[!dir];
698 __u32 end; 698 __u32 end;
699 699
700 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph); 700 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
@@ -703,9 +703,9 @@ void nf_conntrack_tcp_update(struct sk_buff *skb,
703 /* 703 /*
704 * We have to worry for the ack in the reply packet only... 704 * We have to worry for the ack in the reply packet only...
705 */ 705 */
706 if (after(end, conntrack->proto.tcp.seen[dir].td_end)) 706 if (after(end, ct->proto.tcp.seen[dir].td_end))
707 conntrack->proto.tcp.seen[dir].td_end = end; 707 ct->proto.tcp.seen[dir].td_end = end;
708 conntrack->proto.tcp.last_end = end; 708 ct->proto.tcp.last_end = end;
709 write_unlock_bh(&tcp_lock); 709 write_unlock_bh(&tcp_lock);
710 pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " 710 pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
711 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 711 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
727#define TH_CWR 0x80 727#define TH_CWR 0x80
728 728
729/* table of valid flag combinations - PUSH, ECE and CWR are always valid */ 729/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
730static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] = 730static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
731{ 731{
732 [TH_SYN] = 1, 732 [TH_SYN] = 1,
733 [TH_SYN|TH_URG] = 1, 733 [TH_SYN|TH_URG] = 1,
@@ -747,7 +747,8 @@ static int tcp_error(struct sk_buff *skb,
747 int pf, 747 int pf,
748 unsigned int hooknum) 748 unsigned int hooknum)
749{ 749{
750 struct tcphdr _tcph, *th; 750 const struct tcphdr *th;
751 struct tcphdr _tcph;
751 unsigned int tcplen = skb->len - dataoff; 752 unsigned int tcplen = skb->len - dataoff;
752 u_int8_t tcpflags; 753 u_int8_t tcpflags;
753 754
@@ -794,7 +795,7 @@ static int tcp_error(struct sk_buff *skb,
794} 795}
795 796
796/* Returns verdict for packet, or -1 for invalid. */ 797/* Returns verdict for packet, or -1 for invalid. */
797static int tcp_packet(struct nf_conn *conntrack, 798static int tcp_packet(struct nf_conn *ct,
798 const struct sk_buff *skb, 799 const struct sk_buff *skb,
799 unsigned int dataoff, 800 unsigned int dataoff,
800 enum ip_conntrack_info ctinfo, 801 enum ip_conntrack_info ctinfo,
@@ -804,7 +805,8 @@ static int tcp_packet(struct nf_conn *conntrack,
804 struct nf_conntrack_tuple *tuple; 805 struct nf_conntrack_tuple *tuple;
805 enum tcp_conntrack new_state, old_state; 806 enum tcp_conntrack new_state, old_state;
806 enum ip_conntrack_dir dir; 807 enum ip_conntrack_dir dir;
807 struct tcphdr *th, _tcph; 808 const struct tcphdr *th;
809 struct tcphdr _tcph;
808 unsigned long timeout; 810 unsigned long timeout;
809 unsigned int index; 811 unsigned int index;
810 812
@@ -812,26 +814,24 @@ static int tcp_packet(struct nf_conn *conntrack,
812 BUG_ON(th == NULL); 814 BUG_ON(th == NULL);
813 815
814 write_lock_bh(&tcp_lock); 816 write_lock_bh(&tcp_lock);
815 old_state = conntrack->proto.tcp.state; 817 old_state = ct->proto.tcp.state;
816 dir = CTINFO2DIR(ctinfo); 818 dir = CTINFO2DIR(ctinfo);
817 index = get_conntrack_index(th); 819 index = get_conntrack_index(th);
818 new_state = tcp_conntracks[dir][index][old_state]; 820 new_state = tcp_conntracks[dir][index][old_state];
819 tuple = &conntrack->tuplehash[dir].tuple; 821 tuple = &ct->tuplehash[dir].tuple;
820 822
821 switch (new_state) { 823 switch (new_state) {
822 case TCP_CONNTRACK_SYN_SENT: 824 case TCP_CONNTRACK_SYN_SENT:
823 if (old_state < TCP_CONNTRACK_TIME_WAIT) 825 if (old_state < TCP_CONNTRACK_TIME_WAIT)
824 break; 826 break;
825 if ((conntrack->proto.tcp.seen[!dir].flags & 827 if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_CLOSE_INIT)
826 IP_CT_TCP_FLAG_CLOSE_INIT) 828 || (ct->proto.tcp.last_dir == dir
827 || (conntrack->proto.tcp.last_dir == dir 829 && ct->proto.tcp.last_index == TCP_RST_SET)) {
828 && conntrack->proto.tcp.last_index == TCP_RST_SET)) {
829 /* Attempt to reopen a closed/aborted connection. 830 /* Attempt to reopen a closed/aborted connection.
830 * Delete this connection and look up again. */ 831 * Delete this connection and look up again. */
831 write_unlock_bh(&tcp_lock); 832 write_unlock_bh(&tcp_lock);
832 if (del_timer(&conntrack->timeout)) 833 if (del_timer(&ct->timeout))
833 conntrack->timeout.function((unsigned long) 834 ct->timeout.function((unsigned long)ct);
834 conntrack);
835 return -NF_REPEAT; 835 return -NF_REPEAT;
836 } 836 }
837 /* Fall through */ 837 /* Fall through */
@@ -843,10 +843,9 @@ static int tcp_packet(struct nf_conn *conntrack,
843 * c) ACK in reply direction after initial SYN in original. 843 * c) ACK in reply direction after initial SYN in original.
844 */ 844 */
845 if (index == TCP_SYNACK_SET 845 if (index == TCP_SYNACK_SET
846 && conntrack->proto.tcp.last_index == TCP_SYN_SET 846 && ct->proto.tcp.last_index == TCP_SYN_SET
847 && conntrack->proto.tcp.last_dir != dir 847 && ct->proto.tcp.last_dir != dir
848 && ntohl(th->ack_seq) == 848 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
849 conntrack->proto.tcp.last_end) {
850 /* This SYN/ACK acknowledges a SYN that we earlier 849 /* This SYN/ACK acknowledges a SYN that we earlier
851 * ignored as invalid. This means that the client and 850 * ignored as invalid. This means that the client and
852 * the server are both in sync, while the firewall is 851 * the server are both in sync, while the firewall is
@@ -858,15 +857,14 @@ static int tcp_packet(struct nf_conn *conntrack,
858 if (LOG_INVALID(IPPROTO_TCP)) 857 if (LOG_INVALID(IPPROTO_TCP))
859 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 858 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
860 "nf_ct_tcp: killing out of sync session "); 859 "nf_ct_tcp: killing out of sync session ");
861 if (del_timer(&conntrack->timeout)) 860 if (del_timer(&ct->timeout))
862 conntrack->timeout.function((unsigned long) 861 ct->timeout.function((unsigned long)ct);
863 conntrack);
864 return -NF_DROP; 862 return -NF_DROP;
865 } 863 }
866 conntrack->proto.tcp.last_index = index; 864 ct->proto.tcp.last_index = index;
867 conntrack->proto.tcp.last_dir = dir; 865 ct->proto.tcp.last_dir = dir;
868 conntrack->proto.tcp.last_seq = ntohl(th->seq); 866 ct->proto.tcp.last_seq = ntohl(th->seq);
869 conntrack->proto.tcp.last_end = 867 ct->proto.tcp.last_end =
870 segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th); 868 segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
871 869
872 write_unlock_bh(&tcp_lock); 870 write_unlock_bh(&tcp_lock);
@@ -885,11 +883,11 @@ static int tcp_packet(struct nf_conn *conntrack,
885 return -NF_ACCEPT; 883 return -NF_ACCEPT;
886 case TCP_CONNTRACK_CLOSE: 884 case TCP_CONNTRACK_CLOSE:
887 if (index == TCP_RST_SET 885 if (index == TCP_RST_SET
888 && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) 886 && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
889 && conntrack->proto.tcp.last_index == TCP_SYN_SET) 887 && ct->proto.tcp.last_index == TCP_SYN_SET)
890 || (!test_bit(IPS_ASSURED_BIT, &conntrack->status) 888 || (!test_bit(IPS_ASSURED_BIT, &ct->status)
891 && conntrack->proto.tcp.last_index == TCP_ACK_SET)) 889 && ct->proto.tcp.last_index == TCP_ACK_SET))
892 && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) { 890 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
893 /* RST sent to invalid SYN or ACK we had let through 891 /* RST sent to invalid SYN or ACK we had let through
894 * at a) and c) above: 892 * at a) and c) above:
895 * 893 *
@@ -907,15 +905,15 @@ static int tcp_packet(struct nf_conn *conntrack,
907 break; 905 break;
908 } 906 }
909 907
910 if (!tcp_in_window(conntrack, &conntrack->proto.tcp, dir, index, 908 if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
911 skb, dataoff, th, pf)) { 909 skb, dataoff, th, pf)) {
912 write_unlock_bh(&tcp_lock); 910 write_unlock_bh(&tcp_lock);
913 return -NF_ACCEPT; 911 return -NF_ACCEPT;
914 } 912 }
915 in_window: 913 in_window:
916 /* From now on we have got in-window packets */ 914 /* From now on we have got in-window packets */
917 conntrack->proto.tcp.last_index = index; 915 ct->proto.tcp.last_index = index;
918 conntrack->proto.tcp.last_dir = dir; 916 ct->proto.tcp.last_dir = dir;
919 917
920 pr_debug("tcp_conntracks: "); 918 pr_debug("tcp_conntracks: ");
921 NF_CT_DUMP_TUPLE(tuple); 919 NF_CT_DUMP_TUPLE(tuple);
@@ -924,12 +922,12 @@ static int tcp_packet(struct nf_conn *conntrack,
924 (th->fin ? 1 : 0), (th->rst ? 1 : 0), 922 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
925 old_state, new_state); 923 old_state, new_state);
926 924
927 conntrack->proto.tcp.state = new_state; 925 ct->proto.tcp.state = new_state;
928 if (old_state != new_state 926 if (old_state != new_state
929 && (new_state == TCP_CONNTRACK_FIN_WAIT 927 && (new_state == TCP_CONNTRACK_FIN_WAIT
930 || new_state == TCP_CONNTRACK_CLOSE)) 928 || new_state == TCP_CONNTRACK_CLOSE))
931 conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 929 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
932 timeout = conntrack->proto.tcp.retrans >= nf_ct_tcp_max_retrans 930 timeout = ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans
933 && tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans 931 && tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
934 ? nf_ct_tcp_timeout_max_retrans : tcp_timeouts[new_state]; 932 ? nf_ct_tcp_timeout_max_retrans : tcp_timeouts[new_state];
935 write_unlock_bh(&tcp_lock); 933 write_unlock_bh(&tcp_lock);
@@ -938,41 +936,41 @@ static int tcp_packet(struct nf_conn *conntrack,
938 if (new_state != old_state) 936 if (new_state != old_state)
939 nf_conntrack_event_cache(IPCT_PROTOINFO, skb); 937 nf_conntrack_event_cache(IPCT_PROTOINFO, skb);
940 938
941 if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { 939 if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
942 /* If only reply is a RST, we can consider ourselves not to 940 /* If only reply is a RST, we can consider ourselves not to
943 have an established connection: this is a fairly common 941 have an established connection: this is a fairly common
944 problem case, so we can delete the conntrack 942 problem case, so we can delete the conntrack
945 immediately. --RR */ 943 immediately. --RR */
946 if (th->rst) { 944 if (th->rst) {
947 if (del_timer(&conntrack->timeout)) 945 if (del_timer(&ct->timeout))
948 conntrack->timeout.function((unsigned long) 946 ct->timeout.function((unsigned long)ct);
949 conntrack);
950 return NF_ACCEPT; 947 return NF_ACCEPT;
951 } 948 }
952 } else if (!test_bit(IPS_ASSURED_BIT, &conntrack->status) 949 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
953 && (old_state == TCP_CONNTRACK_SYN_RECV 950 && (old_state == TCP_CONNTRACK_SYN_RECV
954 || old_state == TCP_CONNTRACK_ESTABLISHED) 951 || old_state == TCP_CONNTRACK_ESTABLISHED)
955 && new_state == TCP_CONNTRACK_ESTABLISHED) { 952 && new_state == TCP_CONNTRACK_ESTABLISHED) {
956 /* Set ASSURED if we see see valid ack in ESTABLISHED 953 /* Set ASSURED if we see see valid ack in ESTABLISHED
957 after SYN_RECV or a valid answer for a picked up 954 after SYN_RECV or a valid answer for a picked up
958 connection. */ 955 connection. */
959 set_bit(IPS_ASSURED_BIT, &conntrack->status); 956 set_bit(IPS_ASSURED_BIT, &ct->status);
960 nf_conntrack_event_cache(IPCT_STATUS, skb); 957 nf_conntrack_event_cache(IPCT_STATUS, skb);
961 } 958 }
962 nf_ct_refresh_acct(conntrack, ctinfo, skb, timeout); 959 nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
963 960
964 return NF_ACCEPT; 961 return NF_ACCEPT;
965} 962}
966 963
967/* Called when a new connection for this protocol found. */ 964/* Called when a new connection for this protocol found. */
968static int tcp_new(struct nf_conn *conntrack, 965static int tcp_new(struct nf_conn *ct,
969 const struct sk_buff *skb, 966 const struct sk_buff *skb,
970 unsigned int dataoff) 967 unsigned int dataoff)
971{ 968{
972 enum tcp_conntrack new_state; 969 enum tcp_conntrack new_state;
973 struct tcphdr *th, _tcph; 970 const struct tcphdr *th;
974 struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0]; 971 struct tcphdr _tcph;
975 struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[1]; 972 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
973 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
976 974
977 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); 975 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
978 BUG_ON(th == NULL); 976 BUG_ON(th == NULL);
@@ -990,17 +988,17 @@ static int tcp_new(struct nf_conn *conntrack,
990 988
991 if (new_state == TCP_CONNTRACK_SYN_SENT) { 989 if (new_state == TCP_CONNTRACK_SYN_SENT) {
992 /* SYN packet */ 990 /* SYN packet */
993 conntrack->proto.tcp.seen[0].td_end = 991 ct->proto.tcp.seen[0].td_end =
994 segment_seq_plus_len(ntohl(th->seq), skb->len, 992 segment_seq_plus_len(ntohl(th->seq), skb->len,
995 dataoff, th); 993 dataoff, th);
996 conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window); 994 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
997 if (conntrack->proto.tcp.seen[0].td_maxwin == 0) 995 if (ct->proto.tcp.seen[0].td_maxwin == 0)
998 conntrack->proto.tcp.seen[0].td_maxwin = 1; 996 ct->proto.tcp.seen[0].td_maxwin = 1;
999 conntrack->proto.tcp.seen[0].td_maxend = 997 ct->proto.tcp.seen[0].td_maxend =
1000 conntrack->proto.tcp.seen[0].td_end; 998 ct->proto.tcp.seen[0].td_end;
1001 999
1002 tcp_options(skb, dataoff, th, &conntrack->proto.tcp.seen[0]); 1000 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
1003 conntrack->proto.tcp.seen[1].flags = 0; 1001 ct->proto.tcp.seen[1].flags = 0;
1004 } else if (nf_ct_tcp_loose == 0) { 1002 } else if (nf_ct_tcp_loose == 0) {
1005 /* Don't try to pick up connections. */ 1003 /* Don't try to pick up connections. */
1006 return 0; 1004 return 0;
@@ -1010,32 +1008,32 @@ static int tcp_new(struct nf_conn *conntrack,
1010 * its history is lost for us. 1008 * its history is lost for us.
1011 * Let's try to use the data from the packet. 1009 * Let's try to use the data from the packet.
1012 */ 1010 */
1013 conntrack->proto.tcp.seen[0].td_end = 1011 ct->proto.tcp.seen[0].td_end =
1014 segment_seq_plus_len(ntohl(th->seq), skb->len, 1012 segment_seq_plus_len(ntohl(th->seq), skb->len,
1015 dataoff, th); 1013 dataoff, th);
1016 conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window); 1014 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
1017 if (conntrack->proto.tcp.seen[0].td_maxwin == 0) 1015 if (ct->proto.tcp.seen[0].td_maxwin == 0)
1018 conntrack->proto.tcp.seen[0].td_maxwin = 1; 1016 ct->proto.tcp.seen[0].td_maxwin = 1;
1019 conntrack->proto.tcp.seen[0].td_maxend = 1017 ct->proto.tcp.seen[0].td_maxend =
1020 conntrack->proto.tcp.seen[0].td_end + 1018 ct->proto.tcp.seen[0].td_end +
1021 conntrack->proto.tcp.seen[0].td_maxwin; 1019 ct->proto.tcp.seen[0].td_maxwin;
1022 conntrack->proto.tcp.seen[0].td_scale = 0; 1020 ct->proto.tcp.seen[0].td_scale = 0;
1023 1021
1024 /* We assume SACK and liberal window checking to handle 1022 /* We assume SACK and liberal window checking to handle
1025 * window scaling */ 1023 * window scaling */
1026 conntrack->proto.tcp.seen[0].flags = 1024 ct->proto.tcp.seen[0].flags =
1027 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | 1025 ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
1028 IP_CT_TCP_FLAG_BE_LIBERAL; 1026 IP_CT_TCP_FLAG_BE_LIBERAL;
1029 } 1027 }
1030 1028
1031 conntrack->proto.tcp.seen[1].td_end = 0; 1029 ct->proto.tcp.seen[1].td_end = 0;
1032 conntrack->proto.tcp.seen[1].td_maxend = 0; 1030 ct->proto.tcp.seen[1].td_maxend = 0;
1033 conntrack->proto.tcp.seen[1].td_maxwin = 1; 1031 ct->proto.tcp.seen[1].td_maxwin = 1;
1034 conntrack->proto.tcp.seen[1].td_scale = 0; 1032 ct->proto.tcp.seen[1].td_scale = 0;
1035 1033
1036 /* tcp_packet will set them */ 1034 /* tcp_packet will set them */
1037 conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; 1035 ct->proto.tcp.state = TCP_CONNTRACK_NONE;
1038 conntrack->proto.tcp.last_index = TCP_NONE_SET; 1036 ct->proto.tcp.last_index = TCP_NONE_SET;
1039 1037
1040 pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " 1038 pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
1041 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 1039 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
@@ -1098,16 +1096,16 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1098 1096
1099static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) 1097static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1100{ 1098{
1101 struct nlattr *attr = cda[CTA_PROTOINFO_TCP]; 1099 struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1102 struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1]; 1100 struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1103 int err; 1101 int err;
1104 1102
1105 /* updates could not contain anything about the private 1103 /* updates could not contain anything about the private
1106 * protocol info, in that case skip the parsing */ 1104 * protocol info, in that case skip the parsing */
1107 if (!attr) 1105 if (!pattr)
1108 return 0; 1106 return 0;
1109 1107
1110 err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr, tcp_nla_policy); 1108 err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr, tcp_nla_policy);
1111 if (err < 0) 1109 if (err < 0)
1112 return err; 1110 return err;
1113 1111
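
The nlattr_to_tcp() change above is only a local rename (attr becomes pattr), but the surrounding code is a useful reference for the nested-attribute pattern ctnetlink relies on: the outer attribute is expanded into a table of inner attributes and validated against an nla_policy before any field is read. An illustrative, self-contained sketch; the EXAMPLE_A_* names and example_policy are invented for this sketch and do not exist in the kernel:

#include <net/netlink.h>
#include <linux/errno.h>

enum {
        EXAMPLE_A_UNSPEC,
        EXAMPLE_A_STATE,
        __EXAMPLE_A_MAX
};
#define EXAMPLE_A_MAX (__EXAMPLE_A_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_A_MAX + 1] = {
        [EXAMPLE_A_STATE] = { .type = NLA_U8 },
};

static int example_parse(struct nlattr *pattr, u8 *state)
{
        struct nlattr *tb[EXAMPLE_A_MAX + 1];
        int err;

        /* Split the outer attribute into tb[] and validate types. */
        err = nla_parse_nested(tb, EXAMPLE_A_MAX, pattr, example_policy);
        if (err < 0)
                return err;
        if (tb[EXAMPLE_A_STATE] == NULL)
                return -EINVAL;

        *state = nla_get_u8(tb[EXAMPLE_A_STATE]);
        return 0;
}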
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 384875411082..b8a35cc06416 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -30,7 +30,8 @@ static int udp_pkt_to_tuple(const struct sk_buff *skb,
30 unsigned int dataoff, 30 unsigned int dataoff,
31 struct nf_conntrack_tuple *tuple) 31 struct nf_conntrack_tuple *tuple)
32{ 32{
33 struct udphdr _hdr, *hp; 33 const struct udphdr *hp;
34 struct udphdr _hdr;
34 35
35 /* Actually only need first 8 bytes. */ 36 /* Actually only need first 8 bytes. */
36 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 37 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
@@ -61,7 +62,7 @@ static int udp_print_tuple(struct seq_file *s,
61} 62}
62 63
63/* Returns verdict for packet, and may modify conntracktype */ 64/* Returns verdict for packet, and may modify conntracktype */
64static int udp_packet(struct nf_conn *conntrack, 65static int udp_packet(struct nf_conn *ct,
65 const struct sk_buff *skb, 66 const struct sk_buff *skb,
66 unsigned int dataoff, 67 unsigned int dataoff,
67 enum ip_conntrack_info ctinfo, 68 enum ip_conntrack_info ctinfo,
@@ -70,20 +71,19 @@ static int udp_packet(struct nf_conn *conntrack,
70{ 71{
71 /* If we've seen traffic both ways, this is some kind of UDP 72 /* If we've seen traffic both ways, this is some kind of UDP
72 stream. Extend timeout. */ 73 stream. Extend timeout. */
73 if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { 74 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
74 nf_ct_refresh_acct(conntrack, ctinfo, skb, 75 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
75 nf_ct_udp_timeout_stream);
76 /* Also, more likely to be important, and not a probe */ 76 /* Also, more likely to be important, and not a probe */
77 if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status)) 77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
78 nf_conntrack_event_cache(IPCT_STATUS, skb); 78 nf_conntrack_event_cache(IPCT_STATUS, skb);
79 } else 79 } else
80 nf_ct_refresh_acct(conntrack, ctinfo, skb, nf_ct_udp_timeout); 80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
81 81
82 return NF_ACCEPT; 82 return NF_ACCEPT;
83} 83}
84 84
85/* Called when a new connection for this protocol found. */ 85/* Called when a new connection for this protocol found. */
86static int udp_new(struct nf_conn *conntrack, const struct sk_buff *skb, 86static int udp_new(struct nf_conn *ct, const struct sk_buff *skb,
87 unsigned int dataoff) 87 unsigned int dataoff)
88{ 88{
89 return 1; 89 return 1;
@@ -95,7 +95,8 @@ static int udp_error(struct sk_buff *skb, unsigned int dataoff,
95 unsigned int hooknum) 95 unsigned int hooknum)
96{ 96{
97 unsigned int udplen = skb->len - dataoff; 97 unsigned int udplen = skb->len - dataoff;
98 struct udphdr _hdr, *hdr; 98 const struct udphdr *hdr;
99 struct udphdr _hdr;
99 100
100 /* Header is too small? */ 101 /* Header is too small? */
101 hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 102 hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 070056d9bcd6..9dd03c7aeac6 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -31,7 +31,8 @@ static int udplite_pkt_to_tuple(const struct sk_buff *skb,
31 unsigned int dataoff, 31 unsigned int dataoff,
32 struct nf_conntrack_tuple *tuple) 32 struct nf_conntrack_tuple *tuple)
33{ 33{
34 struct udphdr _hdr, *hp; 34 const struct udphdr *hp;
35 struct udphdr _hdr;
35 36
36 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 37 hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
37 if (hp == NULL) 38 if (hp == NULL)
@@ -60,7 +61,7 @@ static int udplite_print_tuple(struct seq_file *s,
60} 61}
61 62
62/* Returns verdict for packet, and may modify conntracktype */ 63/* Returns verdict for packet, and may modify conntracktype */
63static int udplite_packet(struct nf_conn *conntrack, 64static int udplite_packet(struct nf_conn *ct,
64 const struct sk_buff *skb, 65 const struct sk_buff *skb,
65 unsigned int dataoff, 66 unsigned int dataoff,
66 enum ip_conntrack_info ctinfo, 67 enum ip_conntrack_info ctinfo,
@@ -69,21 +70,20 @@ static int udplite_packet(struct nf_conn *conntrack,
69{ 70{
70 /* If we've seen traffic both ways, this is some kind of UDP 71 /* If we've seen traffic both ways, this is some kind of UDP
71 stream. Extend timeout. */ 72 stream. Extend timeout. */
72 if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { 73 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
73 nf_ct_refresh_acct(conntrack, ctinfo, skb, 74 nf_ct_refresh_acct(ct, ctinfo, skb,
74 nf_ct_udplite_timeout_stream); 75 nf_ct_udplite_timeout_stream);
75 /* Also, more likely to be important, and not a probe */ 76 /* Also, more likely to be important, and not a probe */
76 if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status)) 77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
77 nf_conntrack_event_cache(IPCT_STATUS, skb); 78 nf_conntrack_event_cache(IPCT_STATUS, skb);
78 } else 79 } else
79 nf_ct_refresh_acct(conntrack, ctinfo, skb, 80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
80 nf_ct_udplite_timeout);
81 81
82 return NF_ACCEPT; 82 return NF_ACCEPT;
83} 83}
84 84
85/* Called when a new connection for this protocol found. */ 85/* Called when a new connection for this protocol found. */
86static int udplite_new(struct nf_conn *conntrack, const struct sk_buff *skb, 86static int udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
87 unsigned int dataoff) 87 unsigned int dataoff)
88{ 88{
89 return 1; 89 return 1;
@@ -95,7 +95,8 @@ static int udplite_error(struct sk_buff *skb, unsigned int dataoff,
95 unsigned int hooknum) 95 unsigned int hooknum)
96{ 96{
97 unsigned int udplen = skb->len - dataoff; 97 unsigned int udplen = skb->len - dataoff;
98 struct udphdr _hdr, *hdr; 98 const struct udphdr *hdr;
99 struct udphdr _hdr;
99 unsigned int cscov; 100 unsigned int cscov;
100 101
101 /* Header is too small? */ 102 /* Header is too small? */
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index b5a16c6e21c2..a70051d741a7 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -62,8 +62,9 @@ static int help(struct sk_buff *skb,
62 enum ip_conntrack_info ctinfo) 62 enum ip_conntrack_info ctinfo)
63{ 63{
64 unsigned int dataoff, datalen; 64 unsigned int dataoff, datalen;
65 struct tcphdr _tcph, *th; 65 const struct tcphdr *th;
66 char *sb_ptr; 66 struct tcphdr _tcph;
67 void *sb_ptr;
67 int ret = NF_ACCEPT; 68 int ret = NF_ACCEPT;
68 int dir = CTINFO2DIR(ctinfo); 69 int dir = CTINFO2DIR(ctinfo);
69 struct nf_ct_sane_master *ct_sane_info; 70 struct nf_ct_sane_master *ct_sane_info;
@@ -99,7 +100,7 @@ static int help(struct sk_buff *skb,
99 if (datalen != sizeof(struct sane_request)) 100 if (datalen != sizeof(struct sane_request))
100 goto out; 101 goto out;
101 102
102 req = (struct sane_request *)sb_ptr; 103 req = sb_ptr;
103 if (req->RPC_code != htonl(SANE_NET_START)) { 104 if (req->RPC_code != htonl(SANE_NET_START)) {
104 /* Not an interesting command */ 105 /* Not an interesting command */
105 ct_sane_info->state = SANE_STATE_NORMAL; 106 ct_sane_info->state = SANE_STATE_NORMAL;
@@ -123,7 +124,7 @@ static int help(struct sk_buff *skb,
123 goto out; 124 goto out;
124 } 125 }
125 126
126 reply = (struct sane_reply_net_start *)sb_ptr; 127 reply = sb_ptr;
127 if (reply->status != htonl(SANE_STATUS_SUCCESS)) { 128 if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
128 /* saned refused the command */ 129 /* saned refused the command */
129 pr_debug("nf_ct_sane: unsuccessful SANE_STATUS = %u\n", 130 pr_debug("nf_ct_sane: unsuccessful SANE_STATUS = %u\n",
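
The sb_ptr change above is purely about C pointer-conversion rules: once the cursor into the SANE payload is a void *, the later "req = sb_ptr;" and "reply = sb_ptr;" assignments need no casts, since void * converts implicitly to any object pointer type. A tiny illustration with an invented structure:

/* Illustration only; sane_request_example is not a real kernel structure. */
struct sane_request_example {
        unsigned int rpc_code;
};

static unsigned int example_peek_code(void *sb_ptr)
{
        const struct sane_request_example *req = sb_ptr;  /* no cast needed */

        return req->rpc_code;
}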
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 47d8947cf263..c521c891d351 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -28,7 +28,7 @@ MODULE_ALIAS("ip_conntrack_sip");
28 28
29#define MAX_PORTS 8 29#define MAX_PORTS 8
30static unsigned short ports[MAX_PORTS]; 30static unsigned short ports[MAX_PORTS];
31static int ports_c; 31static unsigned int ports_c;
32module_param_array(ports, ushort, &ports_c, 0400); 32module_param_array(ports, ushort, &ports_c, 0400);
33MODULE_PARM_DESC(ports, "port numbers of SIP servers"); 33MODULE_PARM_DESC(ports, "port numbers of SIP servers");
34 34
@@ -48,10 +48,10 @@ unsigned int (*nf_nat_sdp_hook)(struct sk_buff *skb,
48 const char *dptr) __read_mostly; 48 const char *dptr) __read_mostly;
49EXPORT_SYMBOL_GPL(nf_nat_sdp_hook); 49EXPORT_SYMBOL_GPL(nf_nat_sdp_hook);
50 50
51static int digits_len(struct nf_conn *, const char *, const char *, int *); 51static int digits_len(const struct nf_conn *, const char *, const char *, int *);
52static int epaddr_len(struct nf_conn *, const char *, const char *, int *); 52static int epaddr_len(const struct nf_conn *, const char *, const char *, int *);
53static int skp_digits_len(struct nf_conn *, const char *, const char *, int *); 53static int skp_digits_len(const struct nf_conn *, const char *, const char *, int *);
54static int skp_epaddr_len(struct nf_conn *, const char *, const char *, int *); 54static int skp_epaddr_len(const struct nf_conn *, const char *, const char *, int *);
55 55
56struct sip_header_nfo { 56struct sip_header_nfo {
57 const char *lname; 57 const char *lname;
@@ -61,7 +61,7 @@ struct sip_header_nfo {
61 size_t snlen; 61 size_t snlen;
62 size_t ln_strlen; 62 size_t ln_strlen;
63 int case_sensitive; 63 int case_sensitive;
64 int (*match_len)(struct nf_conn *, const char *, 64 int (*match_len)(const struct nf_conn *, const char *,
65 const char *, int *); 65 const char *, int *);
66}; 66};
67 67
@@ -225,7 +225,7 @@ const char *ct_sip_search(const char *needle, const char *haystack,
225} 225}
226EXPORT_SYMBOL_GPL(ct_sip_search); 226EXPORT_SYMBOL_GPL(ct_sip_search);
227 227
228static int digits_len(struct nf_conn *ct, const char *dptr, 228static int digits_len(const struct nf_conn *ct, const char *dptr,
229 const char *limit, int *shift) 229 const char *limit, int *shift)
230{ 230{
231 int len = 0; 231 int len = 0;
@@ -237,7 +237,7 @@ static int digits_len(struct nf_conn *ct, const char *dptr,
237} 237}
238 238
239/* get digits length, skipping blank spaces. */ 239/* get digits length, skipping blank spaces. */
240static int skp_digits_len(struct nf_conn *ct, const char *dptr, 240static int skp_digits_len(const struct nf_conn *ct, const char *dptr,
241 const char *limit, int *shift) 241 const char *limit, int *shift)
242{ 242{
243 for (; dptr <= limit && *dptr == ' '; dptr++) 243 for (; dptr <= limit && *dptr == ' '; dptr++)
@@ -246,8 +246,9 @@ static int skp_digits_len(struct nf_conn *ct, const char *dptr,
246 return digits_len(ct, dptr, limit, shift); 246 return digits_len(ct, dptr, limit, shift);
247} 247}
248 248
249static int parse_addr(struct nf_conn *ct, const char *cp, const char **endp, 249static int parse_addr(const struct nf_conn *ct, const char *cp,
250 union nf_inet_addr *addr, const char *limit) 250 const char **endp, union nf_inet_addr *addr,
251 const char *limit)
251{ 252{
252 const char *end; 253 const char *end;
253 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; 254 int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
@@ -272,7 +273,7 @@ static int parse_addr(struct nf_conn *ct, const char *cp, const char **endp,
272} 273}
273 274
274/* skip ip address. returns its length. */ 275/* skip ip address. returns its length. */
275static int epaddr_len(struct nf_conn *ct, const char *dptr, 276static int epaddr_len(const struct nf_conn *ct, const char *dptr,
276 const char *limit, int *shift) 277 const char *limit, int *shift)
277{ 278{
278 union nf_inet_addr addr; 279 union nf_inet_addr addr;
@@ -292,7 +293,7 @@ static int epaddr_len(struct nf_conn *ct, const char *dptr,
292} 293}
293 294
294/* get address length, skiping user info. */ 295/* get address length, skiping user info. */
295static int skp_epaddr_len(struct nf_conn *ct, const char *dptr, 296static int skp_epaddr_len(const struct nf_conn *ct, const char *dptr,
296 const char *limit, int *shift) 297 const char *limit, int *shift)
297{ 298{
298 const char *start = dptr; 299 const char *start = dptr;
@@ -319,7 +320,7 @@ static int skp_epaddr_len(struct nf_conn *ct, const char *dptr,
319} 320}
320 321
321/* Returns 0 if not found, -1 error parsing. */ 322/* Returns 0 if not found, -1 error parsing. */
322int ct_sip_get_info(struct nf_conn *ct, 323int ct_sip_get_info(const struct nf_conn *ct,
323 const char *dptr, size_t dlen, 324 const char *dptr, size_t dlen,
324 unsigned int *matchoff, 325 unsigned int *matchoff,
325 unsigned int *matchlen, 326 unsigned int *matchlen,
@@ -407,7 +408,7 @@ static int sip_help(struct sk_buff *skb,
407 unsigned int dataoff, datalen; 408 unsigned int dataoff, datalen;
408 const char *dptr; 409 const char *dptr;
409 int ret = NF_ACCEPT; 410 int ret = NF_ACCEPT;
410 int matchoff, matchlen; 411 unsigned int matchoff, matchlen;
411 u_int16_t port; 412 u_int16_t port;
412 enum sip_header_pos pos; 413 enum sip_header_pos pos;
413 typeof(nf_nat_sip_hook) nf_nat_sip; 414 typeof(nf_nat_sip_hook) nf_nat_sip;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 696074a037c1..e88e96af613d 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -31,8 +31,8 @@ MODULE_LICENSE("GPL");
31#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
32int 32int
33print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, 33print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
34 struct nf_conntrack_l3proto *l3proto, 34 const struct nf_conntrack_l3proto *l3proto,
35 struct nf_conntrack_l4proto *l4proto) 35 const struct nf_conntrack_l4proto *l4proto)
36{ 36{
37 return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple); 37 return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple);
38} 38}
@@ -58,12 +58,14 @@ struct ct_iter_state {
58static struct hlist_node *ct_get_first(struct seq_file *seq) 58static struct hlist_node *ct_get_first(struct seq_file *seq)
59{ 59{
60 struct ct_iter_state *st = seq->private; 60 struct ct_iter_state *st = seq->private;
61 struct hlist_node *n;
61 62
62 for (st->bucket = 0; 63 for (st->bucket = 0;
63 st->bucket < nf_conntrack_htable_size; 64 st->bucket < nf_conntrack_htable_size;
64 st->bucket++) { 65 st->bucket++) {
65 if (!hlist_empty(&nf_conntrack_hash[st->bucket])) 66 n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
66 return nf_conntrack_hash[st->bucket].first; 67 if (n)
68 return n;
67 } 69 }
68 return NULL; 70 return NULL;
69} 71}
@@ -73,11 +75,11 @@ static struct hlist_node *ct_get_next(struct seq_file *seq,
73{ 75{
74 struct ct_iter_state *st = seq->private; 76 struct ct_iter_state *st = seq->private;
75 77
76 head = head->next; 78 head = rcu_dereference(head->next);
77 while (head == NULL) { 79 while (head == NULL) {
78 if (++st->bucket >= nf_conntrack_htable_size) 80 if (++st->bucket >= nf_conntrack_htable_size)
79 return NULL; 81 return NULL;
80 head = nf_conntrack_hash[st->bucket].first; 82 head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
81 } 83 }
82 return head; 84 return head;
83} 85}
@@ -93,8 +95,9 @@ static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
93} 95}
94 96
95static void *ct_seq_start(struct seq_file *seq, loff_t *pos) 97static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
98 __acquires(RCU)
96{ 99{
97 read_lock_bh(&nf_conntrack_lock); 100 rcu_read_lock();
98 return ct_get_idx(seq, *pos); 101 return ct_get_idx(seq, *pos);
99} 102}
100 103
@@ -105,79 +108,80 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
105} 108}
106 109
107static void ct_seq_stop(struct seq_file *s, void *v) 110static void ct_seq_stop(struct seq_file *s, void *v)
111 __releases(RCU)
108{ 112{
109 read_unlock_bh(&nf_conntrack_lock); 113 rcu_read_unlock();
110} 114}
111 115
112/* return 0 on success, 1 in case of error */ 116/* return 0 on success, 1 in case of error */
113static int ct_seq_show(struct seq_file *s, void *v) 117static int ct_seq_show(struct seq_file *s, void *v)
114{ 118{
115 const struct nf_conntrack_tuple_hash *hash = v; 119 const struct nf_conntrack_tuple_hash *hash = v;
116 const struct nf_conn *conntrack = nf_ct_tuplehash_to_ctrack(hash); 120 const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
117 struct nf_conntrack_l3proto *l3proto; 121 const struct nf_conntrack_l3proto *l3proto;
118 struct nf_conntrack_l4proto *l4proto; 122 const struct nf_conntrack_l4proto *l4proto;
119 123
120 NF_CT_ASSERT(conntrack); 124 NF_CT_ASSERT(ct);
121 125
122 /* we only want to print DIR_ORIGINAL */ 126 /* we only want to print DIR_ORIGINAL */
123 if (NF_CT_DIRECTION(hash)) 127 if (NF_CT_DIRECTION(hash))
124 return 0; 128 return 0;
125 129
126 l3proto = __nf_ct_l3proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL] 130 l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
127 .tuple.src.l3num); 131 .tuple.src.l3num);
128 132
129 NF_CT_ASSERT(l3proto); 133 NF_CT_ASSERT(l3proto);
130 l4proto = __nf_ct_l4proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL] 134 l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
131 .tuple.src.l3num, 135 .tuple.src.l3num,
132 conntrack->tuplehash[IP_CT_DIR_ORIGINAL] 136 ct->tuplehash[IP_CT_DIR_ORIGINAL]
133 .tuple.dst.protonum); 137 .tuple.dst.protonum);
134 NF_CT_ASSERT(l4proto); 138 NF_CT_ASSERT(l4proto);
135 139
136 if (seq_printf(s, "%-8s %u %-8s %u %ld ", 140 if (seq_printf(s, "%-8s %u %-8s %u %ld ",
137 l3proto->name, 141 l3proto->name,
138 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, 142 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num,
139 l4proto->name, 143 l4proto->name,
140 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum, 144 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
141 timer_pending(&conntrack->timeout) 145 timer_pending(&ct->timeout)
142 ? (long)(conntrack->timeout.expires - jiffies)/HZ : 0) != 0) 146 ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
143 return -ENOSPC; 147 return -ENOSPC;
144 148
145 if (l4proto->print_conntrack && l4proto->print_conntrack(s, conntrack)) 149 if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
146 return -ENOSPC; 150 return -ENOSPC;
147 151
148 if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 152 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
149 l3proto, l4proto)) 153 l3proto, l4proto))
150 return -ENOSPC; 154 return -ENOSPC;
151 155
152 if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL])) 156 if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
153 return -ENOSPC; 157 return -ENOSPC;
154 158
155 if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status))) 159 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
156 if (seq_printf(s, "[UNREPLIED] ")) 160 if (seq_printf(s, "[UNREPLIED] "))
157 return -ENOSPC; 161 return -ENOSPC;
158 162
159 if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple, 163 if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
160 l3proto, l4proto)) 164 l3proto, l4proto))
161 return -ENOSPC; 165 return -ENOSPC;
162 166
163 if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY])) 167 if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
164 return -ENOSPC; 168 return -ENOSPC;
165 169
166 if (test_bit(IPS_ASSURED_BIT, &conntrack->status)) 170 if (test_bit(IPS_ASSURED_BIT, &ct->status))
167 if (seq_printf(s, "[ASSURED] ")) 171 if (seq_printf(s, "[ASSURED] "))
168 return -ENOSPC; 172 return -ENOSPC;
169 173
170#if defined(CONFIG_NF_CONNTRACK_MARK) 174#if defined(CONFIG_NF_CONNTRACK_MARK)
171 if (seq_printf(s, "mark=%u ", conntrack->mark)) 175 if (seq_printf(s, "mark=%u ", ct->mark))
172 return -ENOSPC; 176 return -ENOSPC;
173#endif 177#endif
174 178
175#ifdef CONFIG_NF_CONNTRACK_SECMARK 179#ifdef CONFIG_NF_CONNTRACK_SECMARK
176 if (seq_printf(s, "secmark=%u ", conntrack->secmark)) 180 if (seq_printf(s, "secmark=%u ", ct->secmark))
177 return -ENOSPC; 181 return -ENOSPC;
178#endif 182#endif
179 183
180 if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use))) 184 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
181 return -ENOSPC; 185 return -ENOSPC;
182 186
183 return 0; 187 return 0;
@@ -242,7 +246,7 @@ static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
242static int ct_cpu_seq_show(struct seq_file *seq, void *v) 246static int ct_cpu_seq_show(struct seq_file *seq, void *v)
243{ 247{
244 unsigned int nr_conntracks = atomic_read(&nf_conntrack_count); 248 unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
245 struct ip_conntrack_stat *st = v; 249 const struct ip_conntrack_stat *st = v;
246 250
247 if (v == SEQ_START_TOKEN) { 251 if (v == SEQ_START_TOKEN) {
248 seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n"); 252 seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
@@ -380,7 +384,7 @@ static ctl_table nf_ct_netfilter_table[] = {
380 { .ctl_name = 0 } 384 { .ctl_name = 0 }
381}; 385};
382 386
383struct ctl_path nf_ct_path[] = { 387static struct ctl_path nf_ct_path[] = {
384 { .procname = "net", .ctl_name = CTL_NET, }, 388 { .procname = "net", .ctl_name = CTL_NET, },
385 { } 389 { }
386}; 390};
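
The /proc/net/nf_conntrack iterator above switches from read_lock_bh(&nf_conntrack_lock) to a pure RCU walk: ->start() takes rcu_read_lock(), the hash chains are followed through rcu_dereference(), and ->stop() drops the read-side critical section, with the __acquires(RCU)/__releases(RCU) annotations keeping sparse's context checking balanced. A stripped-down sketch of that shape, reduced to a single dummy record and hypothetical names:

#include <linux/rcupdate.h>
#include <linux/seq_file.h>

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();                /* readers no longer take a spinlock */
        return *pos == 0 ? SEQ_START_TOKEN : NULL;
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return NULL;                    /* single-record example */
}

static void example_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int example_seq_show(struct seq_file *seq, void *v)
{
        seq_printf(seq, "example\n");
        return 0;
}

static const struct seq_operations example_seq_ops = {
        .start  = example_seq_start,
        .next   = example_seq_next,
        .stop   = example_seq_stop,
        .show   = example_seq_show,
};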
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index e894aa1ff3ad..bd2e800f23cc 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -25,7 +25,7 @@ MODULE_ALIAS("ip_conntrack_tftp");
25 25
26#define MAX_PORTS 8 26#define MAX_PORTS 8
27static unsigned short ports[MAX_PORTS]; 27static unsigned short ports[MAX_PORTS];
28static int ports_c; 28static unsigned int ports_c;
29module_param_array(ports, ushort, &ports_c, 0400); 29module_param_array(ports, ushort, &ports_c, 0400);
30MODULE_PARM_DESC(ports, "Port numbers of TFTP servers"); 30MODULE_PARM_DESC(ports, "Port numbers of TFTP servers");
31 31
@@ -39,7 +39,8 @@ static int tftp_help(struct sk_buff *skb,
39 struct nf_conn *ct, 39 struct nf_conn *ct,
40 enum ip_conntrack_info ctinfo) 40 enum ip_conntrack_info ctinfo)
41{ 41{
42 struct tftphdr _tftph, *tfh; 42 const struct tftphdr *tfh;
43 struct tftphdr _tftph;
43 struct nf_conntrack_expect *exp; 44 struct nf_conntrack_expect *exp;
44 struct nf_conntrack_tuple *tuple; 45 struct nf_conntrack_tuple *tuple;
45 unsigned int ret = NF_ACCEPT; 46 unsigned int ret = NF_ACCEPT;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 4f5f2885fcac..cec9976aecbf 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -103,6 +103,7 @@ EXPORT_SYMBOL(nf_log_packet);
103 103
104#ifdef CONFIG_PROC_FS 104#ifdef CONFIG_PROC_FS
105static void *seq_start(struct seq_file *seq, loff_t *pos) 105static void *seq_start(struct seq_file *seq, loff_t *pos)
106 __acquires(RCU)
106{ 107{
107 rcu_read_lock(); 108 rcu_read_lock();
108 109
@@ -123,6 +124,7 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
123} 124}
124 125
125static void seq_stop(struct seq_file *s, void *v) 126static void seq_stop(struct seq_file *s, void *v)
127 __releases(RCU)
126{ 128{
127 rcu_read_unlock(); 129 rcu_read_unlock();
128} 130}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 5013cb97ce2b..7efa40d47393 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -467,7 +467,7 @@ __build_packet_message(struct nfulnl_instance *inst,
467 read_lock_bh(&skb->sk->sk_callback_lock); 467 read_lock_bh(&skb->sk->sk_callback_lock);
468 if (skb->sk->sk_socket && skb->sk->sk_socket->file) { 468 if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
469 __be32 uid = htonl(skb->sk->sk_socket->file->f_uid); 469 __be32 uid = htonl(skb->sk->sk_socket->file->f_uid);
470 __be32 gid = htons(skb->sk->sk_socket->file->f_gid); 470 __be32 gid = htonl(skb->sk->sk_socket->file->f_gid);
471 /* need to unlock here since NLA_PUT may goto */ 471 /* need to unlock here since NLA_PUT may goto */
472 read_unlock_bh(&skb->sk->sk_callback_lock); 472 read_unlock_bh(&skb->sk->sk_callback_lock);
473 NLA_PUT_BE32(inst->skb, NFULA_UID, uid); 473 NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
@@ -866,6 +866,7 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
866} 866}
867 867
868static void *seq_start(struct seq_file *seq, loff_t *pos) 868static void *seq_start(struct seq_file *seq, loff_t *pos)
869 __acquires(instances_lock)
869{ 870{
870 read_lock_bh(&instances_lock); 871 read_lock_bh(&instances_lock);
871 return get_idx(seq->private, *pos); 872 return get_idx(seq->private, *pos);
@@ -878,6 +879,7 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
878} 879}
879 880
880static void seq_stop(struct seq_file *s, void *v) 881static void seq_stop(struct seq_file *s, void *v)
882 __releases(instances_lock)
881{ 883{
882 read_unlock_bh(&instances_lock); 884 read_unlock_bh(&instances_lock);
883} 885}
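
The one-character fix in __build_packet_message() above matters because the gid is carried as a 32-bit big-endian value: htons() converts only 16 bits, so assigning its result to a __be32 puts the bytes in the wrong place and the receiver decodes a mangled gid. A quick userspace demonstration of the difference between the two conversions:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace illustration of the byte-order fix: a 32-bit id must go through
 * htonl(); htons() operates on 16 bits and yields a different wire value. */
int main(void)
{
        uint32_t gid = 1000;

        printf("htons(gid) widened to 32 bits: 0x%08x\n", (uint32_t)htons(gid));
        printf("htonl(gid):                    0x%08x\n", htonl(gid));
        return 0;
}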
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 51476f82bb54..a48b20fe9cd6 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -360,7 +360,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
360 360
361 if (data_len) { 361 if (data_len) {
362 struct nlattr *nla; 362 struct nlattr *nla;
363 int size = nla_attr_size(data_len); 363 int sz = nla_attr_size(data_len);
364 364
365 if (skb_tailroom(skb) < nla_total_size(data_len)) { 365 if (skb_tailroom(skb) < nla_total_size(data_len)) {
366 printk(KERN_WARNING "nf_queue: no tailroom!\n"); 366 printk(KERN_WARNING "nf_queue: no tailroom!\n");
@@ -369,7 +369,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
369 369
370 nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); 370 nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
371 nla->nla_type = NFQA_PAYLOAD; 371 nla->nla_type = NFQA_PAYLOAD;
372 nla->nla_len = size; 372 nla->nla_len = sz;
373 373
374 if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) 374 if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
375 BUG(); 375 BUG();
@@ -845,6 +845,7 @@ static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
845} 845}
846 846
847static void *seq_start(struct seq_file *seq, loff_t *pos) 847static void *seq_start(struct seq_file *seq, loff_t *pos)
848 __acquires(instances_lock)
848{ 849{
849 spin_lock(&instances_lock); 850 spin_lock(&instances_lock);
850 return get_idx(seq, *pos); 851 return get_idx(seq, *pos);
@@ -857,6 +858,7 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
857} 858}
858 859
859static void seq_stop(struct seq_file *s, void *v) 860static void seq_stop(struct seq_file *s, void *v)
861 __releases(instances_lock)
860{ 862{
861 spin_unlock(&instances_lock); 863 spin_unlock(&instances_lock);
862} 864}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8d4fca96a4a7..a6792089fcf9 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -44,7 +44,6 @@ struct xt_af {
44 struct mutex mutex; 44 struct mutex mutex;
45 struct list_head match; 45 struct list_head match;
46 struct list_head target; 46 struct list_head target;
47 struct list_head tables;
48#ifdef CONFIG_COMPAT 47#ifdef CONFIG_COMPAT
49 struct mutex compat_mutex; 48 struct mutex compat_mutex;
50 struct compat_delta *compat_offsets; 49 struct compat_delta *compat_offsets;
@@ -59,12 +58,6 @@ static struct xt_af *xt;
59#define duprintf(format, args...) 58#define duprintf(format, args...)
60#endif 59#endif
61 60
62enum {
63 TABLE,
64 TARGET,
65 MATCH,
66};
67
68static const char *xt_prefix[NPROTO] = { 61static const char *xt_prefix[NPROTO] = {
69 [AF_INET] = "ip", 62 [AF_INET] = "ip",
70 [AF_INET6] = "ip6", 63 [AF_INET6] = "ip6",
@@ -400,7 +393,7 @@ int xt_compat_match_offset(struct xt_match *match)
400EXPORT_SYMBOL_GPL(xt_compat_match_offset); 393EXPORT_SYMBOL_GPL(xt_compat_match_offset);
401 394
402int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, 395int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
403 int *size) 396 unsigned int *size)
404{ 397{
405 struct xt_match *match = m->u.kernel.match; 398 struct xt_match *match = m->u.kernel.match;
406 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; 399 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
@@ -427,7 +420,7 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
427EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 420EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
428 421
429int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, 422int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
430 int *size) 423 unsigned int *size)
431{ 424{
432 struct xt_match *match = m->u.kernel.match; 425 struct xt_match *match = m->u.kernel.match;
433 struct compat_xt_entry_match __user *cm = *dstptr; 426 struct compat_xt_entry_match __user *cm = *dstptr;
@@ -494,7 +487,7 @@ int xt_compat_target_offset(struct xt_target *target)
494EXPORT_SYMBOL_GPL(xt_compat_target_offset); 487EXPORT_SYMBOL_GPL(xt_compat_target_offset);
495 488
496void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, 489void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
497 int *size) 490 unsigned int *size)
498{ 491{
499 struct xt_target *target = t->u.kernel.target; 492 struct xt_target *target = t->u.kernel.target;
500 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; 493 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
@@ -520,7 +513,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
520EXPORT_SYMBOL_GPL(xt_compat_target_from_user); 513EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
521 514
522int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, 515int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
523 int *size) 516 unsigned int *size)
524{ 517{
525 struct xt_target *target = t->u.kernel.target; 518 struct xt_target *target = t->u.kernel.target;
526 struct compat_xt_entry_target __user *ct = *dstptr; 519 struct compat_xt_entry_target __user *ct = *dstptr;
@@ -597,14 +590,14 @@ void xt_free_table_info(struct xt_table_info *info)
597EXPORT_SYMBOL(xt_free_table_info); 590EXPORT_SYMBOL(xt_free_table_info);
598 591
599/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ 592/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
600struct xt_table *xt_find_table_lock(int af, const char *name) 593struct xt_table *xt_find_table_lock(struct net *net, int af, const char *name)
601{ 594{
602 struct xt_table *t; 595 struct xt_table *t;
603 596
604 if (mutex_lock_interruptible(&xt[af].mutex) != 0) 597 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
605 return ERR_PTR(-EINTR); 598 return ERR_PTR(-EINTR);
606 599
607 list_for_each_entry(t, &xt[af].tables, list) 600 list_for_each_entry(t, &net->xt.tables[af], list)
608 if (strcmp(t->name, name) == 0 && try_module_get(t->me)) 601 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
609 return t; 602 return t;
610 mutex_unlock(&xt[af].mutex); 603 mutex_unlock(&xt[af].mutex);
@@ -660,20 +653,27 @@ xt_replace_table(struct xt_table *table,
660} 653}
661EXPORT_SYMBOL_GPL(xt_replace_table); 654EXPORT_SYMBOL_GPL(xt_replace_table);
662 655
663int xt_register_table(struct xt_table *table, 656struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
664 struct xt_table_info *bootstrap, 657 struct xt_table_info *bootstrap,
665 struct xt_table_info *newinfo) 658 struct xt_table_info *newinfo)
666{ 659{
667 int ret; 660 int ret;
668 struct xt_table_info *private; 661 struct xt_table_info *private;
669 struct xt_table *t; 662 struct xt_table *t;
670 663
664 /* Don't add one object to multiple lists. */
665 table = kmemdup(table, sizeof(struct xt_table), GFP_KERNEL);
666 if (!table) {
667 ret = -ENOMEM;
668 goto out;
669 }
670
671 ret = mutex_lock_interruptible(&xt[table->af].mutex); 671 ret = mutex_lock_interruptible(&xt[table->af].mutex);
672 if (ret != 0) 672 if (ret != 0)
673 return ret; 673 goto out_free;
674 674
675 /* Don't autoload: we'd eat our tail... */ 675 /* Don't autoload: we'd eat our tail... */
676 list_for_each_entry(t, &xt[table->af].tables, list) { 676 list_for_each_entry(t, &net->xt.tables[table->af], list) {
677 if (strcmp(t->name, table->name) == 0) { 677 if (strcmp(t->name, table->name) == 0) {
678 ret = -EEXIST; 678 ret = -EEXIST;
679 goto unlock; 679 goto unlock;
@@ -692,12 +692,16 @@ int xt_register_table(struct xt_table *table,
692 /* save number of initial entries */ 692 /* save number of initial entries */
693 private->initial_entries = private->number; 693 private->initial_entries = private->number;
694 694
695 list_add(&table->list, &xt[table->af].tables); 695 list_add(&table->list, &net->xt.tables[table->af]);
696 mutex_unlock(&xt[table->af].mutex);
697 return table;
696 698
697 ret = 0;
698 unlock: 699 unlock:
699 mutex_unlock(&xt[table->af].mutex); 700 mutex_unlock(&xt[table->af].mutex);
700 return ret; 701out_free:
702 kfree(table);
703out:
704 return ERR_PTR(ret);
701} 705}
702EXPORT_SYMBOL_GPL(xt_register_table); 706EXPORT_SYMBOL_GPL(xt_register_table);
703 707
@@ -709,130 +713,204 @@ void *xt_unregister_table(struct xt_table *table)
709 private = table->private; 713 private = table->private;
710 list_del(&table->list); 714 list_del(&table->list);
711 mutex_unlock(&xt[table->af].mutex); 715 mutex_unlock(&xt[table->af].mutex);
716 kfree(table);
712 717
713 return private; 718 return private;
714} 719}
715EXPORT_SYMBOL_GPL(xt_unregister_table); 720EXPORT_SYMBOL_GPL(xt_unregister_table);
716 721
717#ifdef CONFIG_PROC_FS 722#ifdef CONFIG_PROC_FS
718static struct list_head *xt_get_idx(struct list_head *list, struct seq_file *seq, loff_t pos) 723struct xt_names_priv {
724 struct seq_net_private p;
725 int af;
726};
727static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
719{ 728{
720 struct list_head *head = list->next; 729 struct xt_names_priv *priv = seq->private;
730 struct net *net = priv->p.net;
731 int af = priv->af;
721 732
722 if (!head || list_empty(list)) 733 mutex_lock(&xt[af].mutex);
723 return NULL; 734 return seq_list_start(&net->xt.tables[af], *pos);
735}
724 736
725 while (pos && (head = head->next)) { 737static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
726 if (head == list) 738{
727 return NULL; 739 struct xt_names_priv *priv = seq->private;
728 pos--; 740 struct net *net = priv->p.net;
729 } 741 int af = priv->af;
730 return pos ? NULL : head;
731}
732
733static struct list_head *type2list(u_int16_t af, u_int16_t type)
734{
735 struct list_head *list;
736
737 switch (type) {
738 case TARGET:
739 list = &xt[af].target;
740 break;
741 case MATCH:
742 list = &xt[af].match;
743 break;
744 case TABLE:
745 list = &xt[af].tables;
746 break;
747 default:
748 list = NULL;
749 break;
750 }
751 742
752 return list; 743 return seq_list_next(v, &net->xt.tables[af], pos);
753} 744}
754 745
755static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos) 746static void xt_table_seq_stop(struct seq_file *seq, void *v)
756{ 747{
757 struct proc_dir_entry *pde = (struct proc_dir_entry *) seq->private; 748 struct xt_names_priv *priv = seq->private;
758 u_int16_t af = (unsigned long)pde->data & 0xffff; 749 int af = priv->af;
759 u_int16_t type = (unsigned long)pde->data >> 16;
760 struct list_head *list;
761 750
762 if (af >= NPROTO) 751 mutex_unlock(&xt[af].mutex);
763 return NULL; 752}
764 753
765 list = type2list(af, type); 754static int xt_table_seq_show(struct seq_file *seq, void *v)
766 if (!list) 755{
767 return NULL; 756 struct xt_table *table = list_entry(v, struct xt_table, list);
768 757
769 if (mutex_lock_interruptible(&xt[af].mutex) != 0) 758 if (strlen(table->name))
770 return NULL; 759 return seq_printf(seq, "%s\n", table->name);
760 else
761 return 0;
762}
763
764static const struct seq_operations xt_table_seq_ops = {
765 .start = xt_table_seq_start,
766 .next = xt_table_seq_next,
767 .stop = xt_table_seq_stop,
768 .show = xt_table_seq_show,
769};
771 770
772 return xt_get_idx(list, seq, *pos); 771static int xt_table_open(struct inode *inode, struct file *file)
772{
773 int ret;
774 struct xt_names_priv *priv;
775
776 ret = seq_open_net(inode, file, &xt_table_seq_ops,
777 sizeof(struct xt_names_priv));
778 if (!ret) {
779 priv = ((struct seq_file *)file->private_data)->private;
780 priv->af = (unsigned long)PDE(inode)->data;
781 }
782 return ret;
773} 783}
774 784
775static void *xt_tgt_seq_next(struct seq_file *seq, void *v, loff_t *pos) 785static const struct file_operations xt_table_ops = {
786 .owner = THIS_MODULE,
787 .open = xt_table_open,
788 .read = seq_read,
789 .llseek = seq_lseek,
790 .release = seq_release,
791};
792
793static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
776{ 794{
777 struct proc_dir_entry *pde = seq->private; 795 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
778 u_int16_t af = (unsigned long)pde->data & 0xffff; 796 u_int16_t af = (unsigned long)pde->data;
779 u_int16_t type = (unsigned long)pde->data >> 16;
780 struct list_head *list;
781 797
782 if (af >= NPROTO) 798 mutex_lock(&xt[af].mutex);
783 return NULL; 799 return seq_list_start(&xt[af].match, *pos);
800}
784 801
785 list = type2list(af, type); 802static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos)
786 if (!list) 803{
787 return NULL; 804 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
805 u_int16_t af = (unsigned long)pde->data;
788 806
789 (*pos)++; 807 return seq_list_next(v, &xt[af].match, pos);
790 return xt_get_idx(list, seq, *pos);
791} 808}
792 809
793static void xt_tgt_seq_stop(struct seq_file *seq, void *v) 810static void xt_match_seq_stop(struct seq_file *seq, void *v)
794{ 811{
795 struct proc_dir_entry *pde = seq->private; 812 struct proc_dir_entry *pde = seq->private;
796 u_int16_t af = (unsigned long)pde->data & 0xffff; 813 u_int16_t af = (unsigned long)pde->data;
797 814
798 mutex_unlock(&xt[af].mutex); 815 mutex_unlock(&xt[af].mutex);
799} 816}
800 817
801static int xt_name_seq_show(struct seq_file *seq, void *v) 818static int xt_match_seq_show(struct seq_file *seq, void *v)
802{ 819{
803 char *name = (char *)v + sizeof(struct list_head); 820 struct xt_match *match = list_entry(v, struct xt_match, list);
804 821
805 if (strlen(name)) 822 if (strlen(match->name))
806 return seq_printf(seq, "%s\n", name); 823 return seq_printf(seq, "%s\n", match->name);
807 else 824 else
808 return 0; 825 return 0;
809} 826}
810 827
811static const struct seq_operations xt_tgt_seq_ops = { 828static const struct seq_operations xt_match_seq_ops = {
812 .start = xt_tgt_seq_start, 829 .start = xt_match_seq_start,
813 .next = xt_tgt_seq_next, 830 .next = xt_match_seq_next,
814 .stop = xt_tgt_seq_stop, 831 .stop = xt_match_seq_stop,
815 .show = xt_name_seq_show, 832 .show = xt_match_seq_show,
816}; 833};
817 834
818static int xt_tgt_open(struct inode *inode, struct file *file) 835static int xt_match_open(struct inode *inode, struct file *file)
819{ 836{
820 int ret; 837 int ret;
821 838
822 ret = seq_open(file, &xt_tgt_seq_ops); 839 ret = seq_open(file, &xt_match_seq_ops);
823 if (!ret) { 840 if (!ret) {
824 struct seq_file *seq = file->private_data; 841 struct seq_file *seq = file->private_data;
825 struct proc_dir_entry *pde = PDE(inode);
826 842
827 seq->private = pde; 843 seq->private = PDE(inode);
828 } 844 }
845 return ret;
846}
847
848static const struct file_operations xt_match_ops = {
849 .owner = THIS_MODULE,
850 .open = xt_match_open,
851 .read = seq_read,
852 .llseek = seq_lseek,
853 .release = seq_release,
854};
855
856static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
857{
858 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
859 u_int16_t af = (unsigned long)pde->data;
860
861 mutex_lock(&xt[af].mutex);
862 return seq_list_start(&xt[af].target, *pos);
863}
864
865static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos)
866{
867 struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private;
868 u_int16_t af = (unsigned long)pde->data;
869
870 return seq_list_next(v, &xt[af].target, pos);
871}
872
873static void xt_target_seq_stop(struct seq_file *seq, void *v)
874{
875 struct proc_dir_entry *pde = seq->private;
876 u_int16_t af = (unsigned long)pde->data;
877
878 mutex_unlock(&xt[af].mutex);
879}
829 880
881static int xt_target_seq_show(struct seq_file *seq, void *v)
882{
883 struct xt_target *target = list_entry(v, struct xt_target, list);
884
885 if (strlen(target->name))
886 return seq_printf(seq, "%s\n", target->name);
887 else
888 return 0;
889}
890
891static const struct seq_operations xt_target_seq_ops = {
892 .start = xt_target_seq_start,
893 .next = xt_target_seq_next,
894 .stop = xt_target_seq_stop,
895 .show = xt_target_seq_show,
896};
897
898static int xt_target_open(struct inode *inode, struct file *file)
899{
900 int ret;
901
902 ret = seq_open(file, &xt_target_seq_ops);
903 if (!ret) {
904 struct seq_file *seq = file->private_data;
905
906 seq->private = PDE(inode);
907 }
830 return ret; 908 return ret;
831} 909}
832 910
833static const struct file_operations xt_file_ops = { 911static const struct file_operations xt_target_ops = {
834 .owner = THIS_MODULE, 912 .owner = THIS_MODULE,
835 .open = xt_tgt_open, 913 .open = xt_target_open,
836 .read = seq_read, 914 .read = seq_read,
837 .llseek = seq_lseek, 915 .llseek = seq_lseek,
838 .release = seq_release, 916 .release = seq_release,
@@ -844,7 +922,7 @@ static const struct file_operations xt_file_ops = {
844 922
845#endif /* CONFIG_PROC_FS */ 923#endif /* CONFIG_PROC_FS */
846 924
847int xt_proto_init(int af) 925int xt_proto_init(struct net *net, int af)
848{ 926{
849#ifdef CONFIG_PROC_FS 927#ifdef CONFIG_PROC_FS
850 char buf[XT_FUNCTION_MAXNAMELEN]; 928 char buf[XT_FUNCTION_MAXNAMELEN];
@@ -858,25 +936,25 @@ int xt_proto_init(int af)
858#ifdef CONFIG_PROC_FS 936#ifdef CONFIG_PROC_FS
859 strlcpy(buf, xt_prefix[af], sizeof(buf)); 937 strlcpy(buf, xt_prefix[af], sizeof(buf));
860 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 938 strlcat(buf, FORMAT_TABLES, sizeof(buf));
861 proc = proc_net_fops_create(&init_net, buf, 0440, &xt_file_ops); 939 proc = proc_net_fops_create(net, buf, 0440, &xt_table_ops);
862 if (!proc) 940 if (!proc)
863 goto out; 941 goto out;
864 proc->data = (void *) ((unsigned long) af | (TABLE << 16)); 942 proc->data = (void *)(unsigned long)af;
865 943
866 944
867 strlcpy(buf, xt_prefix[af], sizeof(buf)); 945 strlcpy(buf, xt_prefix[af], sizeof(buf));
868 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 946 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
869 proc = proc_net_fops_create(&init_net, buf, 0440, &xt_file_ops); 947 proc = proc_net_fops_create(net, buf, 0440, &xt_match_ops);
870 if (!proc) 948 if (!proc)
871 goto out_remove_tables; 949 goto out_remove_tables;
872 proc->data = (void *) ((unsigned long) af | (MATCH << 16)); 950 proc->data = (void *)(unsigned long)af;
873 951
874 strlcpy(buf, xt_prefix[af], sizeof(buf)); 952 strlcpy(buf, xt_prefix[af], sizeof(buf));
875 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 953 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
876 proc = proc_net_fops_create(&init_net, buf, 0440, &xt_file_ops); 954 proc = proc_net_fops_create(net, buf, 0440, &xt_target_ops);
877 if (!proc) 955 if (!proc)
878 goto out_remove_matches; 956 goto out_remove_matches;
879 proc->data = (void *) ((unsigned long) af | (TARGET << 16)); 957 proc->data = (void *)(unsigned long)af;
880#endif 958#endif
881 959
882 return 0; 960 return 0;
@@ -885,42 +963,54 @@ int xt_proto_init(int af)
885out_remove_matches: 963out_remove_matches:
886 strlcpy(buf, xt_prefix[af], sizeof(buf)); 964 strlcpy(buf, xt_prefix[af], sizeof(buf));
887 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 965 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
888 proc_net_remove(&init_net, buf); 966 proc_net_remove(net, buf);
889 967
890out_remove_tables: 968out_remove_tables:
891 strlcpy(buf, xt_prefix[af], sizeof(buf)); 969 strlcpy(buf, xt_prefix[af], sizeof(buf));
892 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 970 strlcat(buf, FORMAT_TABLES, sizeof(buf));
893 proc_net_remove(&init_net, buf); 971 proc_net_remove(net, buf);
894out: 972out:
895 return -1; 973 return -1;
896#endif 974#endif
897} 975}
898EXPORT_SYMBOL_GPL(xt_proto_init); 976EXPORT_SYMBOL_GPL(xt_proto_init);
899 977
900void xt_proto_fini(int af) 978void xt_proto_fini(struct net *net, int af)
901{ 979{
902#ifdef CONFIG_PROC_FS 980#ifdef CONFIG_PROC_FS
903 char buf[XT_FUNCTION_MAXNAMELEN]; 981 char buf[XT_FUNCTION_MAXNAMELEN];
904 982
905 strlcpy(buf, xt_prefix[af], sizeof(buf)); 983 strlcpy(buf, xt_prefix[af], sizeof(buf));
906 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 984 strlcat(buf, FORMAT_TABLES, sizeof(buf));
907 proc_net_remove(&init_net, buf); 985 proc_net_remove(net, buf);
908 986
909 strlcpy(buf, xt_prefix[af], sizeof(buf)); 987 strlcpy(buf, xt_prefix[af], sizeof(buf));
910 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 988 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
911 proc_net_remove(&init_net, buf); 989 proc_net_remove(net, buf);
912 990
913 strlcpy(buf, xt_prefix[af], sizeof(buf)); 991 strlcpy(buf, xt_prefix[af], sizeof(buf));
914 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 992 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
915 proc_net_remove(&init_net, buf); 993 proc_net_remove(net, buf);
916#endif /*CONFIG_PROC_FS*/ 994#endif /*CONFIG_PROC_FS*/
917} 995}
918EXPORT_SYMBOL_GPL(xt_proto_fini); 996EXPORT_SYMBOL_GPL(xt_proto_fini);
919 997
998static int __net_init xt_net_init(struct net *net)
999{
1000 int i;
1001
1002 for (i = 0; i < NPROTO; i++)
1003 INIT_LIST_HEAD(&net->xt.tables[i]);
1004 return 0;
1005}
1006
1007static struct pernet_operations xt_net_ops = {
1008 .init = xt_net_init,
1009};
920 1010
921static int __init xt_init(void) 1011static int __init xt_init(void)
922{ 1012{
923 int i; 1013 int i, rv;
924 1014
925 xt = kmalloc(sizeof(struct xt_af) * NPROTO, GFP_KERNEL); 1015 xt = kmalloc(sizeof(struct xt_af) * NPROTO, GFP_KERNEL);
926 if (!xt) 1016 if (!xt)
@@ -934,13 +1024,16 @@ static int __init xt_init(void)
934#endif 1024#endif
935 INIT_LIST_HEAD(&xt[i].target); 1025 INIT_LIST_HEAD(&xt[i].target);
936 INIT_LIST_HEAD(&xt[i].match); 1026 INIT_LIST_HEAD(&xt[i].match);
937 INIT_LIST_HEAD(&xt[i].tables);
938 } 1027 }
939 return 0; 1028 rv = register_pernet_subsys(&xt_net_ops);
1029 if (rv < 0)
1030 kfree(xt);
1031 return rv;
940} 1032}
941 1033
942static void __exit xt_fini(void) 1034static void __exit xt_fini(void)
943{ 1035{
1036 unregister_pernet_subsys(&xt_net_ops);
944 kfree(xt); 1037 kfree(xt);
945} 1038}
946 1039
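
With the per-namespace table lists, xt_register_table() now hands back the kmemdup'ed table itself and reports failure through the pointer, which is why its error paths end in ERR_PTR(ret). A minimal userspace rendering of that convention, using the usual kernel threshold of 4095 reserved errno values; fake_register() is obviously made up:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* small negative errno fits in a pointer */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_register(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* the error travels in the pointer */
	return "table";				/* success: a real object */
}

int main(void)
{
	void *t = fake_register(1);

	if (IS_ERR(t))
		printf("register failed: %ld\n", PTR_ERR(t));
	return 0;
}
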
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 60e3767cc71d..217e2b686322 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -13,7 +13,10 @@
13#include <linux/ip.h> 13#include <linux/ip.h>
14#include <linux/ipv6.h> 14#include <linux/ipv6.h>
15#include <linux/tcp.h> 15#include <linux/tcp.h>
16#include <net/dst.h>
17#include <net/flow.h>
16#include <net/ipv6.h> 18#include <net/ipv6.h>
19#include <net/route.h>
17#include <net/tcp.h> 20#include <net/tcp.h>
18 21
19#include <linux/netfilter_ipv4/ip_tables.h> 22#include <linux/netfilter_ipv4/ip_tables.h>
@@ -41,6 +44,7 @@ optlen(const u_int8_t *opt, unsigned int offset)
41static int 44static int
42tcpmss_mangle_packet(struct sk_buff *skb, 45tcpmss_mangle_packet(struct sk_buff *skb,
43 const struct xt_tcpmss_info *info, 46 const struct xt_tcpmss_info *info,
47 unsigned int in_mtu,
44 unsigned int tcphoff, 48 unsigned int tcphoff,
45 unsigned int minlen) 49 unsigned int minlen)
46{ 50{
@@ -76,7 +80,13 @@ tcpmss_mangle_packet(struct sk_buff *skb,
76 dst_mtu(skb->dst)); 80 dst_mtu(skb->dst));
77 return -1; 81 return -1;
78 } 82 }
79 newmss = dst_mtu(skb->dst) - minlen; 83 if (in_mtu <= minlen) {
84 if (net_ratelimit())
85 printk(KERN_ERR "xt_TCPMSS: unknown or "
86 "invalid path-MTU (%u)\n", in_mtu);
87 return -1;
88 }
89 newmss = min(dst_mtu(skb->dst), in_mtu) - minlen;
80 } else 90 } else
81 newmss = info->mss; 91 newmss = info->mss;
82 92
@@ -137,6 +147,28 @@ tcpmss_mangle_packet(struct sk_buff *skb,
137 return TCPOLEN_MSS; 147 return TCPOLEN_MSS;
138} 148}
139 149
150static u_int32_t tcpmss_reverse_mtu4(const struct iphdr *iph)
151{
152 struct flowi fl = {
153 .fl4_dst = iph->saddr,
154 };
155 const struct nf_afinfo *ai;
156 struct rtable *rt = NULL;
157 u_int32_t mtu = ~0U;
158
159 rcu_read_lock();
160 ai = nf_get_afinfo(AF_INET);
161 if (ai != NULL)
162 ai->route((struct dst_entry **)&rt, &fl);
163 rcu_read_unlock();
164
165 if (rt != NULL) {
166 mtu = dst_mtu(&rt->u.dst);
167 dst_release(&rt->u.dst);
168 }
169 return mtu;
170}
171
140static unsigned int 172static unsigned int
141tcpmss_tg4(struct sk_buff *skb, const struct net_device *in, 173tcpmss_tg4(struct sk_buff *skb, const struct net_device *in,
142 const struct net_device *out, unsigned int hooknum, 174 const struct net_device *out, unsigned int hooknum,
@@ -146,7 +178,8 @@ tcpmss_tg4(struct sk_buff *skb, const struct net_device *in,
146 __be16 newlen; 178 __be16 newlen;
147 int ret; 179 int ret;
148 180
149 ret = tcpmss_mangle_packet(skb, targinfo, iph->ihl * 4, 181 ret = tcpmss_mangle_packet(skb, targinfo, tcpmss_reverse_mtu4(iph),
182 iph->ihl * 4,
150 sizeof(*iph) + sizeof(struct tcphdr)); 183 sizeof(*iph) + sizeof(struct tcphdr));
151 if (ret < 0) 184 if (ret < 0)
152 return NF_DROP; 185 return NF_DROP;
@@ -160,6 +193,28 @@ tcpmss_tg4(struct sk_buff *skb, const struct net_device *in,
160} 193}
161 194
162#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) 195#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
196static u_int32_t tcpmss_reverse_mtu6(const struct ipv6hdr *iph)
197{
198 struct flowi fl = {
199 .fl6_dst = iph->saddr,
200 };
201 const struct nf_afinfo *ai;
202 struct rtable *rt = NULL;
203 u_int32_t mtu = ~0U;
204
205 rcu_read_lock();
206 ai = nf_get_afinfo(AF_INET6);
207 if (ai != NULL)
208 ai->route((struct dst_entry **)&rt, &fl);
209 rcu_read_unlock();
210
211 if (rt != NULL) {
212 mtu = dst_mtu(&rt->u.dst);
213 dst_release(&rt->u.dst);
214 }
215 return mtu;
216}
217
163static unsigned int 218static unsigned int
164tcpmss_tg6(struct sk_buff *skb, const struct net_device *in, 219tcpmss_tg6(struct sk_buff *skb, const struct net_device *in,
165 const struct net_device *out, unsigned int hooknum, 220 const struct net_device *out, unsigned int hooknum,
@@ -174,7 +229,8 @@ tcpmss_tg6(struct sk_buff *skb, const struct net_device *in,
174 tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr); 229 tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
175 if (tcphoff < 0) 230 if (tcphoff < 0)
176 return NF_DROP; 231 return NF_DROP;
177 ret = tcpmss_mangle_packet(skb, targinfo, tcphoff, 232 ret = tcpmss_mangle_packet(skb, targinfo, tcpmss_reverse_mtu6(ipv6h),
233 tcphoff,
178 sizeof(*ipv6h) + sizeof(struct tcphdr)); 234 sizeof(*ipv6h) + sizeof(struct tcphdr));
179 if (ret < 0) 235 if (ret < 0)
180 return NF_DROP; 236 return NF_DROP;
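
The xt_TCPMSS change above stops trusting the forward route alone and clamps to the smaller of the forward and (newly looked-up) reverse path MTUs before subtracting the headers. The arithmetic, isolated with made-up numbers:

#include <stdio.h>

static unsigned int clamp_mss(unsigned int fwd_mtu, unsigned int rev_mtu,
			      unsigned int minlen)
{
	unsigned int mtu = fwd_mtu < rev_mtu ? fwd_mtu : rev_mtu;

	if (mtu <= minlen)
		return 0;	/* the module would log and drop here (-1) */
	return mtu - minlen;	/* e.g. 1500 - (20 + 20) = 1460 for IPv4/TCP */
}

int main(void)
{
	/* asymmetric path: 1500 towards the peer, 1400 back from it */
	printf("%u\n", clamp_mss(1500, 1400, 40));	/* prints 1360 */
	return 0;
}
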
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index e00ecd974fa3..3b0111933f60 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -120,11 +120,11 @@ static int count_them(struct xt_connlimit_data *data,
120 else 120 else
121 hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)]; 121 hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)];
122 122
123 read_lock_bh(&nf_conntrack_lock); 123 rcu_read_lock();
124 124
125 /* check the saved connections */ 125 /* check the saved connections */
126 list_for_each_entry_safe(conn, tmp, hash, list) { 126 list_for_each_entry_safe(conn, tmp, hash, list) {
127 found = __nf_conntrack_find(&conn->tuple, NULL); 127 found = __nf_conntrack_find(&conn->tuple);
128 found_ct = NULL; 128 found_ct = NULL;
129 129
130 if (found != NULL) 130 if (found != NULL)
@@ -163,7 +163,7 @@ static int count_them(struct xt_connlimit_data *data,
163 ++matches; 163 ++matches;
164 } 164 }
165 165
166 read_unlock_bh(&nf_conntrack_lock); 166 rcu_read_unlock();
167 167
168 if (addit) { 168 if (addit) {
169 /* save the new connection in our list */ 169 /* save the new connection in our list */
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index e92190eafcc5..85330856a29c 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -4,7 +4,6 @@
4 * 4 *
5 * (C) 2001 Marc Boucher (marc@mbsi.ca). 5 * (C) 2001 Marc Boucher (marc@mbsi.ca).
6 * Copyright © CC Computer Consultants GmbH, 2007 - 2008 6 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
7 * Jan Engelhardt <jengelh@computergmbh.de>
8 * 7 *
9 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +19,7 @@
20 19
21MODULE_LICENSE("GPL"); 20MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); 21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
22MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
23MODULE_DESCRIPTION("Xtables: connection tracking state match"); 23MODULE_DESCRIPTION("Xtables: connection tracking state match");
24MODULE_ALIAS("ipt_conntrack"); 24MODULE_ALIAS("ipt_conntrack");
25MODULE_ALIAS("ip6t_conntrack"); 25MODULE_ALIAS("ip6t_conntrack");
@@ -166,6 +166,44 @@ conntrack_mt_repldst(const struct nf_conn *ct,
166 &info->repldst_addr, &info->repldst_mask, family); 166 &info->repldst_addr, &info->repldst_mask, family);
167} 167}
168 168
169static inline bool
170ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
171 const struct nf_conn *ct)
172{
173 const struct nf_conntrack_tuple *tuple;
174
175 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
176 if ((info->match_flags & XT_CONNTRACK_PROTO) &&
177 (tuple->dst.protonum == info->l4proto) ^
178 !(info->invert_flags & XT_CONNTRACK_PROTO))
179 return false;
180
181 /* Shortcut to match all recognized protocols by using ->src.all. */
182 if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
183 (tuple->src.u.all == info->origsrc_port) ^
184 !(info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
185 return false;
186
187 if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
188 (tuple->dst.u.all == info->origdst_port) ^
189 !(info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
190 return false;
191
192 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
193
194 if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
195 (tuple->src.u.all == info->replsrc_port) ^
196 !(info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
197 return false;
198
199 if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
200 (tuple->dst.u.all == info->repldst_port) ^
201 !(info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
202 return false;
203
204 return true;
205}
206
169static bool 207static bool
170conntrack_mt(const struct sk_buff *skb, const struct net_device *in, 208conntrack_mt(const struct sk_buff *skb, const struct net_device *in,
171 const struct net_device *out, const struct xt_match *match, 209 const struct net_device *out, const struct xt_match *match,
@@ -200,10 +238,9 @@ conntrack_mt(const struct sk_buff *skb, const struct net_device *in,
200 238
201 if (ct == NULL) 239 if (ct == NULL)
202 return info->match_flags & XT_CONNTRACK_STATE; 240 return info->match_flags & XT_CONNTRACK_STATE;
203 241 if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
204 if ((info->match_flags & XT_CONNTRACK_PROTO) && 242 (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
205 ((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == 243 !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
206 info->l4proto) ^ !(info->invert_flags & XT_CONNTRACK_PROTO)))
207 return false; 244 return false;
208 245
209 if (info->match_flags & XT_CONNTRACK_ORIGSRC) 246 if (info->match_flags & XT_CONNTRACK_ORIGSRC)
@@ -226,6 +263,9 @@ conntrack_mt(const struct sk_buff *skb, const struct net_device *in,
226 !(info->invert_flags & XT_CONNTRACK_REPLDST)) 263 !(info->invert_flags & XT_CONNTRACK_REPLDST))
227 return false; 264 return false;
228 265
266 if (!ct_proto_port_check(info, ct))
267 return false;
268
229 if ((info->match_flags & XT_CONNTRACK_STATUS) && 269 if ((info->match_flags & XT_CONNTRACK_STATUS) &&
230 (!!(info->status_mask & ct->status) ^ 270 (!!(info->status_mask & ct->status) ^
231 !(info->invert_flags & XT_CONNTRACK_STATUS))) 271 !(info->invert_flags & XT_CONNTRACK_STATUS)))
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index d479ca980115..744c7f2ab0b1 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -1,9 +1,9 @@
1/* iptables match extension to limit the number of packets per second 1/*
2 * seperately for each hashbucket (sourceip/sourceport/dstip/dstport) 2 * xt_hashlimit - Netfilter module to limit the number of packets per time
3 * seperately for each hashbucket (sourceip/sourceport/dstip/dstport)
3 * 4 *
4 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> 5 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
5 * 6 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
6 * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $
7 * 7 *
8 * Development of this code was funded by Astaro AG, http://www.astaro.com/ 8 * Development of this code was funded by Astaro AG, http://www.astaro.com/
9 */ 9 */
@@ -35,6 +35,7 @@
35 35
36MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
37MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 37MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
38MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
38MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match"); 39MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
39MODULE_ALIAS("ipt_hashlimit"); 40MODULE_ALIAS("ipt_hashlimit");
40MODULE_ALIAS("ip6t_hashlimit"); 41MODULE_ALIAS("ip6t_hashlimit");
@@ -57,7 +58,7 @@ struct dsthash_dst {
57 __be32 dst[4]; 58 __be32 dst[4];
58 } ip6; 59 } ip6;
59#endif 60#endif
60 } addr; 61 };
61 __be16 src_port; 62 __be16 src_port;
62 __be16 dst_port; 63 __be16 dst_port;
63}; 64};
@@ -81,7 +82,7 @@ struct xt_hashlimit_htable {
81 atomic_t use; 82 atomic_t use;
82 int family; 83 int family;
83 84
84 struct hashlimit_cfg cfg; /* config */ 85 struct hashlimit_cfg1 cfg; /* config */
85 86
86 /* used internally */ 87 /* used internally */
87 spinlock_t lock; /* lock for list_head */ 88 spinlock_t lock; /* lock for list_head */
@@ -184,7 +185,7 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
184} 185}
185static void htable_gc(unsigned long htlong); 186static void htable_gc(unsigned long htlong);
186 187
187static int htable_create(struct xt_hashlimit_info *minfo, int family) 188static int htable_create_v0(struct xt_hashlimit_info *minfo, int family)
188{ 189{
189 struct xt_hashlimit_htable *hinfo; 190 struct xt_hashlimit_htable *hinfo;
190 unsigned int size; 191 unsigned int size;
@@ -210,7 +211,18 @@ static int htable_create(struct xt_hashlimit_info *minfo, int family)
210 minfo->hinfo = hinfo; 211 minfo->hinfo = hinfo;
211 212
212 /* copy match config into hashtable config */ 213 /* copy match config into hashtable config */
213 memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg)); 214 hinfo->cfg.mode = minfo->cfg.mode;
215 hinfo->cfg.avg = minfo->cfg.avg;
216 hinfo->cfg.burst = minfo->cfg.burst;
217 hinfo->cfg.max = minfo->cfg.max;
218 hinfo->cfg.gc_interval = minfo->cfg.gc_interval;
219 hinfo->cfg.expire = minfo->cfg.expire;
220
221 if (family == AF_INET)
222 hinfo->cfg.srcmask = hinfo->cfg.dstmask = 32;
223 else
224 hinfo->cfg.srcmask = hinfo->cfg.dstmask = 128;
225
214 hinfo->cfg.size = size; 226 hinfo->cfg.size = size;
215 if (!hinfo->cfg.max) 227 if (!hinfo->cfg.max)
216 hinfo->cfg.max = 8 * hinfo->cfg.size; 228 hinfo->cfg.max = 8 * hinfo->cfg.size;
@@ -246,6 +258,70 @@ static int htable_create(struct xt_hashlimit_info *minfo, int family)
246 return 0; 258 return 0;
247} 259}
248 260
261static int htable_create(struct xt_hashlimit_mtinfo1 *minfo,
262 unsigned int family)
263{
264 struct xt_hashlimit_htable *hinfo;
265 unsigned int size;
266 unsigned int i;
267
268 if (minfo->cfg.size) {
269 size = minfo->cfg.size;
270 } else {
271 size = (num_physpages << PAGE_SHIFT) / 16384 /
272 sizeof(struct list_head);
273 if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
274 size = 8192;
275 if (size < 16)
276 size = 16;
277 }
278 /* FIXME: don't use vmalloc() here or anywhere else -HW */
279 hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
280 sizeof(struct list_head) * size);
281 if (hinfo == NULL) {
282 printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
283 return -1;
284 }
285 minfo->hinfo = hinfo;
286
287 /* copy match config into hashtable config */
288 memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
289 hinfo->cfg.size = size;
290 if (hinfo->cfg.max == 0)
291 hinfo->cfg.max = 8 * hinfo->cfg.size;
292 else if (hinfo->cfg.max < hinfo->cfg.size)
293 hinfo->cfg.max = hinfo->cfg.size;
294
295 for (i = 0; i < hinfo->cfg.size; i++)
296 INIT_HLIST_HEAD(&hinfo->hash[i]);
297
298 atomic_set(&hinfo->use, 1);
299 hinfo->count = 0;
300 hinfo->family = family;
301 hinfo->rnd_initialized = 0;
302 spin_lock_init(&hinfo->lock);
303
304 hinfo->pde = create_proc_entry(minfo->name, 0,
305 family == AF_INET ? hashlimit_procdir4 :
306 hashlimit_procdir6);
307 if (hinfo->pde == NULL) {
308 vfree(hinfo);
309 return -1;
310 }
311 hinfo->pde->proc_fops = &dl_file_ops;
312 hinfo->pde->data = hinfo;
313
314 setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
315 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
316 add_timer(&hinfo->timer);
317
318 spin_lock_bh(&hashlimit_lock);
319 hlist_add_head(&hinfo->node, &hashlimit_htables);
320 spin_unlock_bh(&hashlimit_lock);
321
322 return 0;
323}
324
249static bool select_all(const struct xt_hashlimit_htable *ht, 325static bool select_all(const struct xt_hashlimit_htable *ht,
250 const struct dsthash_ent *he) 326 const struct dsthash_ent *he)
251{ 327{
@@ -388,6 +464,48 @@ static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
388 dh->rateinfo.prev = now; 464 dh->rateinfo.prev = now;
389} 465}
390 466
467static inline __be32 maskl(__be32 a, unsigned int l)
468{
469 return htonl(ntohl(a) & ~(~(u_int32_t)0 >> l));
470}
471
472#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
473static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
474{
475 switch (p) {
476 case 0:
477 i[0] = i[1] = 0;
478 i[2] = i[3] = 0;
479 break;
480 case 1 ... 31:
481 i[0] = maskl(i[0], p);
482 i[1] = i[2] = i[3] = 0;
483 break;
484 case 32:
485 i[1] = i[2] = i[3] = 0;
486 break;
487 case 33 ... 63:
488 i[1] = maskl(i[1], p - 32);
489 i[2] = i[3] = 0;
490 break;
491 case 64:
492 i[2] = i[3] = 0;
493 break;
494 case 65 ... 95:
495 i[2] = maskl(i[2], p - 64);
496 i[3] = 0;
497 case 96:
498 i[3] = 0;
499 break;
500 case 97 ... 127:
501 i[3] = maskl(i[3], p - 96);
502 break;
503 case 128:
504 break;
505 }
506}
507#endif
508
391static int 509static int
392hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, 510hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
393 struct dsthash_dst *dst, 511 struct dsthash_dst *dst,
@@ -401,9 +519,11 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
401 switch (hinfo->family) { 519 switch (hinfo->family) {
402 case AF_INET: 520 case AF_INET:
403 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) 521 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
404 dst->addr.ip.dst = ip_hdr(skb)->daddr; 522 dst->ip.dst = maskl(ip_hdr(skb)->daddr,
523 hinfo->cfg.dstmask);
405 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) 524 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
406 dst->addr.ip.src = ip_hdr(skb)->saddr; 525 dst->ip.src = maskl(ip_hdr(skb)->saddr,
526 hinfo->cfg.srcmask);
407 527
408 if (!(hinfo->cfg.mode & 528 if (!(hinfo->cfg.mode &
409 (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) 529 (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
@@ -412,12 +532,16 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
412 break; 532 break;
413#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) 533#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
414 case AF_INET6: 534 case AF_INET6:
415 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) 535 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
416 memcpy(&dst->addr.ip6.dst, &ipv6_hdr(skb)->daddr, 536 memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
417 sizeof(dst->addr.ip6.dst)); 537 sizeof(dst->ip6.dst));
418 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) 538 hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
419 memcpy(&dst->addr.ip6.src, &ipv6_hdr(skb)->saddr, 539 }
420 sizeof(dst->addr.ip6.src)); 540 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
541 memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
542 sizeof(dst->ip6.src));
543 hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
544 }
421 545
422 if (!(hinfo->cfg.mode & 546 if (!(hinfo->cfg.mode &
423 (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) 547 (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
@@ -457,10 +581,10 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
457} 581}
458 582
459static bool 583static bool
460hashlimit_mt(const struct sk_buff *skb, const struct net_device *in, 584hashlimit_mt_v0(const struct sk_buff *skb, const struct net_device *in,
461 const struct net_device *out, const struct xt_match *match, 585 const struct net_device *out, const struct xt_match *match,
462 const void *matchinfo, int offset, unsigned int protoff, 586 const void *matchinfo, int offset, unsigned int protoff,
463 bool *hotdrop) 587 bool *hotdrop)
464{ 588{
465 const struct xt_hashlimit_info *r = 589 const struct xt_hashlimit_info *r =
466 ((const struct xt_hashlimit_info *)matchinfo)->u.master; 590 ((const struct xt_hashlimit_info *)matchinfo)->u.master;
@@ -512,9 +636,62 @@ hotdrop:
512} 636}
513 637
514static bool 638static bool
515hashlimit_mt_check(const char *tablename, const void *inf, 639hashlimit_mt(const struct sk_buff *skb, const struct net_device *in,
516 const struct xt_match *match, void *matchinfo, 640 const struct net_device *out, const struct xt_match *match,
517 unsigned int hook_mask) 641 const void *matchinfo, int offset, unsigned int protoff,
642 bool *hotdrop)
643{
644 const struct xt_hashlimit_mtinfo1 *info = matchinfo;
645 struct xt_hashlimit_htable *hinfo = info->hinfo;
646 unsigned long now = jiffies;
647 struct dsthash_ent *dh;
648 struct dsthash_dst dst;
649
650 if (hashlimit_init_dst(hinfo, &dst, skb, protoff) < 0)
651 goto hotdrop;
652
653 spin_lock_bh(&hinfo->lock);
654 dh = dsthash_find(hinfo, &dst);
655 if (dh == NULL) {
656 dh = dsthash_alloc_init(hinfo, &dst);
657 if (dh == NULL) {
658 spin_unlock_bh(&hinfo->lock);
659 goto hotdrop;
660 }
661
662 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
663 dh->rateinfo.prev = jiffies;
664 dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
665 hinfo->cfg.burst);
666 dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
667 hinfo->cfg.burst);
668 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
669 } else {
670 /* update expiration timeout */
671 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
672 rateinfo_recalc(dh, now);
673 }
674
675 if (dh->rateinfo.credit >= dh->rateinfo.cost) {
676 /* below the limit */
677 dh->rateinfo.credit -= dh->rateinfo.cost;
678 spin_unlock_bh(&hinfo->lock);
679 return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
680 }
681
682 spin_unlock_bh(&hinfo->lock);
683 /* default match is underlimit - so over the limit, we need to invert */
684 return info->cfg.mode & XT_HASHLIMIT_INVERT;
685
686 hotdrop:
687 *hotdrop = true;
688 return false;
689}
690
691static bool
692hashlimit_mt_check_v0(const char *tablename, const void *inf,
693 const struct xt_match *match, void *matchinfo,
694 unsigned int hook_mask)
518{ 695{
519 struct xt_hashlimit_info *r = matchinfo; 696 struct xt_hashlimit_info *r = matchinfo;
520 697
@@ -546,7 +723,7 @@ hashlimit_mt_check(const char *tablename, const void *inf,
546 * create duplicate proc files. -HW */ 723 * create duplicate proc files. -HW */
547 mutex_lock(&hlimit_mutex); 724 mutex_lock(&hlimit_mutex);
548 r->hinfo = htable_find_get(r->name, match->family); 725 r->hinfo = htable_find_get(r->name, match->family);
549 if (!r->hinfo && htable_create(r, match->family) != 0) { 726 if (!r->hinfo && htable_create_v0(r, match->family) != 0) {
550 mutex_unlock(&hlimit_mutex); 727 mutex_unlock(&hlimit_mutex);
551 return false; 728 return false;
552 } 729 }
@@ -557,14 +734,68 @@ hashlimit_mt_check(const char *tablename, const void *inf,
557 return true; 734 return true;
558} 735}
559 736
737static bool
738hashlimit_mt_check(const char *tablename, const void *inf,
739 const struct xt_match *match, void *matchinfo,
740 unsigned int hook_mask)
741{
742 struct xt_hashlimit_mtinfo1 *info = matchinfo;
743
744 /* Check for overflow. */
745 if (info->cfg.burst == 0 ||
746 user2credits(info->cfg.avg * info->cfg.burst) <
747 user2credits(info->cfg.avg)) {
748 printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
749 info->cfg.avg, info->cfg.burst);
750 return false;
751 }
752 if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
753 return false;
754 if (info->name[sizeof(info->name)-1] != '\0')
755 return false;
756 if (match->family == AF_INET) {
757 if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
758 return false;
759 } else {
760 if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
761 return false;
762 }
763
764 /* This is the best we've got: We cannot release and re-grab lock,
765 * since checkentry() is called before x_tables.c grabs xt_mutex.
766 * We also cannot grab the hashtable spinlock, since htable_create will
767 * call vmalloc, and that can sleep. And we cannot just re-search
768 * the list of htable's in htable_create(), since then we would
769 * create duplicate proc files. -HW */
770 mutex_lock(&hlimit_mutex);
771 info->hinfo = htable_find_get(info->name, match->family);
772 if (!info->hinfo && htable_create(info, match->family) != 0) {
773 mutex_unlock(&hlimit_mutex);
774 return false;
775 }
776 mutex_unlock(&hlimit_mutex);
777
778 /* Ugly hack: For SMP, we only want to use one set */
779 info->master = info;
780 return true;
781}
782
560static void 783static void
561hashlimit_mt_destroy(const struct xt_match *match, void *matchinfo) 784hashlimit_mt_destroy_v0(const struct xt_match *match, void *matchinfo)
562{ 785{
563 const struct xt_hashlimit_info *r = matchinfo; 786 const struct xt_hashlimit_info *r = matchinfo;
564 787
565 htable_put(r->hinfo); 788 htable_put(r->hinfo);
566} 789}
567 790
791static void
792hashlimit_mt_destroy(const struct xt_match *match, void *matchinfo)
793{
794 const struct xt_hashlimit_mtinfo1 *info = matchinfo;
795
796 htable_put(info->hinfo);
797}
798
568#ifdef CONFIG_COMPAT 799#ifdef CONFIG_COMPAT
569struct compat_xt_hashlimit_info { 800struct compat_xt_hashlimit_info {
570 char name[IFNAMSIZ]; 801 char name[IFNAMSIZ];
@@ -592,38 +823,60 @@ static int hashlimit_mt_compat_to_user(void __user *dst, void *src)
592static struct xt_match hashlimit_mt_reg[] __read_mostly = { 823static struct xt_match hashlimit_mt_reg[] __read_mostly = {
593 { 824 {
594 .name = "hashlimit", 825 .name = "hashlimit",
826 .revision = 0,
595 .family = AF_INET, 827 .family = AF_INET,
596 .match = hashlimit_mt, 828 .match = hashlimit_mt_v0,
597 .matchsize = sizeof(struct xt_hashlimit_info), 829 .matchsize = sizeof(struct xt_hashlimit_info),
598#ifdef CONFIG_COMPAT 830#ifdef CONFIG_COMPAT
599 .compatsize = sizeof(struct compat_xt_hashlimit_info), 831 .compatsize = sizeof(struct compat_xt_hashlimit_info),
600 .compat_from_user = hashlimit_mt_compat_from_user, 832 .compat_from_user = hashlimit_mt_compat_from_user,
601 .compat_to_user = hashlimit_mt_compat_to_user, 833 .compat_to_user = hashlimit_mt_compat_to_user,
602#endif 834#endif
603 .checkentry = hashlimit_mt_check, 835 .checkentry = hashlimit_mt_check_v0,
604 .destroy = hashlimit_mt_destroy, 836 .destroy = hashlimit_mt_destroy_v0,
605 .me = THIS_MODULE 837 .me = THIS_MODULE
606 }, 838 },
839 {
840 .name = "hashlimit",
841 .revision = 1,
842 .family = AF_INET,
843 .match = hashlimit_mt,
844 .matchsize = sizeof(struct xt_hashlimit_mtinfo1),
845 .checkentry = hashlimit_mt_check,
846 .destroy = hashlimit_mt_destroy,
847 .me = THIS_MODULE,
848 },
607#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) 849#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
608 { 850 {
609 .name = "hashlimit", 851 .name = "hashlimit",
610 .family = AF_INET6, 852 .family = AF_INET6,
611 .match = hashlimit_mt, 853 .match = hashlimit_mt_v0,
612 .matchsize = sizeof(struct xt_hashlimit_info), 854 .matchsize = sizeof(struct xt_hashlimit_info),
613#ifdef CONFIG_COMPAT 855#ifdef CONFIG_COMPAT
614 .compatsize = sizeof(struct compat_xt_hashlimit_info), 856 .compatsize = sizeof(struct compat_xt_hashlimit_info),
615 .compat_from_user = hashlimit_mt_compat_from_user, 857 .compat_from_user = hashlimit_mt_compat_from_user,
616 .compat_to_user = hashlimit_mt_compat_to_user, 858 .compat_to_user = hashlimit_mt_compat_to_user,
617#endif 859#endif
618 .checkentry = hashlimit_mt_check, 860 .checkentry = hashlimit_mt_check_v0,
619 .destroy = hashlimit_mt_destroy, 861 .destroy = hashlimit_mt_destroy_v0,
620 .me = THIS_MODULE 862 .me = THIS_MODULE
621 }, 863 },
864 {
865 .name = "hashlimit",
866 .revision = 1,
867 .family = AF_INET6,
868 .match = hashlimit_mt,
869 .matchsize = sizeof(struct xt_hashlimit_mtinfo1),
870 .checkentry = hashlimit_mt_check,
871 .destroy = hashlimit_mt_destroy,
872 .me = THIS_MODULE,
873 },
622#endif 874#endif
623}; 875};
624 876
625/* PROC stuff */ 877/* PROC stuff */
626static void *dl_seq_start(struct seq_file *s, loff_t *pos) 878static void *dl_seq_start(struct seq_file *s, loff_t *pos)
879 __acquires(htable->lock)
627{ 880{
628 struct proc_dir_entry *pde = s->private; 881 struct proc_dir_entry *pde = s->private;
629 struct xt_hashlimit_htable *htable = pde->data; 882 struct xt_hashlimit_htable *htable = pde->data;
@@ -656,6 +909,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
656} 909}
657 910
658static void dl_seq_stop(struct seq_file *s, void *v) 911static void dl_seq_stop(struct seq_file *s, void *v)
912 __releases(htable->lock)
659{ 913{
660 struct proc_dir_entry *pde = s->private; 914 struct proc_dir_entry *pde = s->private;
661 struct xt_hashlimit_htable *htable = pde->data; 915 struct xt_hashlimit_htable *htable = pde->data;
@@ -676,9 +930,9 @@ static int dl_seq_real_show(struct dsthash_ent *ent, int family,
676 return seq_printf(s, "%ld %u.%u.%u.%u:%u->" 930 return seq_printf(s, "%ld %u.%u.%u.%u:%u->"
677 "%u.%u.%u.%u:%u %u %u %u\n", 931 "%u.%u.%u.%u:%u %u %u %u\n",
678 (long)(ent->expires - jiffies)/HZ, 932 (long)(ent->expires - jiffies)/HZ,
679 NIPQUAD(ent->dst.addr.ip.src), 933 NIPQUAD(ent->dst.ip.src),
680 ntohs(ent->dst.src_port), 934 ntohs(ent->dst.src_port),
681 NIPQUAD(ent->dst.addr.ip.dst), 935 NIPQUAD(ent->dst.ip.dst),
682 ntohs(ent->dst.dst_port), 936 ntohs(ent->dst.dst_port),
683 ent->rateinfo.credit, ent->rateinfo.credit_cap, 937 ent->rateinfo.credit, ent->rateinfo.credit_cap,
684 ent->rateinfo.cost); 938 ent->rateinfo.cost);
@@ -687,9 +941,9 @@ static int dl_seq_real_show(struct dsthash_ent *ent, int family,
687 return seq_printf(s, "%ld " NIP6_FMT ":%u->" 941 return seq_printf(s, "%ld " NIP6_FMT ":%u->"
688 NIP6_FMT ":%u %u %u %u\n", 942 NIP6_FMT ":%u %u %u %u\n",
689 (long)(ent->expires - jiffies)/HZ, 943 (long)(ent->expires - jiffies)/HZ,
690 NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.src), 944 NIP6(*(struct in6_addr *)&ent->dst.ip6.src),
691 ntohs(ent->dst.src_port), 945 ntohs(ent->dst.src_port),
692 NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.dst), 946 NIP6(*(struct in6_addr *)&ent->dst.ip6.dst),
693 ntohs(ent->dst.dst_port), 947 ntohs(ent->dst.dst_port),
694 ent->rateinfo.credit, ent->rateinfo.credit_cap, 948 ent->rateinfo.credit, ent->rateinfo.credit_cap,
695 ent->rateinfo.cost); 949 ent->rateinfo.cost);
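
The new srcmask/dstmask handling in xt_hashlimit boils down to zeroing everything below the prefix length before hashing, so that whole subnets share a single rate bucket. A standalone, host-order version of that mask (the 0 and 32 prefix edge cases are handled explicitly here; the in-kernel helper works on __be32 via ntohl/htonl):

#include <stdio.h>
#include <stdint.h>

static uint32_t maskl(uint32_t addr, unsigned int prefix)
{
	if (prefix == 0)
		return 0;			/* /0: everything in one bucket */
	if (prefix >= 32)
		return addr;			/* /32: per-host, unchanged */
	return addr & ~(~(uint32_t)0 >> prefix);
}

int main(void)
{
	uint32_t ip = 0xc0a80a7f;		/* 192.168.10.127 */

	printf("%08x\n", maskl(ip, 24));	/* c0a80a00 -> /24 bucket */
	printf("%08x\n", maskl(ip, 16));	/* c0a80000 -> /16 bucket */
	return 0;
}
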
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index dbea0e0893f3..01035fc0e140 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -101,7 +101,7 @@ iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
101 int r; 101 int r;
102 102
103 for (i = 0; i < 4; ++i) { 103 for (i = 0; i < 4; ++i) {
104 r = a->s6_addr32[i] - b->s6_addr32[i]; 104 r = (__force u32)a->s6_addr32[i] - (__force u32)b->s6_addr32[i];
105 if (r != 0) 105 if (r != 0)
106 return r; 106 return r;
107 } 107 }
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index d382f9cc38b0..9059c16144c3 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -4,8 +4,8 @@
4 * 4 *
5 * (C) 2000 Marc Boucher <marc@mbsi.ca> 5 * (C) 2000 Marc Boucher <marc@mbsi.ca>
6 * 6 *
7 * Copyright © CC Computer Consultants GmbH, 2007 7 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
8 * Contact: <jengelh@computergmbh.de> 8 * <jengelh@computergmbh.de>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -102,13 +102,15 @@ owner_mt(const struct sk_buff *skb, const struct net_device *in,
102 (XT_OWNER_UID | XT_OWNER_GID)) == 0; 102 (XT_OWNER_UID | XT_OWNER_GID)) == 0;
103 103
104 if (info->match & XT_OWNER_UID) 104 if (info->match & XT_OWNER_UID)
105 if ((filp->f_uid != info->uid) ^ 105 if ((filp->f_uid >= info->uid_min &&
106 !!(info->invert & XT_OWNER_UID)) 106 filp->f_uid <= info->uid_max) ^
107 !(info->invert & XT_OWNER_UID))
107 return false; 108 return false;
108 109
109 if (info->match & XT_OWNER_GID) 110 if (info->match & XT_OWNER_GID)
110 if ((filp->f_gid != info->gid) ^ 111 if ((filp->f_gid >= info->gid_min &&
111 !!(info->invert & XT_OWNER_GID)) 112 filp->f_gid <= info->gid_max) ^
113 !(info->invert & XT_OWNER_GID))
112 return false; 114 return false;
113 115
114 return true; 116 return true;
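
xt_owner moves from an exact UID/GID comparison to a range check, still folded through the usual netfilter invert idiom: the test result is XORed with the non-inverted sense, and any disagreement means no match. Pulled out into plain C with placeholder flag values:

#include <stdio.h>
#include <stdbool.h>

#define MATCH_UID	0x1
#define INVERT_UID	0x1

static bool owner_uid_ok(unsigned int uid, unsigned int min, unsigned int max,
			 unsigned int match, unsigned int invert)
{
	if (match & MATCH_UID) {
		bool in_range = uid >= min && uid <= max;

		/* mismatch when in_range disagrees with the requested sense */
		if (in_range ^ !(invert & INVERT_UID))
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", owner_uid_ok(1000, 1000, 1999, MATCH_UID, 0));		/* 1 */
	printf("%d\n", owner_uid_ok(500,  1000, 1999, MATCH_UID, 0));		/* 0 */
	printf("%d\n", owner_uid_ok(500,  1000, 1999, MATCH_UID, INVERT_UID));	/* 1 */
	return 0;
}
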
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6b178e1247b5..ff9fb6ba0c5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1344,6 +1344,22 @@ static void netlink_data_ready(struct sock *sk, int len)
1344 * queueing. 1344 * queueing.
1345 */ 1345 */
1346 1346
1347static void __netlink_release(struct sock *sk)
1348{
1349 /*
1350 * Last sock_put should drop referrence to sk->sk_net. It has already
1351 * been dropped in netlink_kernel_create. Taking referrence to stopping
1352 * namespace is not an option.
1353 * Take referrence to a socket to remove it from netlink lookup table
1354 * _alive_ and after that destroy it in the context of init_net.
1355 */
1356
1357 sock_hold(sk);
1358 sock_release(sk->sk_socket);
1359 sk->sk_net = get_net(&init_net);
1360 sock_put(sk);
1361}
1362
1347struct sock * 1363struct sock *
1348netlink_kernel_create(struct net *net, int unit, unsigned int groups, 1364netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1349 void (*input)(struct sk_buff *skb), 1365 void (*input)(struct sk_buff *skb),
@@ -1362,8 +1378,18 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1362 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) 1378 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1363 return NULL; 1379 return NULL;
1364 1380
1365 if (__netlink_create(net, sock, cb_mutex, unit) < 0) 1381 /*
1366 goto out_sock_release; 1382 * We have to just have a reference on the net from sk, but don't
1383 * get_net it. Besides, we cannot get and then put the net here.
1384 * So we create one inside init_net and the move it to net.
1385 */
1386
1387 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
1388 goto out_sock_release_nosk;
1389
1390 sk = sock->sk;
1391 put_net(sk->sk_net);
1392 sk->sk_net = net;
1367 1393
1368 if (groups < 32) 1394 if (groups < 32)
1369 groups = 32; 1395 groups = 32;
@@ -1372,7 +1398,6 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1372 if (!listeners) 1398 if (!listeners)
1373 goto out_sock_release; 1399 goto out_sock_release;
1374 1400
1375 sk = sock->sk;
1376 sk->sk_data_ready = netlink_data_ready; 1401 sk->sk_data_ready = netlink_data_ready;
1377 if (input) 1402 if (input)
1378 nlk_sk(sk)->netlink_rcv = input; 1403 nlk_sk(sk)->netlink_rcv = input;
@@ -1395,14 +1420,14 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1395 nl_table[unit].registered++; 1420 nl_table[unit].registered++;
1396 } 1421 }
1397 netlink_table_ungrab(); 1422 netlink_table_ungrab();
1398
1399 /* Do not hold an extra referrence to a namespace as this socket is
1400 * internal to a namespace and does not prevent it to stop. */
1401 put_net(net);
1402 return sk; 1423 return sk;
1403 1424
1404out_sock_release: 1425out_sock_release:
1405 kfree(listeners); 1426 kfree(listeners);
1427 __netlink_release(sk);
1428 return NULL;
1429
1430out_sock_release_nosk:
1406 sock_release(sock); 1431 sock_release(sock);
1407 return NULL; 1432 return NULL;
1408} 1433}
@@ -1415,18 +1440,7 @@ netlink_kernel_release(struct sock *sk)
1415 if (sk == NULL || sk->sk_socket == NULL) 1440 if (sk == NULL || sk->sk_socket == NULL)
1416 return; 1441 return;
1417 1442
1418 /* 1443 __netlink_release(sk);
1419 * Last sock_put should drop referrence to sk->sk_net. It has already
1420 * been dropped in netlink_kernel_create. Taking referrence to stopping
1421 * namespace is not an option.
1422 * Take referrence to a socket to remove it from netlink lookup table
1423 * _alive_ and after that destroy it in the context of init_net.
1424 */
1425 sock_hold(sk);
1426 sock_release(sk->sk_socket);
1427
1428 sk->sk_net = get_net(&init_net);
1429 sock_put(sk);
1430} 1444}
1431EXPORT_SYMBOL(netlink_kernel_release); 1445EXPORT_SYMBOL(netlink_kernel_release);
1432 1446
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index d1e9d68f8ba0..e4b051dbed61 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -84,6 +84,7 @@ static void rfkill_schedule_toggle(struct rfkill_task *task)
84static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN); 84static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN);
85static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH); 85static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
86static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB); 86static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
87static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
87 88
88static void rfkill_event(struct input_handle *handle, unsigned int type, 89static void rfkill_event(struct input_handle *handle, unsigned int type,
89 unsigned int code, int down) 90 unsigned int code, int down)
@@ -99,6 +100,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
99 case KEY_UWB: 100 case KEY_UWB:
100 rfkill_schedule_toggle(&rfkill_uwb); 101 rfkill_schedule_toggle(&rfkill_uwb);
101 break; 102 break;
103 case KEY_WIMAX:
104 rfkill_schedule_toggle(&rfkill_wimax);
105 break;
102 default: 106 default:
103 break; 107 break;
104 } 108 }
@@ -159,6 +163,11 @@ static const struct input_device_id rfkill_ids[] = {
159 .evbit = { BIT_MASK(EV_KEY) }, 163 .evbit = { BIT_MASK(EV_KEY) },
160 .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, 164 .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
161 }, 165 },
166 {
167 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
168 .evbit = { BIT_MASK(EV_KEY) },
169 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
170 },
162 { } 171 { }
163}; 172};
164 173
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index d06d338812e9..6562f868e82f 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -126,6 +126,9 @@ static ssize_t rfkill_type_show(struct device *dev,
126 case RFKILL_TYPE_UWB: 126 case RFKILL_TYPE_UWB:
127 type = "ultrawideband"; 127 type = "ultrawideband";
128 break; 128 break;
129 case RFKILL_TYPE_WIMAX:
130 type = "wimax";
131 break;
129 default: 132 default:
130 BUG(); 133 BUG();
131 } 134 }
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index 3c04b00dab74..d9231245a79a 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -15,7 +15,7 @@
15#include <net/af_rxrpc.h> 15#include <net/af_rxrpc.h>
16#include "ar-internal.h" 16#include "ar-internal.h"
17 17
18const char *rxrpc_call_states[] = { 18const char *const rxrpc_call_states[] = {
19 [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq", 19 [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
20 [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl", 20 [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
21 [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", 21 [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 58aaf892238e..1aaa2e804b0d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -565,9 +565,9 @@ extern void __exit rxrpc_destroy_all_peers(void);
565/* 565/*
566 * ar-proc.c 566 * ar-proc.c
567 */ 567 */
568extern const char *rxrpc_call_states[]; 568extern const char *const rxrpc_call_states[];
569extern struct file_operations rxrpc_call_seq_fops; 569extern const struct file_operations rxrpc_call_seq_fops;
570extern struct file_operations rxrpc_connection_seq_fops; 570extern const struct file_operations rxrpc_connection_seq_fops;
571 571
572/* 572/*
573 * ar-recvmsg.c 573 * ar-recvmsg.c
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
index 2e83ce325d15..83eda247fe48 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/ar-proc.c
@@ -14,7 +14,7 @@
14#include <net/af_rxrpc.h> 14#include <net/af_rxrpc.h>
15#include "ar-internal.h" 15#include "ar-internal.h"
16 16
17static const char *rxrpc_conn_states[] = { 17static const char *const rxrpc_conn_states[] = {
18 [RXRPC_CONN_UNUSED] = "Unused ", 18 [RXRPC_CONN_UNUSED] = "Unused ",
19 [RXRPC_CONN_CLIENT] = "Client ", 19 [RXRPC_CONN_CLIENT] = "Client ",
20 [RXRPC_CONN_SERVER_UNSECURED] = "SvUnsec ", 20 [RXRPC_CONN_SERVER_UNSECURED] = "SvUnsec ",
@@ -98,7 +98,7 @@ static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
98 return seq_open(file, &rxrpc_call_seq_ops); 98 return seq_open(file, &rxrpc_call_seq_ops);
99} 99}
100 100
101struct file_operations rxrpc_call_seq_fops = { 101const struct file_operations rxrpc_call_seq_fops = {
102 .owner = THIS_MODULE, 102 .owner = THIS_MODULE,
103 .open = rxrpc_call_seq_open, 103 .open = rxrpc_call_seq_open,
104 .read = seq_read, 104 .read = seq_read,
@@ -183,7 +183,7 @@ static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
183 return seq_open(file, &rxrpc_connection_seq_ops); 183 return seq_open(file, &rxrpc_connection_seq_ops);
184} 184}
185 185
186struct file_operations rxrpc_connection_seq_fops = { 186const struct file_operations rxrpc_connection_seq_fops = {
187 .owner = THIS_MODULE, 187 .owner = THIS_MODULE,
188 .open = rxrpc_connection_seq_open, 188 .open = rxrpc_connection_seq_open,
189 .read = seq_read, 189 .read = seq_read,
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 87af7c913d81..82adfe6447d7 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -198,7 +198,7 @@ config NET_SCH_NETEM
198 198
199config NET_SCH_INGRESS 199config NET_SCH_INGRESS
200 tristate "Ingress Qdisc" 200 tristate "Ingress Qdisc"
201 depends on NET_CLS_ACT || NETFILTER 201 depends on NET_CLS_ACT
202 ---help--- 202 ---help---
203 Say Y here if you want to use classifiers for incoming packets. 203 Say Y here if you want to use classifiers for incoming packets.
204 If unsure, say Y. 204 If unsure, say Y.
@@ -307,6 +307,17 @@ config NET_CLS_RSVP6
307 To compile this code as a module, choose M here: the 307 To compile this code as a module, choose M here: the
308 module will be called cls_rsvp6. 308 module will be called cls_rsvp6.
309 309
310config NET_CLS_FLOW
311 tristate "Flow classifier"
312 select NET_CLS
313 ---help---
314 If you say Y here, you will be able to classify packets based on
315 a configurable combination of packet keys. This is mostly useful
316 in combination with SFQ.
317
318 To compile this code as a module, choose M here: the
319 module will be called cls_flow.
320
310config NET_EMATCH 321config NET_EMATCH
311 bool "Extended Matches" 322 bool "Extended Matches"
312 select NET_CLS 323 select NET_CLS
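
The help text above refers to a "configurable combination of packet keys"; in the classifier added later in this patch (net/sched/cls_flow.c), that combination arrives from userspace as a bitmask in the TCA_FLOW_KEYS attribute, from which the filter derives its per-packet key count. A minimal userspace illustration follows; the FLOW_KEY_* values below are hypothetical stand-ins, since the real definitions live in the linux/pkt_cls.h change, which is outside this net/ diff.

#include <stdio.h>

/* Hypothetical stand-ins for the FLOW_KEY_* values added to linux/pkt_cls.h. */
enum { FLOW_KEY_SRC, FLOW_KEY_DST, FLOW_KEY_PROTO_SRC, FLOW_KEY_PROTO_DST };

int main(void)
{
	/* Keys a userspace tool might encode into TCA_FLOW_KEYS. */
	unsigned int keymask = (1u << FLOW_KEY_SRC) | (1u << FLOW_KEY_DST) |
			       (1u << FLOW_KEY_PROTO_SRC) |
			       (1u << FLOW_KEY_PROTO_DST);

	/* flow_change() derives the key count the same way, using
	 * hweight32() in place of the GCC builtin. */
	printf("nkeys = %d\n", __builtin_popcount(keymask));
	return 0;
}
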
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 81ecbe8e7dce..1d2b0f7df848 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o
35obj-$(CONFIG_NET_CLS_TCINDEX) += cls_tcindex.o 35obj-$(CONFIG_NET_CLS_TCINDEX) += cls_tcindex.o
36obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o 36obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o
37obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o 37obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o
38obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o
38obj-$(CONFIG_NET_EMATCH) += ematch.o 39obj-$(CONFIG_NET_EMATCH) += ematch.o
39obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o 40obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o
40obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o 41obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3377ca0d0a0c..0fbedcabf111 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -482,7 +482,7 @@ EXPORT_SYMBOL(tcf_exts_destroy);
482 482
483int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb, 483int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
484 struct nlattr *rate_tlv, struct tcf_exts *exts, 484 struct nlattr *rate_tlv, struct tcf_exts *exts,
485 struct tcf_ext_map *map) 485 const struct tcf_ext_map *map)
486{ 486{
487 memset(exts, 0, sizeof(*exts)); 487 memset(exts, 0, sizeof(*exts));
488 488
@@ -535,7 +535,7 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
535EXPORT_SYMBOL(tcf_exts_change); 535EXPORT_SYMBOL(tcf_exts_change);
536 536
537int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts, 537int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
538 struct tcf_ext_map *map) 538 const struct tcf_ext_map *map)
539{ 539{
540#ifdef CONFIG_NET_CLS_ACT 540#ifdef CONFIG_NET_CLS_ACT
541 if (map->action && exts->action) { 541 if (map->action && exts->action) {
@@ -571,7 +571,7 @@ EXPORT_SYMBOL(tcf_exts_dump);
571 571
572 572
573int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts, 573int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
574 struct tcf_ext_map *map) 574 const struct tcf_ext_map *map)
575{ 575{
576#ifdef CONFIG_NET_CLS_ACT 576#ifdef CONFIG_NET_CLS_ACT
577 if (exts->action) 577 if (exts->action)
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index bfb4342ea88c..956915c217d6 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -35,7 +35,7 @@ struct basic_filter
35 struct list_head link; 35 struct list_head link;
36}; 36};
37 37
38static struct tcf_ext_map basic_ext_map = { 38static const struct tcf_ext_map basic_ext_map = {
39 .action = TCA_BASIC_ACT, 39 .action = TCA_BASIC_ACT,
40 .police = TCA_BASIC_POLICE 40 .police = TCA_BASIC_POLICE
41}; 41};
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
new file mode 100644
index 000000000000..5a7f6a3060fc
--- /dev/null
+++ b/net/sched/cls_flow.c
@@ -0,0 +1,660 @@
1/*
2 * net/sched/cls_flow.c Generic flow classifier
3 *
4 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/list.h>
15#include <linux/jhash.h>
16#include <linux/random.h>
17#include <linux/pkt_cls.h>
18#include <linux/skbuff.h>
19#include <linux/in.h>
20#include <linux/ip.h>
21#include <linux/ipv6.h>
22
23#include <net/pkt_cls.h>
24#include <net/ip.h>
25#include <net/route.h>
26#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
27#include <net/netfilter/nf_conntrack.h>
28#endif
29
30struct flow_head {
31 struct list_head filters;
32};
33
34struct flow_filter {
35 struct list_head list;
36 struct tcf_exts exts;
37 struct tcf_ematch_tree ematches;
38 u32 handle;
39
40 u32 nkeys;
41 u32 keymask;
42 u32 mode;
43 u32 mask;
44 u32 xor;
45 u32 rshift;
46 u32 addend;
47 u32 divisor;
48 u32 baseclass;
49};
50
51static u32 flow_hashrnd __read_mostly;
52static int flow_hashrnd_initted __read_mostly;
53
54static const struct tcf_ext_map flow_ext_map = {
55 .action = TCA_FLOW_ACT,
56 .police = TCA_FLOW_POLICE,
57};
58
59static inline u32 addr_fold(void *addr)
60{
61 unsigned long a = (unsigned long)addr;
62
63 return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
64}
65
66static u32 flow_get_src(const struct sk_buff *skb)
67{
68 switch (skb->protocol) {
69 case __constant_htons(ETH_P_IP):
70 return ntohl(ip_hdr(skb)->saddr);
71 case __constant_htons(ETH_P_IPV6):
72 return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
73 default:
74 return addr_fold(skb->sk);
75 }
76}
77
78static u32 flow_get_dst(const struct sk_buff *skb)
79{
80 switch (skb->protocol) {
81 case __constant_htons(ETH_P_IP):
82 return ntohl(ip_hdr(skb)->daddr);
83 case __constant_htons(ETH_P_IPV6):
84 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
85 default:
86 return addr_fold(skb->dst) ^ (__force u16)skb->protocol;
87 }
88}
89
90static u32 flow_get_proto(const struct sk_buff *skb)
91{
92 switch (skb->protocol) {
93 case __constant_htons(ETH_P_IP):
94 return ip_hdr(skb)->protocol;
95 case __constant_htons(ETH_P_IPV6):
96 return ipv6_hdr(skb)->nexthdr;
97 default:
98 return 0;
99 }
100}
101
102static int has_ports(u8 protocol)
103{
104 switch (protocol) {
105 case IPPROTO_TCP:
106 case IPPROTO_UDP:
107 case IPPROTO_UDPLITE:
108 case IPPROTO_SCTP:
109 case IPPROTO_DCCP:
110 case IPPROTO_ESP:
111 return 1;
112 default:
113 return 0;
114 }
115}
116
117static u32 flow_get_proto_src(const struct sk_buff *skb)
118{
119 u32 res = 0;
120
121 switch (skb->protocol) {
122 case __constant_htons(ETH_P_IP): {
123 struct iphdr *iph = ip_hdr(skb);
124
125 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
126 has_ports(iph->protocol))
127 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
128 break;
129 }
130 case __constant_htons(ETH_P_IPV6): {
131 struct ipv6hdr *iph = ipv6_hdr(skb);
132
133 if (has_ports(iph->nexthdr))
134 res = ntohs(*(__be16 *)&iph[1]);
135 break;
136 }
137 default:
138 res = addr_fold(skb->sk);
139 }
140
141 return res;
142}
143
144static u32 flow_get_proto_dst(const struct sk_buff *skb)
145{
146 u32 res = 0;
147
148 switch (skb->protocol) {
149 case __constant_htons(ETH_P_IP): {
150 struct iphdr *iph = ip_hdr(skb);
151
152 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
153 has_ports(iph->protocol))
154 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
155 break;
156 }
157 case __constant_htons(ETH_P_IPV6): {
158 struct ipv6hdr *iph = ipv6_hdr(skb);
159
160 if (has_ports(iph->nexthdr))
161 res = ntohs(*(__be16 *)((void *)&iph[1] + 2));
162 break;
163 }
164 default:
165 res = addr_fold(skb->dst) ^ (__force u16)skb->protocol;
166 }
167
168 return res;
169}
170
171static u32 flow_get_iif(const struct sk_buff *skb)
172{
173 return skb->iif;
174}
175
176static u32 flow_get_priority(const struct sk_buff *skb)
177{
178 return skb->priority;
179}
180
181static u32 flow_get_mark(const struct sk_buff *skb)
182{
183 return skb->mark;
184}
185
186static u32 flow_get_nfct(const struct sk_buff *skb)
187{
188#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
189 return addr_fold(skb->nfct);
190#else
191 return 0;
192#endif
193}
194
195#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
196#define CTTUPLE(skb, member) \
197({ \
198 enum ip_conntrack_info ctinfo; \
199 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
200 if (ct == NULL) \
201 goto fallback; \
202 ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \
203})
204#else
205#define CTTUPLE(skb, member) \
206({ \
207 goto fallback; \
208 0; \
209})
210#endif
211
212static u32 flow_get_nfct_src(const struct sk_buff *skb)
213{
214 switch (skb->protocol) {
215 case __constant_htons(ETH_P_IP):
216 return ntohl(CTTUPLE(skb, src.u3.ip));
217 case __constant_htons(ETH_P_IPV6):
218 return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
219 }
220fallback:
221 return flow_get_src(skb);
222}
223
224static u32 flow_get_nfct_dst(const struct sk_buff *skb)
225{
226 switch (skb->protocol) {
227 case __constant_htons(ETH_P_IP):
228 return ntohl(CTTUPLE(skb, dst.u3.ip));
229 case __constant_htons(ETH_P_IPV6):
230 return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
231 }
232fallback:
233 return flow_get_dst(skb);
234}
235
236static u32 flow_get_nfct_proto_src(const struct sk_buff *skb)
237{
238 return ntohs(CTTUPLE(skb, src.u.all));
239fallback:
240 return flow_get_proto_src(skb);
241}
242
243static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb)
244{
245 return ntohs(CTTUPLE(skb, dst.u.all));
246fallback:
247 return flow_get_proto_dst(skb);
248}
249
250static u32 flow_get_rtclassid(const struct sk_buff *skb)
251{
252#ifdef CONFIG_NET_CLS_ROUTE
253 if (skb->dst)
254 return skb->dst->tclassid;
255#endif
256 return 0;
257}
258
259static u32 flow_get_skuid(const struct sk_buff *skb)
260{
261 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
262 return skb->sk->sk_socket->file->f_uid;
263 return 0;
264}
265
266static u32 flow_get_skgid(const struct sk_buff *skb)
267{
268 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
269 return skb->sk->sk_socket->file->f_gid;
270 return 0;
271}
272
273static u32 flow_key_get(const struct sk_buff *skb, int key)
274{
275 switch (key) {
276 case FLOW_KEY_SRC:
277 return flow_get_src(skb);
278 case FLOW_KEY_DST:
279 return flow_get_dst(skb);
280 case FLOW_KEY_PROTO:
281 return flow_get_proto(skb);
282 case FLOW_KEY_PROTO_SRC:
283 return flow_get_proto_src(skb);
284 case FLOW_KEY_PROTO_DST:
285 return flow_get_proto_dst(skb);
286 case FLOW_KEY_IIF:
287 return flow_get_iif(skb);
288 case FLOW_KEY_PRIORITY:
289 return flow_get_priority(skb);
290 case FLOW_KEY_MARK:
291 return flow_get_mark(skb);
292 case FLOW_KEY_NFCT:
293 return flow_get_nfct(skb);
294 case FLOW_KEY_NFCT_SRC:
295 return flow_get_nfct_src(skb);
296 case FLOW_KEY_NFCT_DST:
297 return flow_get_nfct_dst(skb);
298 case FLOW_KEY_NFCT_PROTO_SRC:
299 return flow_get_nfct_proto_src(skb);
300 case FLOW_KEY_NFCT_PROTO_DST:
301 return flow_get_nfct_proto_dst(skb);
302 case FLOW_KEY_RTCLASSID:
303 return flow_get_rtclassid(skb);
304 case FLOW_KEY_SKUID:
305 return flow_get_skuid(skb);
306 case FLOW_KEY_SKGID:
307 return flow_get_skgid(skb);
308 default:
309 WARN_ON(1);
310 return 0;
311 }
312}
313
314static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
315 struct tcf_result *res)
316{
317 struct flow_head *head = tp->root;
318 struct flow_filter *f;
319 u32 keymask;
320 u32 classid;
321 unsigned int n, key;
322 int r;
323
324 list_for_each_entry(f, &head->filters, list) {
325 u32 keys[f->nkeys];
326
327 if (!tcf_em_tree_match(skb, &f->ematches, NULL))
328 continue;
329
330 keymask = f->keymask;
331
332 for (n = 0; n < f->nkeys; n++) {
333 key = ffs(keymask) - 1;
334 keymask &= ~(1 << key);
335 keys[n] = flow_key_get(skb, key);
336 }
337
338 if (f->mode == FLOW_MODE_HASH)
339 classid = jhash2(keys, f->nkeys, flow_hashrnd);
340 else {
341 classid = keys[0];
342 classid = (classid & f->mask) ^ f->xor;
343 classid = (classid >> f->rshift) + f->addend;
344 }
345
346 if (f->divisor)
347 classid %= f->divisor;
348
349 res->class = 0;
350 res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);
351
352 r = tcf_exts_exec(skb, &f->exts, res);
353 if (r < 0)
354 continue;
355 return r;
356 }
357 return -1;
358}
359
360static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
361 [TCA_FLOW_KEYS] = { .type = NLA_U32 },
362 [TCA_FLOW_MODE] = { .type = NLA_U32 },
363 [TCA_FLOW_BASECLASS] = { .type = NLA_U32 },
364 [TCA_FLOW_RSHIFT] = { .type = NLA_U32 },
365 [TCA_FLOW_ADDEND] = { .type = NLA_U32 },
366 [TCA_FLOW_MASK] = { .type = NLA_U32 },
367 [TCA_FLOW_XOR] = { .type = NLA_U32 },
368 [TCA_FLOW_DIVISOR] = { .type = NLA_U32 },
369 [TCA_FLOW_ACT] = { .type = NLA_NESTED },
370 [TCA_FLOW_POLICE] = { .type = NLA_NESTED },
371 [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED },
372};
373
374static int flow_change(struct tcf_proto *tp, unsigned long base,
375 u32 handle, struct nlattr **tca,
376 unsigned long *arg)
377{
378 struct flow_head *head = tp->root;
379 struct flow_filter *f;
380 struct nlattr *opt = tca[TCA_OPTIONS];
381 struct nlattr *tb[TCA_FLOW_MAX + 1];
382 struct tcf_exts e;
383 struct tcf_ematch_tree t;
384 unsigned int nkeys = 0;
385 u32 baseclass = 0;
386 u32 keymask = 0;
387 u32 mode;
388 int err;
389
390 if (opt == NULL)
391 return -EINVAL;
392
393 err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
394 if (err < 0)
395 return err;
396
397 if (tb[TCA_FLOW_BASECLASS]) {
398 baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
399 if (TC_H_MIN(baseclass) == 0)
400 return -EINVAL;
401 }
402
403 if (tb[TCA_FLOW_KEYS]) {
404 keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);
405 if (fls(keymask) - 1 > FLOW_KEY_MAX)
406 return -EOPNOTSUPP;
407
408 nkeys = hweight32(keymask);
409 if (nkeys == 0)
410 return -EINVAL;
411 }
412
413 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
414 if (err < 0)
415 return err;
416
417 err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
418 if (err < 0)
419 goto err1;
420
421 f = (struct flow_filter *)*arg;
422 if (f != NULL) {
423 err = -EINVAL;
424 if (f->handle != handle && handle)
425 goto err2;
426
427 mode = f->mode;
428 if (tb[TCA_FLOW_MODE])
429 mode = nla_get_u32(tb[TCA_FLOW_MODE]);
430 if (mode != FLOW_MODE_HASH && nkeys > 1)
431 goto err2;
432 } else {
433 err = -EINVAL;
434 if (!handle)
435 goto err2;
436 if (!tb[TCA_FLOW_KEYS])
437 goto err2;
438
439 mode = FLOW_MODE_MAP;
440 if (tb[TCA_FLOW_MODE])
441 mode = nla_get_u32(tb[TCA_FLOW_MODE]);
442 if (mode != FLOW_MODE_HASH && nkeys > 1)
443 goto err2;
444
445 if (TC_H_MAJ(baseclass) == 0)
446 baseclass = TC_H_MAKE(tp->q->handle, baseclass);
447 if (TC_H_MIN(baseclass) == 0)
448 baseclass = TC_H_MAKE(baseclass, 1);
449
450 err = -ENOBUFS;
451 f = kzalloc(sizeof(*f), GFP_KERNEL);
452 if (f == NULL)
453 goto err2;
454
455 f->handle = handle;
456 f->mask = ~0U;
457 }
458
459 tcf_exts_change(tp, &f->exts, &e);
460 tcf_em_tree_change(tp, &f->ematches, &t);
461
462 tcf_tree_lock(tp);
463
464 if (tb[TCA_FLOW_KEYS]) {
465 f->keymask = keymask;
466 f->nkeys = nkeys;
467 }
468
469 f->mode = mode;
470
471 if (tb[TCA_FLOW_MASK])
472 f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
473 if (tb[TCA_FLOW_XOR])
474 f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
475 if (tb[TCA_FLOW_RSHIFT])
476 f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
477 if (tb[TCA_FLOW_ADDEND])
478 f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
479
480 if (tb[TCA_FLOW_DIVISOR])
481 f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
482 if (baseclass)
483 f->baseclass = baseclass;
484
485 if (*arg == 0)
486 list_add_tail(&f->list, &head->filters);
487
488 tcf_tree_unlock(tp);
489
490 *arg = (unsigned long)f;
491 return 0;
492
493err2:
494 tcf_em_tree_destroy(tp, &t);
495err1:
496 tcf_exts_destroy(tp, &e);
497 return err;
498}
499
500static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
501{
502 tcf_exts_destroy(tp, &f->exts);
503 tcf_em_tree_destroy(tp, &f->ematches);
504 kfree(f);
505}
506
507static int flow_delete(struct tcf_proto *tp, unsigned long arg)
508{
509 struct flow_filter *f = (struct flow_filter *)arg;
510
511 tcf_tree_lock(tp);
512 list_del(&f->list);
513 tcf_tree_unlock(tp);
514 flow_destroy_filter(tp, f);
515 return 0;
516}
517
518static int flow_init(struct tcf_proto *tp)
519{
520 struct flow_head *head;
521
522 if (!flow_hashrnd_initted) {
523 get_random_bytes(&flow_hashrnd, 4);
524 flow_hashrnd_initted = 1;
525 }
526
527 head = kzalloc(sizeof(*head), GFP_KERNEL);
528 if (head == NULL)
529 return -ENOBUFS;
530 INIT_LIST_HEAD(&head->filters);
531 tp->root = head;
532 return 0;
533}
534
535static void flow_destroy(struct tcf_proto *tp)
536{
537 struct flow_head *head = tp->root;
538 struct flow_filter *f, *next;
539
540 list_for_each_entry_safe(f, next, &head->filters, list) {
541 list_del(&f->list);
542 flow_destroy_filter(tp, f);
543 }
544 kfree(head);
545}
546
547static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
548{
549 struct flow_head *head = tp->root;
550 struct flow_filter *f;
551
552 list_for_each_entry(f, &head->filters, list)
553 if (f->handle == handle)
554 return (unsigned long)f;
555 return 0;
556}
557
558static void flow_put(struct tcf_proto *tp, unsigned long f)
559{
560 return;
561}
562
563static int flow_dump(struct tcf_proto *tp, unsigned long fh,
564 struct sk_buff *skb, struct tcmsg *t)
565{
566 struct flow_filter *f = (struct flow_filter *)fh;
567 struct nlattr *nest;
568
569 if (f == NULL)
570 return skb->len;
571
572 t->tcm_handle = f->handle;
573
574 nest = nla_nest_start(skb, TCA_OPTIONS);
575 if (nest == NULL)
576 goto nla_put_failure;
577
578 NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
579 NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);
580
581 if (f->mask != ~0 || f->xor != 0) {
582 NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
583 NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
584 }
585 if (f->rshift)
586 NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
587 if (f->addend)
588 NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);
589
590 if (f->divisor)
591 NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
592 if (f->baseclass)
593 NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);
594
595 if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
596 goto nla_put_failure;
597
598 if (f->ematches.hdr.nmatches &&
599 tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
600 goto nla_put_failure;
601
602 nla_nest_end(skb, nest);
603
604 if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
605 goto nla_put_failure;
606
607 return skb->len;
608
609nla_put_failure:
610 nlmsg_trim(skb, nest);
611 return -1;
612}
613
614static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
615{
616 struct flow_head *head = tp->root;
617 struct flow_filter *f;
618
619 list_for_each_entry(f, &head->filters, list) {
620 if (arg->count < arg->skip)
621 goto skip;
622 if (arg->fn(tp, (unsigned long)f, arg) < 0) {
623 arg->stop = 1;
624 break;
625 }
626skip:
627 arg->count++;
628 }
629}
630
631static struct tcf_proto_ops cls_flow_ops __read_mostly = {
632 .kind = "flow",
633 .classify = flow_classify,
634 .init = flow_init,
635 .destroy = flow_destroy,
636 .change = flow_change,
637 .delete = flow_delete,
638 .get = flow_get,
639 .put = flow_put,
640 .dump = flow_dump,
641 .walk = flow_walk,
642 .owner = THIS_MODULE,
643};
644
645static int __init cls_flow_init(void)
646{
647 return register_tcf_proto_ops(&cls_flow_ops);
648}
649
650static void __exit cls_flow_exit(void)
651{
652 unregister_tcf_proto_ops(&cls_flow_ops);
653}
654
655module_init(cls_flow_init);
656module_exit(cls_flow_exit);
657
658MODULE_LICENSE("GPL");
659MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
660MODULE_DESCRIPTION("TC flow classifier");
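
The FLOW_MODE_MAP branch of flow_classify() above turns a single key into a class minor ID through a fixed sequence of mask, xor, right-shift, add, and optional modulo steps; the result is then combined with the configured base class via TC_H_MAKE(). The small userspace sketch below reproduces just that arithmetic; the parameter values are arbitrary illustrations, not defaults.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the FLOW_MODE_MAP arithmetic in flow_classify(); the parameters
 * correspond to the TCA_FLOW_MASK/XOR/RSHIFT/ADDEND/DIVISOR attributes
 * parsed in flow_change(). */
static uint32_t flow_map(uint32_t key, uint32_t mask, uint32_t xor_val,
			 uint32_t rshift, uint32_t addend, uint32_t divisor)
{
	uint32_t classid = key;

	classid = (classid & mask) ^ xor_val;
	classid = (classid >> rshift) + addend;
	if (divisor)
		classid %= divisor;
	return classid;
}

int main(void)
{
	/* Host-order IPv4 source 192.168.1.77, as flow_get_src() would
	 * return it: keep the low byte and spread it over 16 classes. */
	uint32_t key = (192u << 24) | (168u << 16) | (1u << 8) | 77u;

	printf("minor class id = %u\n", flow_map(key, 0xff, 0, 0, 1, 16));
	return 0;
}
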
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 436a6e7c438e..b0f90e593af0 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -47,7 +47,7 @@ struct fw_filter
47 struct tcf_exts exts; 47 struct tcf_exts exts;
48}; 48};
49 49
50static struct tcf_ext_map fw_ext_map = { 50static const struct tcf_ext_map fw_ext_map = {
51 .action = TCA_FW_ACT, 51 .action = TCA_FW_ACT,
52 .police = TCA_FW_POLICE 52 .police = TCA_FW_POLICE
53}; 53};
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index f7e7d3955d28..784dcb870b98 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -62,7 +62,7 @@ struct route4_filter
62 62
63#define ROUTE4_FAILURE ((struct route4_filter*)(-1L)) 63#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
64 64
65static struct tcf_ext_map route_ext_map = { 65static const struct tcf_ext_map route_ext_map = {
66 .police = TCA_ROUTE4_POLICE, 66 .police = TCA_ROUTE4_POLICE,
67 .action = TCA_ROUTE4_ACT 67 .action = TCA_ROUTE4_ACT
68}; 68};
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index ee60b2d1705d..7a7bff5ded24 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -55,7 +55,7 @@ struct tcindex_data {
55 int fall_through; /* 0: only classify if explicit match */ 55 int fall_through; /* 0: only classify if explicit match */
56}; 56};
57 57
58static struct tcf_ext_map tcindex_ext_map = { 58static const struct tcf_ext_map tcindex_ext_map = {
59 .police = TCA_TCINDEX_POLICE, 59 .police = TCA_TCINDEX_POLICE,
60 .action = TCA_TCINDEX_ACT 60 .action = TCA_TCINDEX_ACT
61}; 61};
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index e8a775689123..b18fa95ef248 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -82,7 +82,7 @@ struct tc_u_common
82 u32 hgenerator; 82 u32 hgenerator;
83}; 83};
84 84
85static struct tcf_ext_map u32_ext_map = { 85static const struct tcf_ext_map u32_ext_map = {
86 .action = TCA_U32_ACT, 86 .action = TCA_U32_ACT,
87 .police = TCA_U32_POLICE 87 .police = TCA_U32_POLICE
88}; 88};
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 3f72d528273c..274b1ddb160c 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -12,18 +12,10 @@
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/rtnetlink.h> 14#include <linux/rtnetlink.h>
15#include <linux/netfilter_ipv4.h>
16#include <linux/netfilter_ipv6.h>
17#include <linux/netfilter.h>
18#include <net/netlink.h> 15#include <net/netlink.h>
19#include <net/pkt_sched.h> 16#include <net/pkt_sched.h>
20 17
21 18
22/* Thanks to Doron Oz for this hack */
23#if !defined(CONFIG_NET_CLS_ACT) && defined(CONFIG_NETFILTER)
24static int nf_registered;
25#endif
26
27struct ingress_qdisc_data { 19struct ingress_qdisc_data {
28 struct tcf_proto *filter_list; 20 struct tcf_proto *filter_list;
29}; 21};
@@ -84,11 +76,6 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
84 76
85 result = tc_classify(skb, p->filter_list, &res); 77 result = tc_classify(skb, p->filter_list, &res);
86 78
87 /*
88 * Unlike normal "enqueue" functions, ingress_enqueue returns a
89 * firewall FW_* code.
90 */
91#ifdef CONFIG_NET_CLS_ACT
92 sch->bstats.packets++; 79 sch->bstats.packets++;
93 sch->bstats.bytes += skb->len; 80 sch->bstats.bytes += skb->len;
94 switch (result) { 81 switch (result) {
@@ -107,71 +94,10 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
107 result = TC_ACT_OK; 94 result = TC_ACT_OK;
108 break; 95 break;
109 } 96 }
110#else
111 result = NF_ACCEPT;
112 sch->bstats.packets++;
113 sch->bstats.bytes += skb->len;
114#endif
115 97
116 return result; 98 return result;
117} 99}
118 100
119#if !defined(CONFIG_NET_CLS_ACT) && defined(CONFIG_NETFILTER)
120static unsigned int ing_hook(unsigned int hook, struct sk_buff *skb,
121 const struct net_device *indev,
122 const struct net_device *outdev,
123 int (*okfn)(struct sk_buff *))
124{
125
126 struct Qdisc *q;
127 struct net_device *dev = skb->dev;
128 int fwres = NF_ACCEPT;
129
130 if (dev->qdisc_ingress) {
131 spin_lock(&dev->ingress_lock);
132 if ((q = dev->qdisc_ingress) != NULL)
133 fwres = q->enqueue(skb, q);
134 spin_unlock(&dev->ingress_lock);
135 }
136
137 return fwres;
138}
139
140/* after ipt_filter */
141static struct nf_hook_ops ing_ops[] __read_mostly = {
142 {
143 .hook = ing_hook,
144 .owner = THIS_MODULE,
145 .pf = PF_INET,
146 .hooknum = NF_INET_PRE_ROUTING,
147 .priority = NF_IP_PRI_FILTER + 1,
148 },
149 {
150 .hook = ing_hook,
151 .owner = THIS_MODULE,
152 .pf = PF_INET6,
153 .hooknum = NF_INET_PRE_ROUTING,
154 .priority = NF_IP6_PRI_FILTER + 1,
155 },
156};
157#endif
158
159static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
160{
161#if !defined(CONFIG_NET_CLS_ACT) && defined(CONFIG_NETFILTER)
162	printk("Ingress scheduler: Classifier actions preferred over netfilter\n");
163
164 if (!nf_registered) {
165 if (nf_register_hooks(ing_ops, ARRAY_SIZE(ing_ops)) < 0) {
166 printk("ingress qdisc registration error \n");
167 return -EINVAL;
168 }
169 nf_registered++;
170 }
171#endif
172 return 0;
173}
174
175/* ------------------------------------------------------------- */ 101/* ------------------------------------------------------------- */
176 102
177static void ingress_destroy(struct Qdisc *sch) 103static void ingress_destroy(struct Qdisc *sch)
@@ -213,7 +139,6 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
213 .id = "ingress", 139 .id = "ingress",
214 .priv_size = sizeof(struct ingress_qdisc_data), 140 .priv_size = sizeof(struct ingress_qdisc_data),
215 .enqueue = ingress_enqueue, 141 .enqueue = ingress_enqueue,
216 .init = ingress_init,
217 .destroy = ingress_destroy, 142 .destroy = ingress_destroy,
218 .dump = ingress_dump, 143 .dump = ingress_dump,
219 .owner = THIS_MODULE, 144 .owner = THIS_MODULE,
@@ -227,10 +152,6 @@ static int __init ingress_module_init(void)
227static void __exit ingress_module_exit(void) 152static void __exit ingress_module_exit(void)
228{ 153{
229 unregister_qdisc(&ingress_qdisc_ops); 154 unregister_qdisc(&ingress_qdisc_ops);
230#if !defined(CONFIG_NET_CLS_ACT) && defined(CONFIG_NETFILTER)
231 if (nf_registered)
232 nf_unregister_hooks(ing_ops, ARRAY_SIZE(ing_ops));
233#endif
234} 155}
235 156
236module_init(ingress_module_init) 157module_init(ingress_module_init)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 91af539ab6e6..a20e2ef7704b 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -95,6 +95,7 @@ struct sfq_sched_data
95 int limit; 95 int limit;
96 96
97/* Variables */ 97/* Variables */
98 struct tcf_proto *filter_list;
98 struct timer_list perturb_timer; 99 struct timer_list perturb_timer;
99 u32 perturbation; 100 u32 perturbation;
100 sfq_index tail; /* Index of current slot in round */ 101 sfq_index tail; /* Index of current slot in round */
@@ -155,6 +156,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
155 return sfq_fold_hash(q, h, h2); 156 return sfq_fold_hash(q, h, h2);
156} 157}
157 158
159static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
160 int *qerr)
161{
162 struct sfq_sched_data *q = qdisc_priv(sch);
163 struct tcf_result res;
164 int result;
165
166 if (TC_H_MAJ(skb->priority) == sch->handle &&
167 TC_H_MIN(skb->priority) > 0 &&
168 TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
169 return TC_H_MIN(skb->priority);
170
171 if (!q->filter_list)
172 return sfq_hash(q, skb) + 1;
173
174 *qerr = NET_XMIT_BYPASS;
175 result = tc_classify(skb, q->filter_list, &res);
176 if (result >= 0) {
177#ifdef CONFIG_NET_CLS_ACT
178 switch (result) {
179 case TC_ACT_STOLEN:
180 case TC_ACT_QUEUED:
181 *qerr = NET_XMIT_SUCCESS;
182 case TC_ACT_SHOT:
183 return 0;
184 }
185#endif
186 if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
187 return TC_H_MIN(res.classid);
188 }
189 return 0;
190}
191
158static inline void sfq_link(struct sfq_sched_data *q, sfq_index x) 192static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
159{ 193{
160 sfq_index p, n; 194 sfq_index p, n;
@@ -245,8 +279,18 @@ static int
245sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 279sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
246{ 280{
247 struct sfq_sched_data *q = qdisc_priv(sch); 281 struct sfq_sched_data *q = qdisc_priv(sch);
248 unsigned hash = sfq_hash(q, skb); 282 unsigned int hash;
249 sfq_index x; 283 sfq_index x;
284 int ret;
285
286 hash = sfq_classify(skb, sch, &ret);
287 if (hash == 0) {
288 if (ret == NET_XMIT_BYPASS)
289 sch->qstats.drops++;
290 kfree_skb(skb);
291 return ret;
292 }
293 hash--;
250 294
251 x = q->ht[hash]; 295 x = q->ht[hash];
252 if (x == SFQ_DEPTH) { 296 if (x == SFQ_DEPTH) {
@@ -289,8 +333,18 @@ static int
289sfq_requeue(struct sk_buff *skb, struct Qdisc *sch) 333sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
290{ 334{
291 struct sfq_sched_data *q = qdisc_priv(sch); 335 struct sfq_sched_data *q = qdisc_priv(sch);
292 unsigned hash = sfq_hash(q, skb); 336 unsigned int hash;
293 sfq_index x; 337 sfq_index x;
338 int ret;
339
340 hash = sfq_classify(skb, sch, &ret);
341 if (hash == 0) {
342 if (ret == NET_XMIT_BYPASS)
343 sch->qstats.drops++;
344 kfree_skb(skb);
345 return ret;
346 }
347 hash--;
294 348
295 x = q->ht[hash]; 349 x = q->ht[hash];
296 if (x == SFQ_DEPTH) { 350 if (x == SFQ_DEPTH) {
@@ -465,6 +519,8 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
465static void sfq_destroy(struct Qdisc *sch) 519static void sfq_destroy(struct Qdisc *sch)
466{ 520{
467 struct sfq_sched_data *q = qdisc_priv(sch); 521 struct sfq_sched_data *q = qdisc_priv(sch);
522
523 tcf_destroy_chain(q->filter_list);
468 del_timer(&q->perturb_timer); 524 del_timer(&q->perturb_timer);
469} 525}
470 526
@@ -490,9 +546,79 @@ nla_put_failure:
490 return -1; 546 return -1;
491} 547}
492 548
549static int sfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
550 struct nlattr **tca, unsigned long *arg)
551{
552 return -EOPNOTSUPP;
553}
554
555static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
556{
557 return 0;
558}
559
560static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
561{
562 struct sfq_sched_data *q = qdisc_priv(sch);
563
564 if (cl)
565 return NULL;
566 return &q->filter_list;
567}
568
569static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
570 struct sk_buff *skb, struct tcmsg *tcm)
571{
572 tcm->tcm_handle |= TC_H_MIN(cl);
573 return 0;
574}
575
576static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
577 struct gnet_dump *d)
578{
579 struct sfq_sched_data *q = qdisc_priv(sch);
580 sfq_index idx = q->ht[cl-1];
581 struct gnet_stats_queue qs = { .qlen = q->qs[idx].qlen };
582 struct tc_sfq_xstats xstats = { .allot = q->allot[idx] };
583
584 if (gnet_stats_copy_queue(d, &qs) < 0)
585 return -1;
586 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
587}
588
589static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
590{
591 struct sfq_sched_data *q = qdisc_priv(sch);
592 unsigned int i;
593
594 if (arg->stop)
595 return;
596
597 for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
598 if (q->ht[i] == SFQ_DEPTH ||
599 arg->count < arg->skip) {
600 arg->count++;
601 continue;
602 }
603 if (arg->fn(sch, i + 1, arg) < 0) {
604 arg->stop = 1;
605 break;
606 }
607 arg->count++;
608 }
609}
610
611static const struct Qdisc_class_ops sfq_class_ops = {
612 .get = sfq_get,
613 .change = sfq_change_class,
614 .tcf_chain = sfq_find_tcf,
615 .dump = sfq_dump_class,
616 .dump_stats = sfq_dump_class_stats,
617 .walk = sfq_walk,
618};
619
493static struct Qdisc_ops sfq_qdisc_ops __read_mostly = { 620static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
494 .next = NULL, 621 .cl_ops = &sfq_class_ops,
495 .cl_ops = NULL,
496 .id = "sfq", 622 .id = "sfq",
497 .priv_size = sizeof(struct sfq_sched_data), 623 .priv_size = sizeof(struct sfq_sched_data),
498 .enqueue = sfq_enqueue, 624 .enqueue = sfq_enqueue,
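
The fast path of sfq_classify() above bypasses the filter chain whenever skb->priority already names a minor class of this SFQ instance, which relies on the standard TC handle layout (major number in the upper 16 bits, minor in the lower 16). A self-contained sketch of that mapping is shown below; the macro definitions are copied to match linux/pkt_sched.h, and the example values are illustrative.

#include <stdio.h>
#include <stdint.h>

/* TC handle helpers as defined in linux/pkt_sched.h. */
#define TC_H_MAJ_MASK	0xFFFF0000U
#define TC_H_MIN_MASK	0x0000FFFFU
#define TC_H_MAJ(h)	((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)	((h) & TC_H_MIN_MASK)

int main(void)
{
	uint32_t sch_handle   = 0x00010000;	/* qdisc 1: */
	uint32_t skb_priority = 0x00010005;	/* class 1:5, e.g. assigned via SO_PRIORITY */

	/* Mirrors the shortcut in sfq_classify(): a priority naming a minor
	 * class of this qdisc selects that SFQ bucket directly (buckets are
	 * 1-based, 1..SFQ_HASH_DIVISOR). */
	if (TC_H_MAJ(skb_priority) == sch_handle && TC_H_MIN(skb_priority) > 0)
		printf("bucket %u\n", TC_H_MIN(skb_priority));
	return 0;
}
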
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 1411c7b1fbdc..0444fd0f0d22 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -71,7 +71,7 @@ struct teql_sched_data
71 71
72#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next) 72#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
73 73
74#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT|IFF_BROADCAST) 74#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
75 75
76/* "teql*" qdisc routines */ 76/* "teql*" qdisc routines */
77 77
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index dd98763c8b00..77383e9b3988 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2056,7 +2056,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
2056 break; 2056 break;
2057 2057
2058 case SCTP_PARAM_HMAC_ALGO: 2058 case SCTP_PARAM_HMAC_ALGO:
2059 if (!sctp_auth_enable) 2059 if (sctp_auth_enable)
2060 break; 2060 break;
2061 /* Fall Through */ 2061 /* Fall Through */
2062fallthrough: 2062fallthrough:
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 07fad7ccf832..339ca4a8e89e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1652,7 +1652,7 @@ static int __init x25_init(void)
1652 1652
1653 register_netdevice_notifier(&x25_dev_notifier); 1653 register_netdevice_notifier(&x25_dev_notifier);
1654 1654
1655 printk(KERN_INFO "X.25 for Linux. Version 0.2 for Linux 2.1.15\n"); 1655 printk(KERN_INFO "X.25 for Linux Version 0.2\n");
1656 1656
1657#ifdef CONFIG_SYSCTL 1657#ifdef CONFIG_SYSCTL
1658 x25_register_sysctl(); 1658 x25_register_sysctl();
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index b5c5347aed66..6cc15250de69 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -28,6 +28,105 @@
28 * that instantiated crypto transforms have correct parameters for IPsec 28 * that instantiated crypto transforms have correct parameters for IPsec
29 * purposes. 29 * purposes.
30 */ 30 */
31static struct xfrm_algo_desc aead_list[] = {
32{
33 .name = "rfc4106(gcm(aes))",
34
35 .uinfo = {
36 .aead = {
37 .icv_truncbits = 64,
38 }
39 },
40
41 .desc = {
42 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8,
43 .sadb_alg_ivlen = 8,
44 .sadb_alg_minbits = 128,
45 .sadb_alg_maxbits = 256
46 }
47},
48{
49 .name = "rfc4106(gcm(aes))",
50
51 .uinfo = {
52 .aead = {
53 .icv_truncbits = 96,
54 }
55 },
56
57 .desc = {
58 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12,
59 .sadb_alg_ivlen = 8,
60 .sadb_alg_minbits = 128,
61 .sadb_alg_maxbits = 256
62 }
63},
64{
65 .name = "rfc4106(gcm(aes))",
66
67 .uinfo = {
68 .aead = {
69 .icv_truncbits = 128,
70 }
71 },
72
73 .desc = {
74 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16,
75 .sadb_alg_ivlen = 8,
76 .sadb_alg_minbits = 128,
77 .sadb_alg_maxbits = 256
78 }
79},
80{
81 .name = "rfc4309(ccm(aes))",
82
83 .uinfo = {
84 .aead = {
85 .icv_truncbits = 64,
86 }
87 },
88
89 .desc = {
90 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8,
91 .sadb_alg_ivlen = 8,
92 .sadb_alg_minbits = 128,
93 .sadb_alg_maxbits = 256
94 }
95},
96{
97 .name = "rfc4309(ccm(aes))",
98
99 .uinfo = {
100 .aead = {
101 .icv_truncbits = 96,
102 }
103 },
104
105 .desc = {
106 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12,
107 .sadb_alg_ivlen = 8,
108 .sadb_alg_minbits = 128,
109 .sadb_alg_maxbits = 256
110 }
111},
112{
113 .name = "rfc4309(ccm(aes))",
114
115 .uinfo = {
116 .aead = {
117 .icv_truncbits = 128,
118 }
119 },
120
121 .desc = {
122 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16,
123 .sadb_alg_ivlen = 8,
124 .sadb_alg_minbits = 128,
125 .sadb_alg_maxbits = 256
126 }
127},
128};
129
31static struct xfrm_algo_desc aalg_list[] = { 130static struct xfrm_algo_desc aalg_list[] = {
32{ 131{
33 .name = "hmac(digest_null)", 132 .name = "hmac(digest_null)",
@@ -332,6 +431,11 @@ static struct xfrm_algo_desc calg_list[] = {
332}, 431},
333}; 432};
334 433
434static inline int aead_entries(void)
435{
436 return ARRAY_SIZE(aead_list);
437}
438
335static inline int aalg_entries(void) 439static inline int aalg_entries(void)
336{ 440{
337 return ARRAY_SIZE(aalg_list); 441 return ARRAY_SIZE(aalg_list);
@@ -354,25 +458,32 @@ struct xfrm_algo_list {
354 u32 mask; 458 u32 mask;
355}; 459};
356 460
461static const struct xfrm_algo_list xfrm_aead_list = {
462 .algs = aead_list,
463 .entries = ARRAY_SIZE(aead_list),
464 .type = CRYPTO_ALG_TYPE_AEAD,
465 .mask = CRYPTO_ALG_TYPE_MASK,
466};
467
357static const struct xfrm_algo_list xfrm_aalg_list = { 468static const struct xfrm_algo_list xfrm_aalg_list = {
358 .algs = aalg_list, 469 .algs = aalg_list,
359 .entries = ARRAY_SIZE(aalg_list), 470 .entries = ARRAY_SIZE(aalg_list),
360 .type = CRYPTO_ALG_TYPE_HASH, 471 .type = CRYPTO_ALG_TYPE_HASH,
361 .mask = CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC, 472 .mask = CRYPTO_ALG_TYPE_HASH_MASK,
362}; 473};
363 474
364static const struct xfrm_algo_list xfrm_ealg_list = { 475static const struct xfrm_algo_list xfrm_ealg_list = {
365 .algs = ealg_list, 476 .algs = ealg_list,
366 .entries = ARRAY_SIZE(ealg_list), 477 .entries = ARRAY_SIZE(ealg_list),
367 .type = CRYPTO_ALG_TYPE_BLKCIPHER, 478 .type = CRYPTO_ALG_TYPE_BLKCIPHER,
368 .mask = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC, 479 .mask = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
369}; 480};
370 481
371static const struct xfrm_algo_list xfrm_calg_list = { 482static const struct xfrm_algo_list xfrm_calg_list = {
372 .algs = calg_list, 483 .algs = calg_list,
373 .entries = ARRAY_SIZE(calg_list), 484 .entries = ARRAY_SIZE(calg_list),
374 .type = CRYPTO_ALG_TYPE_COMPRESS, 485 .type = CRYPTO_ALG_TYPE_COMPRESS,
375 .mask = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC, 486 .mask = CRYPTO_ALG_TYPE_MASK,
376}; 487};
377 488
378static struct xfrm_algo_desc *xfrm_find_algo( 489static struct xfrm_algo_desc *xfrm_find_algo(
@@ -461,6 +572,33 @@ struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
461} 572}
462EXPORT_SYMBOL_GPL(xfrm_calg_get_byname); 573EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
463 574
575struct xfrm_aead_name {
576 const char *name;
577 int icvbits;
578};
579
580static int xfrm_aead_name_match(const struct xfrm_algo_desc *entry,
581 const void *data)
582{
583 const struct xfrm_aead_name *aead = data;
584 const char *name = aead->name;
585
586 return aead->icvbits == entry->uinfo.aead.icv_truncbits && name &&
587 !strcmp(name, entry->name);
588}
589
590struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len, int probe)
591{
592 struct xfrm_aead_name data = {
593 .name = name,
594 .icvbits = icv_len,
595 };
596
597 return xfrm_find_algo(&xfrm_aead_list, xfrm_aead_name_match, &data,
598 probe);
599}
600EXPORT_SYMBOL_GPL(xfrm_aead_get_byname);
601
464struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx) 602struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
465{ 603{
466 if (idx >= aalg_entries()) 604 if (idx >= aalg_entries())
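
As xfrm_aead_name_match() above shows, AEAD descriptors are looked up by algorithm name and truncated ICV length together, so the same "rfc4106(gcm(aes))" string resolves to different SADB identifiers depending on the requested ICV size. The reduced userspace model below mirrors that match over a tiny copy of the aead_list entries; the string identifiers stand in for desc.sadb_alg_id.

#include <stdio.h>
#include <string.h>

struct aead_entry {
	const char *name;
	int icv_truncbits;
	const char *sadb_id;	/* stand-in for desc.sadb_alg_id */
};

static const struct aead_entry table[] = {
	{ "rfc4106(gcm(aes))",  64, "SADB_X_EALG_AES_GCM_ICV8"  },
	{ "rfc4106(gcm(aes))",  96, "SADB_X_EALG_AES_GCM_ICV12" },
	{ "rfc4106(gcm(aes))", 128, "SADB_X_EALG_AES_GCM_ICV16" },
};

/* Same predicate as xfrm_aead_name_match(): name and ICV bits must agree. */
static const struct aead_entry *aead_get_byname(const char *name, int icvbits)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].icv_truncbits == icvbits &&
		    !strcmp(table[i].name, name))
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct aead_entry *e = aead_get_byname("rfc4106(gcm(aes))", 96);

	printf("%s\n", e ? e->sadb_id : "not found");
	return 0;
}
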
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 039e7019c48a..4d6ebc633a94 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -81,7 +81,6 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
81 *seq = *(__be32*)(skb_transport_header(skb) + offset_seq); 81 *seq = *(__be32*)(skb_transport_header(skb) + offset_seq);
82 return 0; 82 return 0;
83} 83}
84EXPORT_SYMBOL(xfrm_parse_spi);
85 84
86int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) 85int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
87{ 86{
@@ -160,12 +159,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
160 } 159 }
161 160
162 if ((x->encap ? x->encap->encap_type : 0) != encap_type) { 161 if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
163 XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEINVALID); 162 XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMISMATCH);
164 goto drop_unlock; 163 goto drop_unlock;
165 } 164 }
166 165
167 if (x->props.replay_window && xfrm_replay_check(x, skb, seq)) { 166 if (x->props.replay_window && xfrm_replay_check(x, skb, seq)) {
168 XFRM_INC_STATS(LINUX_MIB_XFRMINSEQOUTOFWINDOW); 167 XFRM_INC_STATS(LINUX_MIB_XFRMINSTATESEQERROR);
169 goto drop_unlock; 168 goto drop_unlock;
170 } 169 }
171 170
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index f4a1047a5573..fc690368325f 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -64,6 +64,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
64 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 64 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
65 XFRM_SKB_CB(skb)->seq = ++x->replay.oseq; 65 XFRM_SKB_CB(skb)->seq = ++x->replay.oseq;
66 if (unlikely(x->replay.oseq == 0)) { 66 if (unlikely(x->replay.oseq == 0)) {
67 XFRM_INC_STATS(LINUX_MIB_XFRMOUTSTATESEQERROR);
67 x->replay.oseq--; 68 x->replay.oseq--;
68 xfrm_audit_state_replay_overflow(x, skb); 69 xfrm_audit_state_replay_overflow(x, skb);
69 err = -EOVERFLOW; 70 err = -EOVERFLOW;
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 31d035415ecd..2b0db13f0cda 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -22,7 +22,7 @@ static struct snmp_mib xfrm_mib_list[] = {
22 SNMP_MIB_ITEM("XfrmInNoStates", LINUX_MIB_XFRMINNOSTATES), 22 SNMP_MIB_ITEM("XfrmInNoStates", LINUX_MIB_XFRMINNOSTATES),
23 SNMP_MIB_ITEM("XfrmInStateProtoError", LINUX_MIB_XFRMINSTATEPROTOERROR), 23 SNMP_MIB_ITEM("XfrmInStateProtoError", LINUX_MIB_XFRMINSTATEPROTOERROR),
24 SNMP_MIB_ITEM("XfrmInStateModeError", LINUX_MIB_XFRMINSTATEMODEERROR), 24 SNMP_MIB_ITEM("XfrmInStateModeError", LINUX_MIB_XFRMINSTATEMODEERROR),
25 SNMP_MIB_ITEM("XfrmInSeqOutOfWindow", LINUX_MIB_XFRMINSEQOUTOFWINDOW), 25 SNMP_MIB_ITEM("XfrmInStateSeqError", LINUX_MIB_XFRMINSTATESEQERROR),
26 SNMP_MIB_ITEM("XfrmInStateExpired", LINUX_MIB_XFRMINSTATEEXPIRED), 26 SNMP_MIB_ITEM("XfrmInStateExpired", LINUX_MIB_XFRMINSTATEEXPIRED),
27 SNMP_MIB_ITEM("XfrmInStateMismatch", LINUX_MIB_XFRMINSTATEMISMATCH), 27 SNMP_MIB_ITEM("XfrmInStateMismatch", LINUX_MIB_XFRMINSTATEMISMATCH),
28 SNMP_MIB_ITEM("XfrmInStateInvalid", LINUX_MIB_XFRMINSTATEINVALID), 28 SNMP_MIB_ITEM("XfrmInStateInvalid", LINUX_MIB_XFRMINSTATEINVALID),
@@ -36,6 +36,7 @@ static struct snmp_mib xfrm_mib_list[] = {
36 SNMP_MIB_ITEM("XfrmOutNoStates", LINUX_MIB_XFRMOUTNOSTATES), 36 SNMP_MIB_ITEM("XfrmOutNoStates", LINUX_MIB_XFRMOUTNOSTATES),
37 SNMP_MIB_ITEM("XfrmOutStateProtoError", LINUX_MIB_XFRMOUTSTATEPROTOERROR), 37 SNMP_MIB_ITEM("XfrmOutStateProtoError", LINUX_MIB_XFRMOUTSTATEPROTOERROR),
38 SNMP_MIB_ITEM("XfrmOutStateModeError", LINUX_MIB_XFRMOUTSTATEMODEERROR), 38 SNMP_MIB_ITEM("XfrmOutStateModeError", LINUX_MIB_XFRMOUTSTATEMODEERROR),
39 SNMP_MIB_ITEM("XfrmOutStateSeqError", LINUX_MIB_XFRMOUTSTATESEQERROR),
39 SNMP_MIB_ITEM("XfrmOutStateExpired", LINUX_MIB_XFRMOUTSTATEEXPIRED), 40 SNMP_MIB_ITEM("XfrmOutStateExpired", LINUX_MIB_XFRMOUTSTATEEXPIRED),
40 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK), 41 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK),
41 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD), 42 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 3003503d0c94..3ff76e84d548 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -216,10 +216,10 @@ static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
216 write_unlock_bh(&xfrm_state_afinfo_lock); 216 write_unlock_bh(&xfrm_state_afinfo_lock);
217} 217}
218 218
219int xfrm_register_type(struct xfrm_type *type, unsigned short family) 219int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
220{ 220{
221 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); 221 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
222 struct xfrm_type **typemap; 222 const struct xfrm_type **typemap;
223 int err = 0; 223 int err = 0;
224 224
225 if (unlikely(afinfo == NULL)) 225 if (unlikely(afinfo == NULL))
@@ -235,10 +235,10 @@ int xfrm_register_type(struct xfrm_type *type, unsigned short family)
235} 235}
236EXPORT_SYMBOL(xfrm_register_type); 236EXPORT_SYMBOL(xfrm_register_type);
237 237
238int xfrm_unregister_type(struct xfrm_type *type, unsigned short family) 238int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
239{ 239{
240 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); 240 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
241 struct xfrm_type **typemap; 241 const struct xfrm_type **typemap;
242 int err = 0; 242 int err = 0;
243 243
244 if (unlikely(afinfo == NULL)) 244 if (unlikely(afinfo == NULL))
@@ -254,11 +254,11 @@ int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
254} 254}
255EXPORT_SYMBOL(xfrm_unregister_type); 255EXPORT_SYMBOL(xfrm_unregister_type);
256 256
257static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family) 257static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
258{ 258{
259 struct xfrm_state_afinfo *afinfo; 259 struct xfrm_state_afinfo *afinfo;
260 struct xfrm_type **typemap; 260 const struct xfrm_type **typemap;
261 struct xfrm_type *type; 261 const struct xfrm_type *type;
262 int modload_attempted = 0; 262 int modload_attempted = 0;
263 263
264retry: 264retry:
@@ -281,7 +281,7 @@ retry:
281 return type; 281 return type;
282} 282}
283 283
284static void xfrm_put_type(struct xfrm_type *type) 284static void xfrm_put_type(const struct xfrm_type *type)
285{ 285{
286 module_put(type->owner); 286 module_put(type->owner);
287} 287}
@@ -1645,7 +1645,6 @@ err:
1645 xfrm_audit_state_replay(x, skb, net_seq); 1645 xfrm_audit_state_replay(x, skb, net_seq);
1646 return -EINVAL; 1646 return -EINVAL;
1647} 1647}
1648EXPORT_SYMBOL(xfrm_replay_check);
1649 1648
1650void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq) 1649void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1651{ 1650{
@@ -1667,7 +1666,6 @@ void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1667 if (xfrm_aevent_is_on()) 1666 if (xfrm_aevent_is_on())
1668 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 1667 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1669} 1668}
1670EXPORT_SYMBOL(xfrm_replay_advance);
1671 1669
1672static LIST_HEAD(xfrm_km_list); 1670static LIST_HEAD(xfrm_km_list);
1673static DEFINE_RWLOCK(xfrm_km_lock); 1671static DEFINE_RWLOCK(xfrm_km_lock);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index e0ccdf267813..78338079b7f5 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -31,6 +31,11 @@
31#include <linux/in6.h> 31#include <linux/in6.h>
32#endif 32#endif
33 33
34static inline int aead_len(struct xfrm_algo_aead *alg)
35{
36 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
37}
38
34static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type) 39static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
35{ 40{
36 struct nlattr *rt = attrs[type]; 41 struct nlattr *rt = attrs[type];
@@ -68,6 +73,22 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
68 return 0; 73 return 0;
69} 74}
70 75
76static int verify_aead(struct nlattr **attrs)
77{
78 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
79 struct xfrm_algo_aead *algp;
80
81 if (!rt)
82 return 0;
83
84 algp = nla_data(rt);
85 if (nla_len(rt) < aead_len(algp))
86 return -EINVAL;
87
88 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
89 return 0;
90}
91
71static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type, 92static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
72 xfrm_address_t **addrp) 93 xfrm_address_t **addrp)
73{ 94{
@@ -119,20 +140,28 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
119 switch (p->id.proto) { 140 switch (p->id.proto) {
120 case IPPROTO_AH: 141 case IPPROTO_AH:
121 if (!attrs[XFRMA_ALG_AUTH] || 142 if (!attrs[XFRMA_ALG_AUTH] ||
143 attrs[XFRMA_ALG_AEAD] ||
122 attrs[XFRMA_ALG_CRYPT] || 144 attrs[XFRMA_ALG_CRYPT] ||
123 attrs[XFRMA_ALG_COMP]) 145 attrs[XFRMA_ALG_COMP])
124 goto out; 146 goto out;
125 break; 147 break;
126 148
127 case IPPROTO_ESP: 149 case IPPROTO_ESP:
128 if ((!attrs[XFRMA_ALG_AUTH] && 150 if (attrs[XFRMA_ALG_COMP])
129 !attrs[XFRMA_ALG_CRYPT]) || 151 goto out;
130 attrs[XFRMA_ALG_COMP]) 152 if (!attrs[XFRMA_ALG_AUTH] &&
153 !attrs[XFRMA_ALG_CRYPT] &&
154 !attrs[XFRMA_ALG_AEAD])
155 goto out;
156 if ((attrs[XFRMA_ALG_AUTH] ||
157 attrs[XFRMA_ALG_CRYPT]) &&
158 attrs[XFRMA_ALG_AEAD])
131 goto out; 159 goto out;
132 break; 160 break;
133 161
134 case IPPROTO_COMP: 162 case IPPROTO_COMP:
135 if (!attrs[XFRMA_ALG_COMP] || 163 if (!attrs[XFRMA_ALG_COMP] ||
164 attrs[XFRMA_ALG_AEAD] ||
136 attrs[XFRMA_ALG_AUTH] || 165 attrs[XFRMA_ALG_AUTH] ||
137 attrs[XFRMA_ALG_CRYPT]) 166 attrs[XFRMA_ALG_CRYPT])
138 goto out; 167 goto out;
@@ -143,6 +172,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
143 case IPPROTO_ROUTING: 172 case IPPROTO_ROUTING:
144 if (attrs[XFRMA_ALG_COMP] || 173 if (attrs[XFRMA_ALG_COMP] ||
145 attrs[XFRMA_ALG_AUTH] || 174 attrs[XFRMA_ALG_AUTH] ||
175 attrs[XFRMA_ALG_AEAD] ||
146 attrs[XFRMA_ALG_CRYPT] || 176 attrs[XFRMA_ALG_CRYPT] ||
147 attrs[XFRMA_ENCAP] || 177 attrs[XFRMA_ENCAP] ||
148 attrs[XFRMA_SEC_CTX] || 178 attrs[XFRMA_SEC_CTX] ||
@@ -155,6 +185,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
155 goto out; 185 goto out;
156 } 186 }
157 187
188 if ((err = verify_aead(attrs)))
189 goto out;
158 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH))) 190 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
159 goto out; 191 goto out;
160 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT))) 192 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
@@ -208,6 +240,31 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
208 return 0; 240 return 0;
209} 241}
210 242
243static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
244 struct nlattr *rta)
245{
246 struct xfrm_algo_aead *p, *ualg;
247 struct xfrm_algo_desc *algo;
248
249 if (!rta)
250 return 0;
251
252 ualg = nla_data(rta);
253
254 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
255 if (!algo)
256 return -ENOSYS;
257 *props = algo->desc.sadb_alg_id;
258
259 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
260 if (!p)
261 return -ENOMEM;
262
263 strcpy(p->alg_name, algo->name);
264 *algpp = p;
265 return 0;
266}
267
211static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) 268static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
212{ 269{
213 int len = 0; 270 int len = 0;
@@ -286,6 +343,9 @@ static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
286 343
287 copy_from_user_state(x, p); 344 copy_from_user_state(x, p);
288 345
346 if ((err = attach_aead(&x->aead, &x->props.ealgo,
347 attrs[XFRMA_ALG_AEAD])))
348 goto error;
289 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo, 349 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
290 xfrm_aalg_get_byname, 350 xfrm_aalg_get_byname,
291 attrs[XFRMA_ALG_AUTH]))) 351 attrs[XFRMA_ALG_AUTH])))
@@ -510,6 +570,8 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
510 if (x->lastused) 570 if (x->lastused)
511 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); 571 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
512 572
573 if (x->aead)
574 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
513 if (x->aalg) 575 if (x->aalg)
514 NLA_PUT(skb, XFRMA_ALG_AUTH, xfrm_alg_len(x->aalg), x->aalg); 576 NLA_PUT(skb, XFRMA_ALG_AUTH, xfrm_alg_len(x->aalg), x->aalg);
515 if (x->ealg) 577 if (x->ealg)
@@ -1808,6 +1870,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1808#undef XMSGSIZE 1870#undef XMSGSIZE
1809 1871
1810static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { 1872static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
1873 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
1811 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, 1874 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
1812 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, 1875 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
1813 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, 1876 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
@@ -1972,6 +2035,8 @@ static int xfrm_notify_sa_flush(struct km_event *c)
1972static inline size_t xfrm_sa_len(struct xfrm_state *x) 2035static inline size_t xfrm_sa_len(struct xfrm_state *x)
1973{ 2036{
1974 size_t l = 0; 2037 size_t l = 0;
2038 if (x->aead)
2039 l += nla_total_size(aead_len(x->aead));
1975 if (x->aalg) 2040 if (x->aalg)
1976 l += nla_total_size(xfrm_alg_len(x->aalg)); 2041 l += nla_total_size(xfrm_alg_len(x->aalg));
1977 if (x->ealg) 2042 if (x->ealg)