aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/feature-removal-schedule.txt27
-rw-r--r--Documentation/networking/ip-sysctl.txt3
-rw-r--r--Documentation/networking/l2tp.txt169
-rw-r--r--Documentation/networking/multiqueue.txt111
-rw-r--r--Documentation/networking/netdevices.txt38
-rw-r--r--MAINTAINERS5
-rw-r--r--arch/ppc/8260_io/enet.c4
-rw-r--r--arch/ppc/8260_io/fcc_enet.c4
-rw-r--r--arch/ppc/8xx_io/enet.c4
-rw-r--r--arch/ppc/8xx_io/fec.c2
-rw-r--r--drivers/bluetooth/hci_usb.c88
-rw-r--r--drivers/bluetooth/hci_usb.h5
-rw-r--r--drivers/bluetooth/hci_vhci.c6
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/7990.c4
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig21
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/a2065.c4
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/au1000_eth.c4
-rw-r--r--drivers/net/bnx2.c503
-rw-r--r--drivers/net/bnx2.h66
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/dummy.c82
-rw-r--r--drivers/net/eepro100.c2
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/fealnx.c4
-rw-r--r--drivers/net/fec.c2
-rw-r--r--drivers/net/hamachi.c4
-rw-r--r--drivers/net/ifb.c78
-rw-r--r--drivers/net/irda/kingsun-sir.c4
-rw-r--r--drivers/net/irda/vlsi_ir.c27
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/natsemi.c4
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/ni65.c4
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/pppol2tp.c2486
-rw-r--r--drivers/net/saa9730.c4
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sun3lance.c5
-rw-r--r--drivers/net/sunbmac.c2
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sunlance.c4
-rw-r--r--drivers/net/sunqe.c4
-rw-r--r--drivers/net/tg3.c140
-rw-r--r--drivers/net/tg3.h9
-rw-r--r--drivers/net/tulip/interrupt.c8
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/tulip/xircom_tulip_cb.c4
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/via-rhine.c4
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/etherdevice.h9
-rw-r--r--include/linux/if_link.h47
-rw-r--r--include/linux/if_ppp.h16
-rw-r--r--include/linux/if_pppol2tp.h69
-rw-r--r--include/linux/if_pppox.h16
-rw-r--r--include/linux/if_tun.h2
-rw-r--r--include/linux/if_vlan.h12
-rw-r--r--include/linux/ip_mp_alg.h22
-rw-r--r--include/linux/ipv6.h10
-rw-r--r--include/linux/irda.h28
-rw-r--r--include/linux/ktime.h10
-rw-r--r--include/linux/netdevice.h143
-rw-r--r--include/linux/netfilter.h3
-rw-r--r--include/linux/netfilter/nf_conntrack_pptp.h2
-rw-r--r--include/linux/netfilter/x_tables.h36
-rw-r--r--include/linux/netfilter/xt_u32.h40
-rw-r--r--include/linux/netfilter_ipv4/ipt_CLUSTERIP.h4
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h10
-rw-r--r--include/linux/pkt_cls.h17
-rw-r--r--include/linux/pkt_sched.h9
-rw-r--r--include/linux/rtnetlink.h20
-rw-r--r--include/linux/skbuff.h65
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/udp.h6
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/af_unix.h3
-rw-r--r--include/net/bluetooth/hci.h18
-rw-r--r--include/net/bluetooth/hci_core.h5
-rw-r--r--include/net/bluetooth/rfcomm.h1
-rw-r--r--include/net/dn.h1
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/flow.h4
-rw-r--r--include/net/ip_fib.h16
-rw-r--r--include/net/ip_mp_alg.h96
-rw-r--r--include/net/ipv6.h4
-rw-r--r--include/net/irda/irda.h3
-rw-r--r--include/net/irda/irlap.h2
-rw-r--r--include/net/mip6.h4
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h23
-rw-r--r--include/net/netfilter/nf_conntrack.h66
-rw-r--r--include/net/netfilter/nf_conntrack_core.h11
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h17
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h42
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h85
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h16
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h2
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h78
-rw-r--r--include/net/netfilter/nf_nat.h28
-rw-r--r--include/net/netfilter/nf_nat_core.h1
-rw-r--r--include/net/netlink.h84
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/rawv6.h9
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/rtnetlink.h58
-rw-r--r--include/net/tipc/tipc_port.h6
-rw-r--r--include/net/xfrm.h21
-rw-r--r--net/802/tr.c2
-rw-r--r--net/8021q/Makefile2
-rw-r--r--net/8021q/vlan.c495
-rw-r--r--net/8021q/vlan.h24
-rw-r--r--net/8021q/vlan_dev.c217
-rw-r--r--net/8021q/vlan_netlink.c243
-rw-r--r--net/8021q/vlanproc.c6
-rw-r--r--net/Makefile1
-rw-r--r--net/appletalk/aarp.c2
-rw-r--r--net/appletalk/atalk_proc.c6
-rw-r--r--net/atm/br2684.c24
-rw-r--r--net/atm/clip.c2
-rw-r--r--net/atm/lec.c2
-rw-r--r--net/atm/mpoa_proc.c2
-rw-r--r--net/atm/proc.c8
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_route.c2
-rw-r--r--net/ax25/ax25_uid.c2
-rw-r--r--net/bluetooth/hci_conn.c51
-rw-r--r--net/bluetooth/hci_core.c100
-rw-r--r--net/bluetooth/hci_event.c29
-rw-r--r--net/bluetooth/rfcomm/tty.c34
-rw-r--r--net/bridge/br_if.c10
-rw-r--r--net/core/dev.c270
-rw-r--r--net/core/dev_mcast.c129
-rw-r--r--net/core/gen_estimator.c3
-rw-r--r--net/core/netpoll.c18
-rw-r--r--net/core/pktgen.c249
-rw-r--r--net/core/rtnetlink.c470
-rw-r--r--net/core/skbuff.c13
-rw-r--r--net/core/sock.c42
-rw-r--r--net/dccp/ccids/ccid3.c212
-rw-r--r--net/dccp/ccids/ccid3.h5
-rw-r--r--net/dccp/ccids/lib/loss_interval.c246
-rw-r--r--net/dccp/ccids/lib/loss_interval.h46
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/ipv6.c20
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/decnet/dn_dev.c5
-rw-r--r--net/decnet/dn_neigh.c2
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/ethernet/eth.c12
-rw-r--r--net/ipv4/Kconfig42
-rw-r--r--net/ipv4/Makefile5
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/ah4.c1
-rw-r--r--net/ipv4/esp4.c1
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_semantics.c16
-rw-r--r--net/ipv4/ip_gre.c3
-rw-r--r--net/ipv4/ip_output.c6
-rw-r--r--net/ipv4/ipcomp.c1
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/ipvs/ip_vs_app.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c2
-rw-r--r--net/ipv4/multipath.c55
-rw-r--r--net/ipv4/multipath_drr.c249
-rw-r--r--net/ipv4/multipath_random.c114
-rw-r--r--net/ipv4/multipath_rr.c95
-rw-r--r--net/ipv4/multipath_wrandom.c329
-rw-r--r--net/ipv4/netfilter/Kconfig2
-rw-r--r--net/ipv4/netfilter/arp_tables.c6
-rw-r--r--net/ipv4/netfilter/arpt_mangle.c10
-rw-r--r--net/ipv4/netfilter/ip_tables.c177
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c118
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c36
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c56
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c30
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c23
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c20
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c30
-rw-r--r--net/ipv4/netfilter/ipt_SAME.c69
-rw-r--r--net/ipv4/netfilter/ipt_TOS.c8
-rw-r--r--net/ipv4/netfilter/ipt_TTL.c14
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c68
-rw-r--r--net/ipv4/netfilter/ipt_addrtype.c14
-rw-r--r--net/ipv4/netfilter/ipt_ah.c25
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c59
-rw-r--r--net/ipv4/netfilter/ipt_iprange.c50
-rw-r--r--net/ipv4/netfilter/ipt_owner.c20
-rw-r--r--net/ipv4/netfilter/ipt_recent.c45
-rw-r--r--net/ipv4/netfilter/ipt_tos.c6
-rw-r--r--net/ipv4/netfilter/ipt_ttl.c26
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c36
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c112
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c26
-rw-r--r--net/ipv4/netfilter/nf_nat_amanda.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c127
-rw-r--r--net/ipv4/netfilter/nf_nat_ftp.c18
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c121
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c59
-rw-r--r--net/ipv4/netfilter/nf_nat_irc.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c43
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_gre.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c48
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c18
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c47
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c2
-rw-r--r--net/ipv4/route.c261
-rw-r--r--net/ipv4/tcp_ipv4.c19
-rw-r--r--net/ipv4/tcp_output.c8
-rw-r--r--net/ipv4/tcp_probe.c194
-rw-r--r--net/ipv4/udp.c140
-rw-r--r--net/ipv4/xfrm4_input.c114
-rw-r--r--net/ipv4/xfrm4_tunnel.c1
-rw-r--r--net/ipv6/Kconfig2
-rw-r--r--net/ipv6/Makefile2
-rw-r--r--net/ipv6/addrconf.c9
-rw-r--r--net/ipv6/af_inet6.c10
-rw-r--r--net/ipv6/ah6.c13
-rw-r--r--net/ipv6/anycast.c2
-rw-r--r--net/ipv6/datagram.c5
-rw-r--r--net/ipv6/esp6.c1
-rw-r--r--net/ipv6/exthdrs.c140
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_flowlabel.c2
-rw-r--r--net/ipv6/ip6_output.c6
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c15
-rw-r--r--net/ipv6/mcast.c4
-rw-r--r--net/ipv6/mip6.c24
-rw-r--r--net/ipv6/netfilter/ip6_tables.c200
-rw-r--r--net/ipv6/netfilter/ip6t_HL.c14
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c57
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c45
-rw-r--r--net/ipv6/netfilter/ip6t_ah.c82
-rw-r--r--net/ipv6/netfilter/ip6t_eui64.c20
-rw-r--r--net/ipv6/netfilter/ip6t_frag.c111
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c88
-rw-r--r--net/ipv6/netfilter/ip6t_hl.c22
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c22
-rw-r--r--net/ipv6/netfilter/ip6t_mh.c30
-rw-r--r--net/ipv6/netfilter/ip6t_owner.c26
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c134
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c6
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c16
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c26
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c52
-rw-r--r--net/ipv6/raw.c40
-rw-r--r--net/ipv6/sit.c3
-rw-r--r--net/ipv6/tcp_ipv6.c20
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/ipv6/xfrm6_state.c4
-rw-r--r--net/ipv6/xfrm6_tunnel.c1
-rw-r--r--net/ipx/ipx_proc.c6
-rw-r--r--net/irda/Makefile2
-rw-r--r--net/irda/discovery.c2
-rw-r--r--net/irda/ircomm/ircomm_core.c2
-rw-r--r--net/irda/iriap.c2
-rw-r--r--net/irda/irlan/irlan_common.c2
-rw-r--r--net/irda/irlap.c2
-rw-r--r--net/irda/irlap_frame.c7
-rw-r--r--net/irda/irlmp.c2
-rw-r--r--net/irda/irmod.c48
-rw-r--r--net/irda/irnetlink.c170
-rw-r--r--net/irda/irttp.c31
-rw-r--r--net/llc/llc_proc.c4
-rw-r--r--net/mac80211/ieee80211_ioctl.c25
-rw-r--r--net/mac80211/rc80211_simple.c12
-rw-r--r--net/netfilter/Kconfig25
-rw-r--r--net/netfilter/Makefile4
-rw-r--r--net/netfilter/core.c10
-rw-r--r--net/netfilter/nf_conntrack_amanda.c17
-rw-r--r--net/netfilter/nf_conntrack_core.c513
-rw-r--r--net/netfilter/nf_conntrack_ecache.c16
-rw-r--r--net/netfilter/nf_conntrack_expect.c367
-rw-r--r--net/netfilter/nf_conntrack_extend.c195
-rw-r--r--net/netfilter/nf_conntrack_ftp.c143
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c18
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c307
-rw-r--r--net/netfilter/nf_conntrack_helper.c131
-rw-r--r--net/netfilter/nf_conntrack_irc.c39
-rw-r--r--net/netfilter/nf_conntrack_l3proto_generic.c13
-rw-r--r--net/netfilter/nf_conntrack_netbios_ns.c12
-rw-r--r--net/netfilter/nf_conntrack_netlink.c182
-rw-r--r--net/netfilter/nf_conntrack_pptp.c120
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c28
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c95
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c129
-rw-r--r--net/netfilter/nf_conntrack_sane.c45
-rw-r--r--net/netfilter/nf_conntrack_sip.c37
-rw-r--r--net/netfilter/nf_conntrack_standalone.c47
-rw-r--r--net/netfilter/nf_conntrack_tftp.c32
-rw-r--r--net/netfilter/nf_log.c2
-rw-r--r--net/netfilter/nf_queue.c59
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c6
-rw-r--r--net/netfilter/x_tables.c11
-rw-r--r--net/netfilter/xt_CLASSIFY.c2
-rw-r--r--net/netfilter/xt_CONNMARK.c18
-rw-r--r--net/netfilter/xt_CONNSECMARK.c18
-rw-r--r--net/netfilter/xt_DSCP.c18
-rw-r--r--net/netfilter/xt_MARK.c24
-rw-r--r--net/netfilter/xt_NFLOG.c12
-rw-r--r--net/netfilter/xt_NFQUEUE.c2
-rw-r--r--net/netfilter/xt_NOTRACK.c2
-rw-r--r--net/netfilter/xt_SECMARK.c26
-rw-r--r--net/netfilter/xt_TCPMSS.c28
-rw-r--r--net/netfilter/xt_TRACE.c53
-rw-r--r--net/netfilter/xt_comment.c8
-rw-r--r--net/netfilter/xt_connbytes.c32
-rw-r--r--net/netfilter/xt_connmark.c26
-rw-r--r--net/netfilter/xt_conntrack.c42
-rw-r--r--net/netfilter/xt_dccp.c50
-rw-r--r--net/netfilter/xt_dscp.c48
-rw-r--r--net/netfilter/xt_esp.c24
-rw-r--r--net/netfilter/xt_hashlimit.c65
-rw-r--r--net/netfilter/xt_helper.c61
-rw-r--r--net/netfilter/xt_length.c14
-rw-r--r--net/netfilter/xt_limit.c23
-rw-r--r--net/netfilter/xt_mac.c16
-rw-r--r--net/netfilter/xt_mark.c16
-rw-r--r--net/netfilter/xt_multiport.c54
-rw-r--r--net/netfilter/xt_physdev.c48
-rw-r--r--net/netfilter/xt_pkttype.c10
-rw-r--r--net/netfilter/xt_policy.c50
-rw-r--r--net/netfilter/xt_quota.c21
-rw-r--r--net/netfilter/xt_realm.c8
-rw-r--r--net/netfilter/xt_sctp.c61
-rw-r--r--net/netfilter/xt_state.c20
-rw-r--r--net/netfilter/xt_statistic.c20
-rw-r--r--net/netfilter/xt_string.c38
-rw-r--r--net/netfilter/xt_tcpmss.c10
-rw-r--r--net/netfilter/xt_tcpudp.c63
-rw-r--r--net/netfilter/xt_u32.c135
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/netlink/attr.c11
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/netrom/nr_route.c4
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/rose/af_rose.c2
-rw-r--r--net/rose/rose_route.c6
-rw-r--r--net/rxrpc/ar-proc.c52
-rw-r--r--net/sched/Kconfig23
-rw-r--r--net/sched/act_api.c23
-rw-r--r--net/sched/act_gact.c11
-rw-r--r--net/sched/act_ipt.c12
-rw-r--r--net/sched/act_mirred.c12
-rw-r--r--net/sched/act_pedit.c11
-rw-r--r--net/sched/act_police.c36
-rw-r--r--net/sched/act_simple.c1
-rw-r--r--net/sched/cls_api.c10
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_fw.c19
-rw-r--r--net/sched/cls_route.c20
-rw-r--r--net/sched/cls_rsvp.c17
-rw-r--r--net/sched/cls_rsvp6.c16
-rw-r--r--net/sched/cls_tcindex.c3
-rw-r--r--net/sched/cls_u32.c18
-rw-r--r--net/sched/em_cmp.c1
-rw-r--r--net/sched/em_meta.c2
-rw-r--r--net/sched/em_nbyte.c2
-rw-r--r--net/sched/em_text.c2
-rw-r--r--net/sched/em_u32.c2
-rw-r--r--net/sched/ematch.c15
-rw-r--r--net/sched/sch_api.c18
-rw-r--r--net/sched/sch_atm.c4
-rw-r--r--net/sched/sch_blackhole.c1
-rw-r--r--net/sched/sch_cbq.c34
-rw-r--r--net/sched/sch_dsmark.c1
-rw-r--r--net/sched/sch_fifo.c1
-rw-r--r--net/sched/sch_generic.c219
-rw-r--r--net/sched/sch_gred.c1
-rw-r--r--net/sched/sch_hfsc.c21
-rw-r--r--net/sched/sch_htb.c123
-rw-r--r--net/sched/sch_ingress.c9
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/sched/sch_prio.c145
-rw-r--r--net/sched/sch_red.c1
-rw-r--r--net/sched/sch_sfq.c18
-rw-r--r--net/sched/sch_tbf.c19
-rw-r--r--net/sched/sch_teql.c24
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/tipc/eth_media.c11
-rw-r--r--net/tipc/link.c16
-rw-r--r--net/tipc/port.c10
-rw-r--r--net/tipc/port.h6
-rw-r--r--net/tipc/socket.c80
-rw-r--r--net/unix/af_unix.c8
-rw-r--r--net/unix/garbage.c325
-rw-r--r--net/wanrouter/wanproc.c4
-rw-r--r--net/x25/x25_proc.c6
-rw-r--r--net/xfrm/xfrm_state.c31
412 files changed, 11191 insertions, 7282 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 281458b47d75..0599a0c7c026 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -262,25 +262,6 @@ Who: Richard Purdie <rpurdie@rpsys.net>
262 262
263--------------------------- 263---------------------------
264 264
265What: Multipath cached routing support in ipv4
266When: in 2.6.23
267Why: Code was merged, then submitter immediately disappeared leaving
268 us with no maintainer and lots of bugs. The code should not have
269	been merged in the first place, and many aspects of its
270 implementation are blocking more critical core networking
271 development. It's marked EXPERIMENTAL and no distribution
272	enables it because it causes obscure crashes due to unfixable bugs
273 (interfaces don't return errors so memory allocation can't be
274 handled, calling contexts of these interfaces make handling
275 errors impossible too because they get called after we've
276	totally committed to creating a route object, for example).
277 This problem has existed for years and no forward progress
278 has ever been made, and nobody steps up to try and salvage
279 this code, so we're going to finally just get rid of it.
280Who: David S. Miller <davem@davemloft.net>
281
282---------------------------
283
284What: read_dev_chars(), read_conf_data{,_lpm}() (s390 common I/O layer) 265What: read_dev_chars(), read_conf_data{,_lpm}() (s390 common I/O layer)
285When: December 2007 266When: December 2007
286Why: These functions are a leftover from 2.4 times. They have several 267Why: These functions are a leftover from 2.4 times. They have several
@@ -337,3 +318,11 @@ Who: Jean Delvare <khali@linux-fr.org>
337 318
338--------------------------- 319---------------------------
339 320
321What: iptables SAME target
322When: 1.1. 2008
323Files: net/ipv4/netfilter/ipt_SAME.c, include/linux/netfilter_ipv4/ipt_SAME.h
324Why: Obsolete for multiple years now, NAT core provides the same behaviour.
325 Unfixable broken wrt. 32/64 bit cleanness.
326Who: Patrick McHardy <kaber@trash.net>
327
328---------------------------
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index af6a63ab9026..09c184e41cf8 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -874,8 +874,7 @@ accept_redirects - BOOLEAN
874accept_source_route - INTEGER 874accept_source_route - INTEGER
875 Accept source routing (routing extension header). 875 Accept source routing (routing extension header).
876 876
877 > 0: Accept routing header. 877 >= 0: Accept only routing header type 2.
878 = 0: Accept only routing header type 2.
879 < 0: Do not accept routing header. 878 < 0: Do not accept routing header.
880 879
881 Default: 0 880 Default: 0
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
new file mode 100644
index 000000000000..2451f551c505
--- /dev/null
+++ b/Documentation/networking/l2tp.txt
@@ -0,0 +1,169 @@
1This brief document describes how to use the kernel's PPPoL2TP driver
2to provide L2TP functionality. L2TP is a protocol that tunnels one or
3more PPP sessions over a UDP tunnel. It is commonly used for VPNs
4(L2TP/IPSec) and by ISPs to tunnel subscriber PPP sessions over an IP
5network infrastructure.
6
7Design
8======
9
10The PPPoL2TP driver, drivers/net/pppol2tp.c, provides a mechanism by
11which PPP frames carried through an L2TP session are passed through
12the kernel's PPP subsystem. The standard PPP daemon, pppd, handles all
13PPP interaction with the peer. PPP network interfaces are created for
14each local PPP endpoint.
15
16The L2TP protocol http://www.faqs.org/rfcs/rfc2661.html defines L2TP
17control and data frames. L2TP control frames carry messages between
18L2TP clients/servers and are used to setup / teardown tunnels and
19sessions. An L2TP client or server is implemented in userspace and
20will use a regular UDP socket per tunnel. L2TP data frames carry PPP
21frames, which may be PPP control or PPP data. The kernel's PPP
22subsystem arranges for PPP control frames to be delivered to pppd,
23while data frames are forwarded as usual.
24
25Each tunnel and session within a tunnel is assigned a unique tunnel_id
26and session_id. These ids are carried in the L2TP header of every
27control and data packet. The pppol2tp driver uses them to lookup
28internal tunnel and/or session contexts. Zero tunnel / session ids are
29treated specially - zero ids are never assigned to tunnels or sessions
30in the network. In the driver, the tunnel context keeps a pointer to
31the tunnel UDP socket. The session context keeps a pointer to the
32PPPoL2TP socket, as well as other data that lets the driver interface
33to the kernel PPP subsystem.
34
35Note that the pppol2tp kernel driver handles only L2TP data frames;
36L2TP control frames are simply passed up to userspace in the UDP
37tunnel socket. The kernel handles all datapath aspects of the
38protocol, including data packet resequencing (if enabled).
39
40There are a number of requirements on the userspace L2TP daemon in
41order to use the pppol2tp driver.
42
431. Use a UDP socket per tunnel.
44
452. Create a single PPPoL2TP socket per tunnel bound to a special null
46 session id. This is used only for communicating with the driver but
47 must remain open while the tunnel is active. Opening this tunnel
48 management socket causes the driver to mark the tunnel socket as an
49 L2TP UDP encapsulation socket and flags it for use by the
50 referenced tunnel id. This hooks up the UDP receive path via
51 udp_encap_rcv() in net/ipv4/udp.c. PPP data frames are never passed
52 in this special PPPoX socket.
53
543. Create a PPPoL2TP socket per L2TP session. This is typically done
55 by starting pppd with the pppol2tp plugin and appropriate
56 arguments. A PPPoL2TP tunnel management socket (Step 2) must be
57 created before the first PPPoL2TP session socket is created.
58
59When creating PPPoL2TP sockets, the application provides information
60to the driver about the socket in a socket connect() call. Source and
61destination tunnel and session ids are provided, as well as the file
62descriptor of a UDP socket. See struct pppol2tp_addr in
63include/linux/if_ppp.h. Note that zero tunnel / session ids are
64treated specially. When creating the per-tunnel PPPoL2TP management
65socket in Step 2 above, zero source and destination session ids are
66specified, which tells the driver to prepare the supplied UDP file
67descriptor for use as an L2TP tunnel socket.
68
69Userspace may control behavior of the tunnel or session using
70setsockopt and ioctl on the PPPoX socket. The following socket
71options are supported:-
72
73DEBUG - bitmask of debug message categories. See below.
74SENDSEQ - 0 => don't send packets with sequence numbers
75 1 => send packets with sequence numbers
76RECVSEQ - 0 => receive packet sequence numbers are optional
77 1 => drop receive packets without sequence numbers
78LNSMODE - 0 => act as LAC.
79 1 => act as LNS.
80REORDERTO - reorder timeout (in millisecs). If 0, don't try to reorder.
81
82Only the DEBUG option is supported by the special tunnel management
83PPPoX socket.
84
85In addition to the standard PPP ioctls, a PPPIOCGL2TPSTATS is provided
86to retrieve tunnel and session statistics from the kernel using the
87PPPoX socket of the appropriate tunnel or session.
88
89Debugging
90=========
91
92The driver supports a flexible debug scheme where kernel trace
93messages may be optionally enabled per tunnel and per session. Care is
94needed when debugging a live system since the messages are not
95rate-limited and a busy system could be swamped. Userspace uses
96setsockopt on the PPPoX socket to set a debug mask.
97
98The following debug mask bits are available:
99
100PPPOL2TP_MSG_DEBUG verbose debug (if compiled in)
101PPPOL2TP_MSG_CONTROL userspace - kernel interface
102PPPOL2TP_MSG_SEQ sequence numbers handling
103PPPOL2TP_MSG_DATA data packets
104
105Sample Userspace Code
106=====================
107
1081. Create tunnel management PPPoX socket
109
110 kernel_fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
111 if (kernel_fd >= 0) {
112 struct sockaddr_pppol2tp sax;
113 struct sockaddr_in const *peer_addr;
114
115 peer_addr = l2tp_tunnel_get_peer_addr(tunnel);
116 memset(&sax, 0, sizeof(sax));
117 sax.sa_family = AF_PPPOX;
118 sax.sa_protocol = PX_PROTO_OL2TP;
119 sax.pppol2tp.fd = udp_fd; /* fd of tunnel UDP socket */
120 sax.pppol2tp.addr.sin_addr.s_addr = peer_addr->sin_addr.s_addr;
121 sax.pppol2tp.addr.sin_port = peer_addr->sin_port;
122 sax.pppol2tp.addr.sin_family = AF_INET;
123 sax.pppol2tp.s_tunnel = tunnel_id;
124 sax.pppol2tp.s_session = 0; /* special case: mgmt socket */
125 sax.pppol2tp.d_tunnel = 0;
126 sax.pppol2tp.d_session = 0; /* special case: mgmt socket */
127
128 if(connect(kernel_fd, (struct sockaddr *)&sax, sizeof(sax) ) < 0 ) {
129 perror("connect failed");
130 result = -errno;
131 goto err;
132 }
133 }
134
1352. Create session PPPoX data socket
136
137 struct sockaddr_pppol2tp sax;
138 int fd;
139
140 /* Note, the target socket must be bound already, else it will not be ready */
141 sax.sa_family = AF_PPPOX;
142 sax.sa_protocol = PX_PROTO_OL2TP;
143 sax.pppol2tp.fd = tunnel_fd;
144 sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
145 sax.pppol2tp.addr.sin_port = addr->sin_port;
146 sax.pppol2tp.addr.sin_family = AF_INET;
147 sax.pppol2tp.s_tunnel = tunnel_id;
148 sax.pppol2tp.s_session = session_id;
149 sax.pppol2tp.d_tunnel = peer_tunnel_id;
150 sax.pppol2tp.d_session = peer_session_id;
151
152 /* session_fd is the fd of the session's PPPoL2TP socket.
153 * tunnel_fd is the fd of the tunnel UDP socket.
154 */
155 fd = connect(session_fd, (struct sockaddr *)&sax, sizeof(sax));
156 if (fd < 0 ) {
157 return -errno;
158 }
159 return 0;
160
161Miscellaneous
162============
163
164The PPPoL2TP driver was developed as part of the OpenL2TP project by
165Katalix Systems Ltd. OpenL2TP is a full-featured L2TP client / server,
166designed from the ground up to have the L2TP datapath in the
167kernel. The project also implemented the pppol2tp plugin for pppd
168which allows pppd to use the kernel driver. Details can be found at
169http://openl2tp.sourceforge.net.
diff --git a/Documentation/networking/multiqueue.txt b/Documentation/networking/multiqueue.txt
new file mode 100644
index 000000000000..00b60cce2224
--- /dev/null
+++ b/Documentation/networking/multiqueue.txt
@@ -0,0 +1,111 @@
1
2 HOWTO for multiqueue network device support
3 ===========================================
4
5Section 1: Base driver requirements for implementing multiqueue support
6Section 2: Qdisc support for multiqueue devices
7Section 3: Brief howto using PRIO or RR for multiqueue devices
8
9
10Intro: Kernel support for multiqueue devices
11---------------------------------------------------------
12
13Kernel support for multiqueue devices is only an API that is presented to the
14netdevice layer for base drivers to implement. This feature is part of the
15core networking stack, and all network devices will be running on the
16multiqueue-aware stack. If a base driver only has one queue, then these
17changes are transparent to that driver.
18
19
20Section 1: Base driver requirements for implementing multiqueue support
21-----------------------------------------------------------------------
22
23Base drivers are required to use the new alloc_etherdev_mq() or
24alloc_netdev_mq() functions to allocate the subqueues for the device. The
25underlying kernel API will take care of the allocation and deallocation of
26the subqueue memory, as well as netdev configuration of where the queues
27exist in memory.
28
29The base driver will also need to manage the queues as it does the global
30netdev->queue_lock today. Therefore base drivers should use the
31netif_{start|stop|wake}_subqueue() functions to manage each queue while the
32device is still operational. netdev->queue_lock is still used when the device
33comes online or when it's completely shut down (unregister_netdev(), etc.).
34
35Finally, the base driver should indicate that it is a multiqueue device. The
36feature flag NETIF_F_MULTI_QUEUE should be added to the netdev->features
37bitmap on device initialization. Below is an example from e1000:
38
39#ifdef CONFIG_E1000_MQ
40 if ( (adapter->hw.mac.type == e1000_82571) ||
41 (adapter->hw.mac.type == e1000_82572) ||
42 (adapter->hw.mac.type == e1000_80003es2lan))
43 netdev->features |= NETIF_F_MULTI_QUEUE;
44#endif
45
46
47Section 2: Qdisc support for multiqueue devices
48-----------------------------------------------
49
50Currently two qdiscs support multiqueue devices. A new round-robin qdisc,
51sch_rr, and sch_prio. The qdisc is responsible for classifying the skb's to
52bands and queues, and will store the queue mapping into skb->queue_mapping.
53Use this field in the base driver to determine which queue to send the skb
54to.
55
56sch_rr has been added for hardware that doesn't want scheduling policies from
57software, so it's a straight round-robin qdisc. It uses the same syntax and
58classification priomap that sch_prio uses, so it should be intuitive to
59configure for people who've used sch_prio.
60
61The PRIO qdisc naturally plugs into a multiqueue device. If PRIO has been
62built with NET_SCH_PRIO_MQ, then upon load, it will make sure the number of
63bands requested is equal to the number of queues on the hardware. If they
64are equal, it sets a one-to-one mapping up between the queues and bands. If
65they're not equal, it will not load the qdisc. This is the same behavior
66for RR. Once the association is made, any skb that is classified will have
67skb->queue_mapping set, which will allow the driver to properly queue skb's
68to multiple queues.
69
70
71Section 3: Brief howto using PRIO and RR for multiqueue devices
72---------------------------------------------------------------
73
74The userspace command 'tc,' part of the iproute2 package, is used to configure
75qdiscs. To add the PRIO qdisc to your network device, assuming the device is
76called eth0, run the following command:
77
78# tc qdisc add dev eth0 root handle 1: prio bands 4 multiqueue
79
80This will create 4 bands, 0 being highest priority, and associate those bands
81to the queues on your NIC. Assuming eth0 has 4 Tx queues, the band mapping
82would look like:
83
84band 0 => queue 0
85band 1 => queue 1
86band 2 => queue 2
87band 3 => queue 3
88
89Traffic will begin flowing through each queue if your TOS values are assigning
90traffic across the various bands. For example, ssh traffic will always try to
91go out band 0 based on TOS -> Linux priority conversion (realtime traffic),
92so it will be sent out queue 0. ICMP traffic (pings) fall into the "normal"
93traffic classification, which is band 1. Therefore pings will be sent out
94queue 1 on the NIC.
95
96Note the use of the multiqueue keyword. This is only in versions of iproute2
97that support multiqueue networking devices; if this is omitted when loading
98a qdisc onto a multiqueue device, the qdisc will load and operate the same
99as if it were loaded onto a single-queue device (i.e. - sends all traffic to
100queue 0).
101
102Another alternative to multiqueue band allocation can be done by using the
103multiqueue option and specify 0 bands. If this is the case, the qdisc will
104allocate the number of bands to equal the number of queues that the device
105reports, and bring the qdisc online.
106
107The behavior of tc filters remains the same, where it will override TOS priority
108classification.
109
110
111Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index ce1361f95243..37869295fc70 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -20,6 +20,30 @@ private data which gets freed when the network device is freed. If
20separately allocated data is attached to the network device 20separately allocated data is attached to the network device
21(dev->priv) then it is up to the module exit handler to free that. 21(dev->priv) then it is up to the module exit handler to free that.
22 22
23MTU
24===
25Each network device has a Maximum Transfer Unit. The MTU does not
26include any link layer protocol overhead. Upper layer protocols must
27not pass a socket buffer (skb) to a device to transmit with more data
28than the mtu. The MTU does not include link layer header overhead, so
29for example on Ethernet if the standard MTU of 1500 bytes is used, the
30actual skb will contain up to 1514 bytes because of the Ethernet
31header. Devices should allow for the 4 byte VLAN header as well.
32
33Segmentation Offload (GSO, TSO) is an exception to this rule. The
34upper layer protocol may pass a large socket buffer to the device
35transmit routine, and the device will break that up into separate
36packets based on the current MTU.
37
38MTU is symmetrical and applies both to receive and transmit. A device
39must be able to receive at least the maximum size packet allowed by
40the MTU. A network device may use the MTU as a mechanism to size receive
41buffers, but the device should allow packets with VLAN header. With
42standard Ethernet mtu of 1500 bytes, the device should allow up to
431518 byte packets (1500 + 14 header + 4 tag). The device may either
44drop, truncate, or pass up oversize packets, but dropping oversize
45packets is preferred.
46
23 47
24struct net_device synchronization rules 48struct net_device synchronization rules
25======================================= 49=======================================
@@ -43,16 +67,17 @@ dev->get_stats:
43 67
44dev->hard_start_xmit: 68dev->hard_start_xmit:
45 Synchronization: netif_tx_lock spinlock. 69 Synchronization: netif_tx_lock spinlock.
70
46 When the driver sets NETIF_F_LLTX in dev->features this will be 71 When the driver sets NETIF_F_LLTX in dev->features this will be
47 called without holding netif_tx_lock. In this case the driver 72 called without holding netif_tx_lock. In this case the driver
48 has to lock by itself when needed. It is recommended to use a try lock 73 has to lock by itself when needed. It is recommended to use a try lock
49 for this and return -1 when the spin lock fails. 74 for this and return NETDEV_TX_LOCKED when the spin lock fails.
50 The locking there should also properly protect against 75 The locking there should also properly protect against
51 set_multicast_list 76 set_multicast_list.
52 Context: Process with BHs disabled or BH (timer). 77
53 Notes: netif_queue_stopped() is guaranteed false 78 Context: Process with BHs disabled or BH (timer),
54 Interrupts must be enabled when calling hard_start_xmit. 79 will be called with interrupts disabled by netconsole.
55 (Interrupts must also be enabled when enabling the BH handler.) 80
56 Return codes: 81 Return codes:
57 o NETDEV_TX_OK everything ok. 82 o NETDEV_TX_OK everything ok.
58 o NETDEV_TX_BUSY Cannot transmit packet, try later 83 o NETDEV_TX_BUSY Cannot transmit packet, try later
@@ -74,4 +99,5 @@ dev->poll:
74 Synchronization: __LINK_STATE_RX_SCHED bit in dev->state. See 99 Synchronization: __LINK_STATE_RX_SCHED bit in dev->state. See
75 dev_close code and comments in net/core/dev.c for more info. 100 dev_close code and comments in net/core/dev.c for more info.
76 Context: softirq 101 Context: softirq
102 will be called with interrupts disabled by netconsole.
77 103
diff --git a/MAINTAINERS b/MAINTAINERS
index 3db68bf1d213..cba5f4df0e21 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2903,6 +2903,11 @@ P: Michal Ostrowski
2903M: mostrows@speakeasy.net 2903M: mostrows@speakeasy.net
2904S: Maintained 2904S: Maintained
2905 2905
2906PPP OVER L2TP
2907P: James Chapman
2908M: jchapman@katalix.com
2909S: Maintained
2910
2906PREEMPTIBLE KERNEL 2911PREEMPTIBLE KERNEL
2907P: Robert Love 2912P: Robert Love
2908M: rml@tech9.net 2913M: rml@tech9.net
diff --git a/arch/ppc/8260_io/enet.c b/arch/ppc/8260_io/enet.c
index 4c0a7d732f69..615b6583d9b0 100644
--- a/arch/ppc/8260_io/enet.c
+++ b/arch/ppc/8260_io/enet.c
@@ -477,9 +477,9 @@ for (;;) {
477 } 477 }
478 else { 478 else {
479 skb_put(skb,pkt_len-4); /* Make room */ 479 skb_put(skb,pkt_len-4); /* Make room */
480 eth_copy_and_sum(skb, 480 skb_copy_to_linear_data(skb,
481 (unsigned char *)__va(bdp->cbd_bufaddr), 481 (unsigned char *)__va(bdp->cbd_bufaddr),
482 pkt_len-4, 0); 482 pkt_len-4);
483 skb->protocol=eth_type_trans(skb,dev); 483 skb->protocol=eth_type_trans(skb,dev);
484 netif_rx(skb); 484 netif_rx(skb);
485 } 485 }
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index cab395da25da..6f3ed6a72e0b 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -734,9 +734,9 @@ for (;;) {
734 } 734 }
735 else { 735 else {
736 skb_put(skb,pkt_len); /* Make room */ 736 skb_put(skb,pkt_len); /* Make room */
737 eth_copy_and_sum(skb, 737 skb_copy_to_linear_data(skb,
738 (unsigned char *)__va(bdp->cbd_bufaddr), 738 (unsigned char *)__va(bdp->cbd_bufaddr),
739 pkt_len, 0); 739 pkt_len);
740 skb->protocol=eth_type_trans(skb,dev); 740 skb->protocol=eth_type_trans(skb,dev);
741 netif_rx(skb); 741 netif_rx(skb);
742 } 742 }
diff --git a/arch/ppc/8xx_io/enet.c b/arch/ppc/8xx_io/enet.c
index e58288e14369..703d47eee436 100644
--- a/arch/ppc/8xx_io/enet.c
+++ b/arch/ppc/8xx_io/enet.c
@@ -506,9 +506,9 @@ for (;;) {
506 } 506 }
507 else { 507 else {
508 skb_put(skb,pkt_len-4); /* Make room */ 508 skb_put(skb,pkt_len-4); /* Make room */
509 eth_copy_and_sum(skb, 509 skb_copy_to_linear_data(skb,
510 cep->rx_vaddr[bdp - cep->rx_bd_base], 510 cep->rx_vaddr[bdp - cep->rx_bd_base],
511 pkt_len-4, 0); 511 pkt_len-4);
512 skb->protocol=eth_type_trans(skb,dev); 512 skb->protocol=eth_type_trans(skb,dev);
513 netif_rx(skb); 513 netif_rx(skb);
514 } 514 }
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index d38335d2d710..0288279be9aa 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -725,7 +725,7 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
725 fep->stats.rx_dropped++; 725 fep->stats.rx_dropped++;
726 } else { 726 } else {
727 skb_put(skb,pkt_len-4); /* Make room */ 727 skb_put(skb,pkt_len-4); /* Make room */
728 eth_copy_and_sum(skb, data, pkt_len-4, 0); 728 skb_copy_to_linear_data(skb, data, pkt_len-4);
729 skb->protocol=eth_type_trans(skb,dev); 729 skb->protocol=eth_type_trans(skb,dev);
730 netif_rx(skb); 730 netif_rx(skb);
731 } 731 }
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 7e04dd69f609..59b054810ed0 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -199,7 +199,6 @@ static void hci_usb_tx_complete(struct urb *urb);
199#define __pending_q(husb, type) (&husb->pending_q[type-1]) 199#define __pending_q(husb, type) (&husb->pending_q[type-1])
200#define __completed_q(husb, type) (&husb->completed_q[type-1]) 200#define __completed_q(husb, type) (&husb->completed_q[type-1])
201#define __transmit_q(husb, type) (&husb->transmit_q[type-1]) 201#define __transmit_q(husb, type) (&husb->transmit_q[type-1])
202#define __reassembly(husb, type) (husb->reassembly[type-1])
203 202
204static inline struct _urb *__get_completed(struct hci_usb *husb, int type) 203static inline struct _urb *__get_completed(struct hci_usb *husb, int type)
205{ 204{
@@ -429,12 +428,6 @@ static void hci_usb_unlink_urbs(struct hci_usb *husb)
429 kfree(urb->transfer_buffer); 428 kfree(urb->transfer_buffer);
430 _urb_free(_urb); 429 _urb_free(_urb);
431 } 430 }
432
433 /* Release reassembly buffers */
434 if (husb->reassembly[i]) {
435 kfree_skb(husb->reassembly[i]);
436 husb->reassembly[i] = NULL;
437 }
438 } 431 }
439} 432}
440 433
@@ -671,83 +664,6 @@ static int hci_usb_send_frame(struct sk_buff *skb)
671 return 0; 664 return 0;
672} 665}
673 666
674static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int count)
675{
676 BT_DBG("%s type %d data %p count %d", husb->hdev->name, type, data, count);
677
678 husb->hdev->stat.byte_rx += count;
679
680 while (count) {
681 struct sk_buff *skb = __reassembly(husb, type);
682 struct { int expect; } *scb;
683 int len = 0;
684
685 if (!skb) {
686 /* Start of the frame */
687
688 switch (type) {
689 case HCI_EVENT_PKT:
690 if (count >= HCI_EVENT_HDR_SIZE) {
691 struct hci_event_hdr *h = data;
692 len = HCI_EVENT_HDR_SIZE + h->plen;
693 } else
694 return -EILSEQ;
695 break;
696
697 case HCI_ACLDATA_PKT:
698 if (count >= HCI_ACL_HDR_SIZE) {
699 struct hci_acl_hdr *h = data;
700 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
701 } else
702 return -EILSEQ;
703 break;
704#ifdef CONFIG_BT_HCIUSB_SCO
705 case HCI_SCODATA_PKT:
706 if (count >= HCI_SCO_HDR_SIZE) {
707 struct hci_sco_hdr *h = data;
708 len = HCI_SCO_HDR_SIZE + h->dlen;
709 } else
710 return -EILSEQ;
711 break;
712#endif
713 }
714 BT_DBG("new packet len %d", len);
715
716 skb = bt_skb_alloc(len, GFP_ATOMIC);
717 if (!skb) {
718 BT_ERR("%s no memory for the packet", husb->hdev->name);
719 return -ENOMEM;
720 }
721 skb->dev = (void *) husb->hdev;
722 bt_cb(skb)->pkt_type = type;
723
724 __reassembly(husb, type) = skb;
725
726 scb = (void *) skb->cb;
727 scb->expect = len;
728 } else {
729 /* Continuation */
730 scb = (void *) skb->cb;
731 len = scb->expect;
732 }
733
734 len = min(len, count);
735
736 memcpy(skb_put(skb, len), data, len);
737
738 scb->expect -= len;
739 if (!scb->expect) {
740 /* Complete frame */
741 __reassembly(husb, type) = NULL;
742 bt_cb(skb)->pkt_type = type;
743 hci_recv_frame(skb);
744 }
745
746 count -= len; data += len;
747 }
748 return 0;
749}
750
751static void hci_usb_rx_complete(struct urb *urb) 667static void hci_usb_rx_complete(struct urb *urb)
752{ 668{
753 struct _urb *_urb = container_of(urb, struct _urb, urb); 669 struct _urb *_urb = container_of(urb, struct _urb, urb);
@@ -776,7 +692,7 @@ static void hci_usb_rx_complete(struct urb *urb)
776 urb->iso_frame_desc[i].actual_length); 692 urb->iso_frame_desc[i].actual_length);
777 693
778 if (!urb->iso_frame_desc[i].status) 694 if (!urb->iso_frame_desc[i].status)
779 __recv_frame(husb, _urb->type, 695 hci_recv_fragment(husb->hdev, _urb->type,
780 urb->transfer_buffer + urb->iso_frame_desc[i].offset, 696 urb->transfer_buffer + urb->iso_frame_desc[i].offset,
781 urb->iso_frame_desc[i].actual_length); 697 urb->iso_frame_desc[i].actual_length);
782 } 698 }
@@ -784,7 +700,7 @@ static void hci_usb_rx_complete(struct urb *urb)
784 ; 700 ;
785#endif 701#endif
786 } else { 702 } else {
787 err = __recv_frame(husb, _urb->type, urb->transfer_buffer, count); 703 err = hci_recv_fragment(husb->hdev, _urb->type, urb->transfer_buffer, count);
788 if (err < 0) { 704 if (err < 0) {
789 BT_ERR("%s corrupted packet: type %d count %d", 705 BT_ERR("%s corrupted packet: type %d count %d",
790 husb->hdev->name, _urb->type, count); 706 husb->hdev->name, _urb->type, count);
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 963fc55cdc85..56cd3a92ceca 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -102,9 +102,9 @@ struct hci_usb {
102 struct hci_dev *hdev; 102 struct hci_dev *hdev;
103 103
104 unsigned long state; 104 unsigned long state;
105 105
106 struct usb_device *udev; 106 struct usb_device *udev;
107 107
108 struct usb_host_endpoint *bulk_in_ep; 108 struct usb_host_endpoint *bulk_in_ep;
109 struct usb_host_endpoint *bulk_out_ep; 109 struct usb_host_endpoint *bulk_out_ep;
110 struct usb_host_endpoint *intr_in_ep; 110 struct usb_host_endpoint *intr_in_ep;
@@ -116,7 +116,6 @@ struct hci_usb {
116 __u8 ctrl_req; 116 __u8 ctrl_req;
117 117
118 struct sk_buff_head transmit_q[4]; 118 struct sk_buff_head transmit_q[4];
119 struct sk_buff *reassembly[4]; /* Reassembly buffers */
120 119
121 rwlock_t completion_lock; 120 rwlock_t completion_lock;
122 121
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index b71a5ccc587f..0638730a4a19 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -180,11 +180,6 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
180 return total; 180 return total;
181} 181}
182 182
183static loff_t vhci_llseek(struct file *file, loff_t offset, int origin)
184{
185 return -ESPIPE;
186}
187
188static ssize_t vhci_read(struct file *file, 183static ssize_t vhci_read(struct file *file,
189 char __user *buf, size_t count, loff_t *pos) 184 char __user *buf, size_t count, loff_t *pos)
190{ 185{
@@ -334,7 +329,6 @@ static int vhci_fasync(int fd, struct file *file, int on)
334 329
335static const struct file_operations vhci_fops = { 330static const struct file_operations vhci_fops = {
336 .owner = THIS_MODULE, 331 .owner = THIS_MODULE,
337 .llseek = vhci_llseek,
338 .read = vhci_read, 332 .read = vhci_read,
339 .write = vhci_write, 333 .write = vhci_write,
340 .poll = vhci_poll, 334 .poll = vhci_poll,
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index da1a22c13865..ab18343e58ef 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -990,7 +990,7 @@ static void elmc_rcv_int(struct net_device *dev)
990 if (skb != NULL) { 990 if (skb != NULL) {
991 skb_reserve(skb, 2); /* 16 byte alignment */ 991 skb_reserve(skb, 2); /* 16 byte alignment */
992 skb_put(skb,totlen); 992 skb_put(skb,totlen);
993 eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); 993 skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
994 skb->protocol = eth_type_trans(skb, dev); 994 skb->protocol = eth_type_trans(skb, dev);
995 netif_rx(skb); 995 netif_rx(skb);
996 dev->last_rx = jiffies; 996 dev->last_rx = jiffies;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 0877fc372f4b..e89ace109a5d 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -333,9 +333,9 @@ static int lance_rx (struct net_device *dev)
333 333
334 skb_reserve (skb, 2); /* 16 byte align */ 334 skb_reserve (skb, 2); /* 16 byte align */
335 skb_put (skb, len); /* make room */ 335 skb_put (skb, len); /* make room */
336 eth_copy_and_sum(skb, 336 skb_copy_to_linear_data(skb,
337 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), 337 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
338 len, 0); 338 len);
339 skb->protocol = eth_type_trans (skb, dev); 339 skb->protocol = eth_type_trans (skb, dev);
340 netif_rx (skb); 340 netif_rx (skb);
341 dev->last_rx = jiffies; 341 dev->last_rx = jiffies;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a844b1fe2dc4..21a6ccbf92e0 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2017,7 +2017,7 @@ no_early_rx:
2017#if RX_BUF_IDX == 3 2017#if RX_BUF_IDX == 3
2018 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2018 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
2019#else 2019#else
2020 eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); 2020 skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
2021#endif 2021#endif
2022 skb_put (skb, pkt_size); 2022 skb_put (skb, pkt_size);
2023 2023
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 80572e2c9dab..ba314adf68b8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -25,6 +25,14 @@ menuconfig NETDEVICES
25# that for each of the symbols. 25# that for each of the symbols.
26if NETDEVICES 26if NETDEVICES
27 27
28config NETDEVICES_MULTIQUEUE
29 bool "Netdevice multiple hardware queue support"
30 ---help---
31 Say Y here if you want to allow the network stack to use multiple
32 hardware TX queues on an ethernet device.
33
34 Most people will say N here.
35
28config IFB 36config IFB
29 tristate "Intermediate Functional Block support" 37 tristate "Intermediate Functional Block support"
30 depends on NET_CLS_ACT 38 depends on NET_CLS_ACT
@@ -2784,6 +2792,19 @@ config PPPOATM
2784 which can lead to bad results if the ATM peer loses state and 2792 which can lead to bad results if the ATM peer loses state and
2785 changes its encapsulation unilaterally. 2793 changes its encapsulation unilaterally.
2786 2794
2795config PPPOL2TP
2796 tristate "PPP over L2TP (EXPERIMENTAL)"
2797 depends on EXPERIMENTAL && PPP
2798 help
2799 Support for PPP-over-L2TP socket family. L2TP is a protocol
2800 used by ISPs and enterprises to tunnel PPP traffic over UDP
2801 tunnels. L2TP is replacing PPTP for VPN uses.
2802
2803 This kernel component handles only L2TP data packets: a
2804	  userland daemon handles the L2TP control protocol (tunnel
2805 and session setup). One such daemon is OpenL2TP
2806 (http://openl2tp.sourceforge.net/).
2807
2787config SLIP 2808config SLIP
2788 tristate "SLIP (serial line) support" 2809 tristate "SLIP (serial line) support"
2789 ---help--- 2810 ---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1bbcbedad04a..a2241e6e1457 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -121,6 +121,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
121obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o 121obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
122obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o 122obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
123obj-$(CONFIG_PPPOE) += pppox.o pppoe.o 123obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
124obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
124 125
125obj-$(CONFIG_SLIP) += slip.o 126obj-$(CONFIG_SLIP) += slip.o
126obj-$(CONFIG_SLHC) += slhc.o 127obj-$(CONFIG_SLHC) += slhc.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 81d5a374042a..a45de6975bfe 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -322,9 +322,9 @@ static int lance_rx (struct net_device *dev)
322 322
323 skb_reserve (skb, 2); /* 16 byte align */ 323 skb_reserve (skb, 2); /* 16 byte align */
324 skb_put (skb, len); /* make room */ 324 skb_put (skb, len); /* make room */
325 eth_copy_and_sum(skb, 325 skb_copy_to_linear_data(skb,
326 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), 326 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
327 len, 0); 327 len);
328 skb->protocol = eth_type_trans (skb, dev); 328 skb->protocol = eth_type_trans (skb, dev);
329 netif_rx (skb); 329 netif_rx (skb);
330 dev->last_rx = jiffies; 330 dev->last_rx = jiffies;
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index a241ae7855a3..bc5a38a6705f 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -746,7 +746,7 @@ static int ariadne_rx(struct net_device *dev)
746 746
747 skb_reserve(skb,2); /* 16 byte align */ 747 skb_reserve(skb,2); /* 16 byte align */
748 skb_put(skb,pkt_len); /* Make room */ 748 skb_put(skb,pkt_len); /* Make room */
749 eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); 749 skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len);
750 skb->protocol=eth_type_trans(skb,dev); 750 skb->protocol=eth_type_trans(skb,dev);
751#if 0 751#if 0
752 printk(KERN_DEBUG "RX pkt type 0x%04x from ", 752 printk(KERN_DEBUG "RX pkt type 0x%04x from ",
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 2438c5bff237..f6ece1d43f6e 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -258,7 +258,7 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
258 skb_reserve(skb, 2); 258 skb_reserve(skb, 2);
259 dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, 259 dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
260 length, DMA_FROM_DEVICE); 260 length, DMA_FROM_DEVICE);
261 eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0); 261 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
262 skb_put(skb, length); 262 skb_put(skb, length);
263 skb->protocol = eth_type_trans(skb, dev); 263 skb->protocol = eth_type_trans(skb, dev);
264 264
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index c27cfcef45fa..e86b3691765b 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1205,8 +1205,8 @@ static int au1000_rx(struct net_device *dev)
1205 continue; 1205 continue;
1206 } 1206 }
1207 skb_reserve(skb, 2); /* 16 byte IP header align */ 1207 skb_reserve(skb, 2); /* 16 byte IP header align */
1208 eth_copy_and_sum(skb, 1208 skb_copy_to_linear_data(skb,
1209 (unsigned char *)pDB->vaddr, frmlen, 0); 1209 (unsigned char *)pDB->vaddr, frmlen);
1210 skb_put(skb, frmlen); 1210 skb_put(skb, frmlen);
1211 skb->protocol = eth_type_trans(skb, dev); 1211 skb->protocol = eth_type_trans(skb, dev);
1212 netif_rx(skb); /* pass the packet to upper layers */ 1212 netif_rx(skb); /* pass the packet to upper layers */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ce3ed67a878e..d681903c592d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -40,7 +40,6 @@
40#define BCM_VLAN 1 40#define BCM_VLAN 1
41#endif 41#endif
42#include <net/ip.h> 42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h> 43#include <net/checksum.h>
45#include <linux/workqueue.h> 44#include <linux/workqueue.h>
46#include <linux/crc32.h> 45#include <linux/crc32.h>
@@ -54,8 +53,8 @@
54 53
55#define DRV_MODULE_NAME "bnx2" 54#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": " 55#define PFX DRV_MODULE_NAME ": "
57#define DRV_MODULE_VERSION "1.5.11" 56#define DRV_MODULE_VERSION "1.6.2"
58#define DRV_MODULE_RELDATE "June 4, 2007" 57#define DRV_MODULE_RELDATE "July 6, 2007"
59 58
60#define RUN_AT(x) (jiffies + (x)) 59#define RUN_AT(x) (jiffies + (x))
61 60
@@ -550,6 +549,9 @@ bnx2_report_fw_link(struct bnx2 *bp)
550{ 549{
551 u32 fw_link_status = 0; 550 u32 fw_link_status = 0;
552 551
552 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
553 return;
554
553 if (bp->link_up) { 555 if (bp->link_up) {
554 u32 bmsr; 556 u32 bmsr;
555 557
@@ -601,12 +603,21 @@ bnx2_report_fw_link(struct bnx2 *bp)
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status); 603 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
602} 604}
603 605
606static char *
607bnx2_xceiver_str(struct bnx2 *bp)
608{
609 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
610 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
611 "Copper"));
612}
613
604static void 614static void
605bnx2_report_link(struct bnx2 *bp) 615bnx2_report_link(struct bnx2 *bp)
606{ 616{
607 if (bp->link_up) { 617 if (bp->link_up) {
608 netif_carrier_on(bp->dev); 618 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); 619 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
620 bnx2_xceiver_str(bp));
610 621
611 printk("%d Mbps ", bp->line_speed); 622 printk("%d Mbps ", bp->line_speed);
612 623
@@ -630,7 +641,8 @@ bnx2_report_link(struct bnx2 *bp)
630 } 641 }
631 else { 642 else {
632 netif_carrier_off(bp->dev); 643 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); 644 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
645 bnx2_xceiver_str(bp));
634 } 646 }
635 647
636 bnx2_report_fw_link(bp); 648 bnx2_report_fw_link(bp);
@@ -1100,6 +1112,9 @@ bnx2_set_link(struct bnx2 *bp)
1100 return 0; 1112 return 0;
1101 } 1113 }
1102 1114
1115 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1116 return 0;
1117
1103 link_up = bp->link_up; 1118 link_up = bp->link_up;
1104 1119
1105 bnx2_enable_bmsr1(bp); 1120 bnx2_enable_bmsr1(bp);
@@ -1210,12 +1225,74 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
1210 return adv; 1225 return adv;
1211} 1226}
1212 1227
1228static int bnx2_fw_sync(struct bnx2 *, u32, int);
1229
1213static int 1230static int
1214bnx2_setup_serdes_phy(struct bnx2 *bp) 1231bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1232{
1233 u32 speed_arg = 0, pause_adv;
1234
1235 pause_adv = bnx2_phy_get_pause_adv(bp);
1236
1237 if (bp->autoneg & AUTONEG_SPEED) {
1238 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1239 if (bp->advertising & ADVERTISED_10baseT_Half)
1240 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1241 if (bp->advertising & ADVERTISED_10baseT_Full)
1242 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1243 if (bp->advertising & ADVERTISED_100baseT_Half)
1244 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1245 if (bp->advertising & ADVERTISED_100baseT_Full)
1246 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1247 if (bp->advertising & ADVERTISED_1000baseT_Full)
1248 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1249 if (bp->advertising & ADVERTISED_2500baseX_Full)
1250 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1251 } else {
1252 if (bp->req_line_speed == SPEED_2500)
1253 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1254 else if (bp->req_line_speed == SPEED_1000)
1255 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1256 else if (bp->req_line_speed == SPEED_100) {
1257 if (bp->req_duplex == DUPLEX_FULL)
1258 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1259 else
1260 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1261 } else if (bp->req_line_speed == SPEED_10) {
1262 if (bp->req_duplex == DUPLEX_FULL)
1263 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1264 else
1265 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1266 }
1267 }
1268
1269 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1270 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1271 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1272 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1273
1274 if (port == PORT_TP)
1275 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1276 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1277
1278 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1279
1280 spin_unlock_bh(&bp->phy_lock);
1281 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1282 spin_lock_bh(&bp->phy_lock);
1283
1284 return 0;
1285}
1286
1287static int
1288bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1215{ 1289{
1216 u32 adv, bmcr; 1290 u32 adv, bmcr;
1217 u32 new_adv = 0; 1291 u32 new_adv = 0;
1218 1292
1293 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1294 return (bnx2_setup_remote_phy(bp, port));
1295
1219 if (!(bp->autoneg & AUTONEG_SPEED)) { 1296 if (!(bp->autoneg & AUTONEG_SPEED)) {
1220 u32 new_bmcr; 1297 u32 new_bmcr;
1221 int force_link_down = 0; 1298 int force_link_down = 0;
@@ -1323,7 +1400,9 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
1323} 1400}
1324 1401
1325#define ETHTOOL_ALL_FIBRE_SPEED \ 1402#define ETHTOOL_ALL_FIBRE_SPEED \
1326 (ADVERTISED_1000baseT_Full) 1403 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1404 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1405 (ADVERTISED_1000baseT_Full)
1327 1406
1328#define ETHTOOL_ALL_COPPER_SPEED \ 1407#define ETHTOOL_ALL_COPPER_SPEED \
1329 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ 1408 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
@@ -1335,6 +1414,188 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
1335 1414
1336#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL) 1415#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1337 1416
1417static void
1418bnx2_set_default_remote_link(struct bnx2 *bp)
1419{
1420 u32 link;
1421
1422 if (bp->phy_port == PORT_TP)
1423 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1424 else
1425 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1426
1427 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1428 bp->req_line_speed = 0;
1429 bp->autoneg |= AUTONEG_SPEED;
1430 bp->advertising = ADVERTISED_Autoneg;
1431 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1432 bp->advertising |= ADVERTISED_10baseT_Half;
1433 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1434 bp->advertising |= ADVERTISED_10baseT_Full;
1435 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1436 bp->advertising |= ADVERTISED_100baseT_Half;
1437 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1438 bp->advertising |= ADVERTISED_100baseT_Full;
1439 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1440 bp->advertising |= ADVERTISED_1000baseT_Full;
1441 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1442 bp->advertising |= ADVERTISED_2500baseX_Full;
1443 } else {
1444 bp->autoneg = 0;
1445 bp->advertising = 0;
1446 bp->req_duplex = DUPLEX_FULL;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1448 bp->req_line_speed = SPEED_10;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1450 bp->req_duplex = DUPLEX_HALF;
1451 }
1452 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1453 bp->req_line_speed = SPEED_100;
1454 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1455 bp->req_duplex = DUPLEX_HALF;
1456 }
1457 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1458 bp->req_line_speed = SPEED_1000;
1459 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1460 bp->req_line_speed = SPEED_2500;
1461 }
1462}
1463
1464static void
1465bnx2_set_default_link(struct bnx2 *bp)
1466{
1467 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1468 return bnx2_set_default_remote_link(bp);
1469
1470 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1471 bp->req_line_speed = 0;
1472 if (bp->phy_flags & PHY_SERDES_FLAG) {
1473 u32 reg;
1474
1475 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1476
1477 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1478 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1479 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1480 bp->autoneg = 0;
1481 bp->req_line_speed = bp->line_speed = SPEED_1000;
1482 bp->req_duplex = DUPLEX_FULL;
1483 }
1484 } else
1485 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1486}
1487
1488static void
1489bnx2_send_heart_beat(struct bnx2 *bp)
1490{
1491 u32 msg;
1492 u32 addr;
1493
1494 spin_lock(&bp->indirect_lock);
1495 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1496 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1497 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1498 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1499 spin_unlock(&bp->indirect_lock);
1500}
1501
1502static void
1503bnx2_remote_phy_event(struct bnx2 *bp)
1504{
1505 u32 msg;
1506 u8 link_up = bp->link_up;
1507 u8 old_port;
1508
1509 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1510
1511 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1512 bnx2_send_heart_beat(bp);
1513
1514 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1515
1516 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1517 bp->link_up = 0;
1518 else {
1519 u32 speed;
1520
1521 bp->link_up = 1;
1522 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1523 bp->duplex = DUPLEX_FULL;
1524 switch (speed) {
1525 case BNX2_LINK_STATUS_10HALF:
1526 bp->duplex = DUPLEX_HALF;
1527 case BNX2_LINK_STATUS_10FULL:
1528 bp->line_speed = SPEED_10;
1529 break;
1530 case BNX2_LINK_STATUS_100HALF:
1531 bp->duplex = DUPLEX_HALF;
1532 case BNX2_LINK_STATUS_100BASE_T4:
1533 case BNX2_LINK_STATUS_100FULL:
1534 bp->line_speed = SPEED_100;
1535 break;
1536 case BNX2_LINK_STATUS_1000HALF:
1537 bp->duplex = DUPLEX_HALF;
1538 case BNX2_LINK_STATUS_1000FULL:
1539 bp->line_speed = SPEED_1000;
1540 break;
1541 case BNX2_LINK_STATUS_2500HALF:
1542 bp->duplex = DUPLEX_HALF;
1543 case BNX2_LINK_STATUS_2500FULL:
1544 bp->line_speed = SPEED_2500;
1545 break;
1546 default:
1547 bp->line_speed = 0;
1548 break;
1549 }
1550
1551 spin_lock(&bp->phy_lock);
1552 bp->flow_ctrl = 0;
1553 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1554 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1555 if (bp->duplex == DUPLEX_FULL)
1556 bp->flow_ctrl = bp->req_flow_ctrl;
1557 } else {
1558 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1559 bp->flow_ctrl |= FLOW_CTRL_TX;
1560 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1561 bp->flow_ctrl |= FLOW_CTRL_RX;
1562 }
1563
1564 old_port = bp->phy_port;
1565 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1566 bp->phy_port = PORT_FIBRE;
1567 else
1568 bp->phy_port = PORT_TP;
1569
1570 if (old_port != bp->phy_port)
1571 bnx2_set_default_link(bp);
1572
1573 spin_unlock(&bp->phy_lock);
1574 }
1575 if (bp->link_up != link_up)
1576 bnx2_report_link(bp);
1577
1578 bnx2_set_mac_link(bp);
1579}
1580
1581static int
1582bnx2_set_remote_link(struct bnx2 *bp)
1583{
1584 u32 evt_code;
1585
1586 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1587 switch (evt_code) {
1588 case BNX2_FW_EVT_CODE_LINK_EVENT:
1589 bnx2_remote_phy_event(bp);
1590 break;
1591 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1592 default:
1593 bnx2_send_heart_beat(bp);
1594 break;
1595 }
1596 return 0;
1597}
1598
1338static int 1599static int
1339bnx2_setup_copper_phy(struct bnx2 *bp) 1600bnx2_setup_copper_phy(struct bnx2 *bp)
1340{ 1601{
@@ -1433,13 +1694,13 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
1433} 1694}
1434 1695
1435static int 1696static int
1436bnx2_setup_phy(struct bnx2 *bp) 1697bnx2_setup_phy(struct bnx2 *bp, u8 port)
1437{ 1698{
1438 if (bp->loopback == MAC_LOOPBACK) 1699 if (bp->loopback == MAC_LOOPBACK)
1439 return 0; 1700 return 0;
1440 1701
1441 if (bp->phy_flags & PHY_SERDES_FLAG) { 1702 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp)); 1703 return (bnx2_setup_serdes_phy(bp, port));
1443 } 1704 }
1444 else { 1705 else {
1445 return (bnx2_setup_copper_phy(bp)); 1706 return (bnx2_setup_copper_phy(bp));
@@ -1659,6 +1920,9 @@ bnx2_init_phy(struct bnx2 *bp)
1659 1920
1660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); 1921 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661 1922
1923 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1924 goto setup_phy;
1925
1662 bnx2_read_phy(bp, MII_PHYSID1, &val); 1926 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16; 1927 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val); 1928 bnx2_read_phy(bp, MII_PHYSID2, &val);
@@ -1676,7 +1940,9 @@ bnx2_init_phy(struct bnx2 *bp)
1676 rc = bnx2_init_copper_phy(bp); 1940 rc = bnx2_init_copper_phy(bp);
1677 } 1941 }
1678 1942
1679 bnx2_setup_phy(bp); 1943setup_phy:
1944 if (!rc)
1945 rc = bnx2_setup_phy(bp, bp->phy_port);
1680 1946
1681 return rc; 1947 return rc;
1682} 1948}
@@ -1984,6 +2250,9 @@ bnx2_phy_int(struct bnx2 *bp)
1984 bnx2_set_link(bp); 2250 bnx2_set_link(bp);
1985 spin_unlock(&bp->phy_lock); 2251 spin_unlock(&bp->phy_lock);
1986 } 2252 }
2253 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2254 bnx2_set_remote_link(bp);
2255
1987} 2256}
1988 2257
1989static void 2258static void
@@ -2297,6 +2566,7 @@ bnx2_interrupt(int irq, void *dev_instance)
2297{ 2566{
2298 struct net_device *dev = dev_instance; 2567 struct net_device *dev = dev_instance;
2299 struct bnx2 *bp = netdev_priv(dev); 2568 struct bnx2 *bp = netdev_priv(dev);
2569 struct status_block *sblk = bp->status_blk;
2300 2570
2301 /* When using INTx, it is possible for the interrupt to arrive 2571 /* When using INTx, it is possible for the interrupt to arrive
2302 * at the CPU before the status block posted prior to the 2572 * at the CPU before the status block posted prior to the
@@ -2304,7 +2574,7 @@ bnx2_interrupt(int irq, void *dev_instance)
2304 * When using MSI, the MSI message will always complete after 2574 * When using MSI, the MSI message will always complete after
2305 * the status block write. 2575 * the status block write.
2306 */ 2576 */
2307 if ((bp->status_blk->status_idx == bp->last_status_idx) && 2577 if ((sblk->status_idx == bp->last_status_idx) &&
2308 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & 2578 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2309 BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) 2579 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2310 return IRQ_NONE; 2580 return IRQ_NONE;
@@ -2313,16 +2583,25 @@ bnx2_interrupt(int irq, void *dev_instance)
2313 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 2583 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2314 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 2584 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2315 2585
2586 /* Read back to deassert IRQ immediately to avoid too many
2587 * spurious interrupts.
2588 */
2589 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2590
2316 /* Return here if interrupt is shared and is disabled. */ 2591 /* Return here if interrupt is shared and is disabled. */
2317 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2592 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2318 return IRQ_HANDLED; 2593 return IRQ_HANDLED;
2319 2594
2320 netif_rx_schedule(dev); 2595 if (netif_rx_schedule_prep(dev)) {
2596 bp->last_status_idx = sblk->status_idx;
2597 __netif_rx_schedule(dev);
2598 }
2321 2599
2322 return IRQ_HANDLED; 2600 return IRQ_HANDLED;
2323} 2601}
2324 2602
2325#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE 2603#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2604 STATUS_ATTN_BITS_TIMER_ABORT)
2326 2605
2327static inline int 2606static inline int
2328bnx2_has_work(struct bnx2 *bp) 2607bnx2_has_work(struct bnx2 *bp)
@@ -3562,6 +3841,36 @@ nvram_write_end:
3562 return rc; 3841 return rc;
3563} 3842}
3564 3843
3844static void
3845bnx2_init_remote_phy(struct bnx2 *bp)
3846{
3847 u32 val;
3848
3849 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3850 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3851 return;
3852
3853 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3854 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3855 return;
3856
3857 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3858 if (netif_running(bp->dev)) {
3859 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3860 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3861 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3862 val);
3863 }
3864 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3865
3866 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3867 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3868 bp->phy_port = PORT_FIBRE;
3869 else
3870 bp->phy_port = PORT_TP;
3871 }
3872}
3873
3565static int 3874static int
3566bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) 3875bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3567{ 3876{
@@ -3642,6 +3951,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3642 if (rc) 3951 if (rc)
3643 return rc; 3952 return rc;
3644 3953
3954 spin_lock_bh(&bp->phy_lock);
3955 bnx2_init_remote_phy(bp);
3956 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3957 bnx2_set_default_remote_link(bp);
3958 spin_unlock_bh(&bp->phy_lock);
3959
3645 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 3960 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3646 /* Adjust the voltage regular to two steps lower. The default 3961 /* Adjust the voltage regular to two steps lower. The default
3647 * of this register is 0x0000000e. */ 3962 * of this register is 0x0000000e. */
@@ -3826,7 +4141,7 @@ bnx2_init_chip(struct bnx2 *bp)
3826 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 4141 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3827 0); 4142 0);
3828 4143
3829 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff); 4144 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
3830 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); 4145 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3831 4146
3832 udelay(20); 4147 udelay(20);
@@ -4069,8 +4384,8 @@ bnx2_init_nic(struct bnx2 *bp)
4069 4384
4070 spin_lock_bh(&bp->phy_lock); 4385 spin_lock_bh(&bp->phy_lock);
4071 bnx2_init_phy(bp); 4386 bnx2_init_phy(bp);
4072 spin_unlock_bh(&bp->phy_lock);
4073 bnx2_set_link(bp); 4387 bnx2_set_link(bp);
4388 spin_unlock_bh(&bp->phy_lock);
4074 return 0; 4389 return 0;
4075} 4390}
4076 4391
@@ -4600,6 +4915,9 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
4600static void 4915static void
4601bnx2_5708_serdes_timer(struct bnx2 *bp) 4916bnx2_5708_serdes_timer(struct bnx2 *bp)
4602{ 4917{
4918 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4919 return;
4920
4603 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) { 4921 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4604 bp->serdes_an_pending = 0; 4922 bp->serdes_an_pending = 0;
4605 return; 4923 return;
@@ -4631,7 +4949,6 @@ static void
4631bnx2_timer(unsigned long data) 4949bnx2_timer(unsigned long data)
4632{ 4950{
4633 struct bnx2 *bp = (struct bnx2 *) data; 4951 struct bnx2 *bp = (struct bnx2 *) data;
4634 u32 msg;
4635 4952
4636 if (!netif_running(bp->dev)) 4953 if (!netif_running(bp->dev))
4637 return; 4954 return;
@@ -4639,8 +4956,7 @@ bnx2_timer(unsigned long data)
4639 if (atomic_read(&bp->intr_sem) != 0) 4956 if (atomic_read(&bp->intr_sem) != 0)
4640 goto bnx2_restart_timer; 4957 goto bnx2_restart_timer;
4641 4958
4642 msg = (u32) ++bp->fw_drv_pulse_wr_seq; 4959 bnx2_send_heart_beat(bp);
4643 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4644 4960
4645 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT); 4961 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4646 4962
@@ -5083,17 +5399,25 @@ static int
5083bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 5399bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5084{ 5400{
5085 struct bnx2 *bp = netdev_priv(dev); 5401 struct bnx2 *bp = netdev_priv(dev);
5402 int support_serdes = 0, support_copper = 0;
5086 5403
5087 cmd->supported = SUPPORTED_Autoneg; 5404 cmd->supported = SUPPORTED_Autoneg;
5088 if (bp->phy_flags & PHY_SERDES_FLAG) { 5405 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5406 support_serdes = 1;
5407 support_copper = 1;
5408 } else if (bp->phy_port == PORT_FIBRE)
5409 support_serdes = 1;
5410 else
5411 support_copper = 1;
5412
5413 if (support_serdes) {
5089 cmd->supported |= SUPPORTED_1000baseT_Full | 5414 cmd->supported |= SUPPORTED_1000baseT_Full |
5090 SUPPORTED_FIBRE; 5415 SUPPORTED_FIBRE;
5091 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) 5416 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5092 cmd->supported |= SUPPORTED_2500baseX_Full; 5417 cmd->supported |= SUPPORTED_2500baseX_Full;
5093 5418
5094 cmd->port = PORT_FIBRE;
5095 } 5419 }
5096 else { 5420 if (support_copper) {
5097 cmd->supported |= SUPPORTED_10baseT_Half | 5421 cmd->supported |= SUPPORTED_10baseT_Half |
5098 SUPPORTED_10baseT_Full | 5422 SUPPORTED_10baseT_Full |
5099 SUPPORTED_100baseT_Half | 5423 SUPPORTED_100baseT_Half |
@@ -5101,9 +5425,10 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5101 SUPPORTED_1000baseT_Full | 5425 SUPPORTED_1000baseT_Full |
5102 SUPPORTED_TP; 5426 SUPPORTED_TP;
5103 5427
5104 cmd->port = PORT_TP;
5105 } 5428 }
5106 5429
5430 spin_lock_bh(&bp->phy_lock);
5431 cmd->port = bp->phy_port;
5107 cmd->advertising = bp->advertising; 5432 cmd->advertising = bp->advertising;
5108 5433
5109 if (bp->autoneg & AUTONEG_SPEED) { 5434 if (bp->autoneg & AUTONEG_SPEED) {
@@ -5121,6 +5446,7 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5121 cmd->speed = -1; 5446 cmd->speed = -1;
5122 cmd->duplex = -1; 5447 cmd->duplex = -1;
5123 } 5448 }
5449 spin_unlock_bh(&bp->phy_lock);
5124 5450
5125 cmd->transceiver = XCVR_INTERNAL; 5451 cmd->transceiver = XCVR_INTERNAL;
5126 cmd->phy_address = bp->phy_addr; 5452 cmd->phy_address = bp->phy_addr;
@@ -5136,6 +5462,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5136 u8 req_duplex = bp->req_duplex; 5462 u8 req_duplex = bp->req_duplex;
5137 u16 req_line_speed = bp->req_line_speed; 5463 u16 req_line_speed = bp->req_line_speed;
5138 u32 advertising = bp->advertising; 5464 u32 advertising = bp->advertising;
5465 int err = -EINVAL;
5466
5467 spin_lock_bh(&bp->phy_lock);
5468
5469 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5470 goto err_out_unlock;
5471
5472 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5473 goto err_out_unlock;
5139 5474
5140 if (cmd->autoneg == AUTONEG_ENABLE) { 5475 if (cmd->autoneg == AUTONEG_ENABLE) {
5141 autoneg |= AUTONEG_SPEED; 5476 autoneg |= AUTONEG_SPEED;
@@ -5148,44 +5483,41 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5148 (cmd->advertising == ADVERTISED_100baseT_Half) || 5483 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5149 (cmd->advertising == ADVERTISED_100baseT_Full)) { 5484 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5150 5485
5151 if (bp->phy_flags & PHY_SERDES_FLAG) 5486 if (cmd->port == PORT_FIBRE)
5152 return -EINVAL; 5487 goto err_out_unlock;
5153 5488
5154 advertising = cmd->advertising; 5489 advertising = cmd->advertising;
5155 5490
5156 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) { 5491 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5157 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) 5492 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5158 return -EINVAL; 5493 (cmd->port == PORT_TP))
5159 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) { 5494 goto err_out_unlock;
5495 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5160 advertising = cmd->advertising; 5496 advertising = cmd->advertising;
5161 } 5497 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5162 else if (cmd->advertising == ADVERTISED_1000baseT_Half) { 5498 goto err_out_unlock;
5163 return -EINVAL;
5164 }
5165 else { 5499 else {
5166 if (bp->phy_flags & PHY_SERDES_FLAG) { 5500 if (cmd->port == PORT_FIBRE)
5167 advertising = ETHTOOL_ALL_FIBRE_SPEED; 5501 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5168 } 5502 else
5169 else {
5170 advertising = ETHTOOL_ALL_COPPER_SPEED; 5503 advertising = ETHTOOL_ALL_COPPER_SPEED;
5171 }
5172 } 5504 }
5173 advertising |= ADVERTISED_Autoneg; 5505 advertising |= ADVERTISED_Autoneg;
5174 } 5506 }
5175 else { 5507 else {
5176 if (bp->phy_flags & PHY_SERDES_FLAG) { 5508 if (cmd->port == PORT_FIBRE) {
5177 if ((cmd->speed != SPEED_1000 && 5509 if ((cmd->speed != SPEED_1000 &&
5178 cmd->speed != SPEED_2500) || 5510 cmd->speed != SPEED_2500) ||
5179 (cmd->duplex != DUPLEX_FULL)) 5511 (cmd->duplex != DUPLEX_FULL))
5180 return -EINVAL; 5512 goto err_out_unlock;
5181 5513
5182 if (cmd->speed == SPEED_2500 && 5514 if (cmd->speed == SPEED_2500 &&
5183 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) 5515 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5184 return -EINVAL; 5516 goto err_out_unlock;
5185 }
5186 else if (cmd->speed == SPEED_1000) {
5187 return -EINVAL;
5188 } 5517 }
5518 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5519 goto err_out_unlock;
5520
5189 autoneg &= ~AUTONEG_SPEED; 5521 autoneg &= ~AUTONEG_SPEED;
5190 req_line_speed = cmd->speed; 5522 req_line_speed = cmd->speed;
5191 req_duplex = cmd->duplex; 5523 req_duplex = cmd->duplex;
@@ -5197,13 +5529,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5197 bp->req_line_speed = req_line_speed; 5529 bp->req_line_speed = req_line_speed;
5198 bp->req_duplex = req_duplex; 5530 bp->req_duplex = req_duplex;
5199 5531
5200 spin_lock_bh(&bp->phy_lock); 5532 err = bnx2_setup_phy(bp, cmd->port);
5201
5202 bnx2_setup_phy(bp);
5203 5533
5534err_out_unlock:
5204 spin_unlock_bh(&bp->phy_lock); 5535 spin_unlock_bh(&bp->phy_lock);
5205 5536
5206 return 0; 5537 return err;
5207} 5538}
5208 5539
5209static void 5540static void
@@ -5214,11 +5545,7 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5214 strcpy(info->driver, DRV_MODULE_NAME); 5545 strcpy(info->driver, DRV_MODULE_NAME);
5215 strcpy(info->version, DRV_MODULE_VERSION); 5546 strcpy(info->version, DRV_MODULE_VERSION);
5216 strcpy(info->bus_info, pci_name(bp->pdev)); 5547 strcpy(info->bus_info, pci_name(bp->pdev));
5217 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0'; 5548 strcpy(info->fw_version, bp->fw_version);
5218 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5219 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5220 info->fw_version[1] = info->fw_version[3] = '.';
5221 info->fw_version[5] = 0;
5222} 5549}
5223 5550
5224#define BNX2_REGDUMP_LEN (32 * 1024) 5551#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -5330,6 +5657,14 @@ bnx2_nway_reset(struct net_device *dev)
5330 5657
5331 spin_lock_bh(&bp->phy_lock); 5658 spin_lock_bh(&bp->phy_lock);
5332 5659
5660 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5661 int rc;
5662
5663 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5664 spin_unlock_bh(&bp->phy_lock);
5665 return rc;
5666 }
5667
5333 /* Force a link down visible on the other side */ 5668 /* Force a link down visible on the other side */
5334 if (bp->phy_flags & PHY_SERDES_FLAG) { 5669 if (bp->phy_flags & PHY_SERDES_FLAG) {
5335 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); 5670 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
@@ -5543,7 +5878,7 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5543 5878
5544 spin_lock_bh(&bp->phy_lock); 5879 spin_lock_bh(&bp->phy_lock);
5545 5880
5546 bnx2_setup_phy(bp); 5881 bnx2_setup_phy(bp, bp->phy_port);
5547 5882
5548 spin_unlock_bh(&bp->phy_lock); 5883 spin_unlock_bh(&bp->phy_lock);
5549 5884
@@ -5939,6 +6274,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5939 case SIOCGMIIREG: { 6274 case SIOCGMIIREG: {
5940 u32 mii_regval; 6275 u32 mii_regval;
5941 6276
6277 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6278 return -EOPNOTSUPP;
6279
5942 if (!netif_running(dev)) 6280 if (!netif_running(dev))
5943 return -EAGAIN; 6281 return -EAGAIN;
5944 6282
@@ -5955,6 +6293,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5955 if (!capable(CAP_NET_ADMIN)) 6293 if (!capable(CAP_NET_ADMIN))
5956 return -EPERM; 6294 return -EPERM;
5957 6295
6296 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6297 return -EOPNOTSUPP;
6298
5958 if (!netif_running(dev)) 6299 if (!netif_running(dev))
5959 return -EAGAIN; 6300 return -EAGAIN;
5960 6301
@@ -6116,7 +6457,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6116{ 6457{
6117 struct bnx2 *bp; 6458 struct bnx2 *bp;
6118 unsigned long mem_len; 6459 unsigned long mem_len;
6119 int rc; 6460 int rc, i, j;
6120 u32 reg; 6461 u32 reg;
6121 u64 dma_mask, persist_dma_mask; 6462 u64 dma_mask, persist_dma_mask;
6122 6463
@@ -6273,7 +6614,35 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6273 goto err_out_unmap; 6614 goto err_out_unmap;
6274 } 6615 }
6275 6616
6276 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV); 6617 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6618 for (i = 0, j = 0; i < 3; i++) {
6619 u8 num, k, skip0;
6620
6621 num = (u8) (reg >> (24 - (i * 8)));
6622 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6623 if (num >= k || !skip0 || k == 1) {
6624 bp->fw_version[j++] = (num / k) + '0';
6625 skip0 = 0;
6626 }
6627 }
6628 if (i != 2)
6629 bp->fw_version[j++] = '.';
6630 }
6631 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6632 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6633 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6634 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6635 int i;
6636 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6637
6638 bp->fw_version[j++] = ' ';
6639 for (i = 0; i < 3; i++) {
6640 reg = REG_RD_IND(bp, addr + i * 4);
6641 reg = swab32(reg);
6642 memcpy(&bp->fw_version[j], &reg, 4);
6643 j += 4;
6644 }
6645 }
6277 6646
6278 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER); 6647 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6279 bp->mac_addr[0] = (u8) (reg >> 8); 6648 bp->mac_addr[0] = (u8) (reg >> 8);
@@ -6315,7 +6684,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6315 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) 6684 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6316 bp->phy_flags |= PHY_SERDES_FLAG; 6685 bp->phy_flags |= PHY_SERDES_FLAG;
6317 6686
6687 bp->phy_port = PORT_TP;
6318 if (bp->phy_flags & PHY_SERDES_FLAG) { 6688 if (bp->phy_flags & PHY_SERDES_FLAG) {
6689 bp->phy_port = PORT_FIBRE;
6319 bp->flags |= NO_WOL_FLAG; 6690 bp->flags |= NO_WOL_FLAG;
6320 if (CHIP_NUM(bp) != CHIP_NUM_5706) { 6691 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6321 bp->phy_addr = 2; 6692 bp->phy_addr = 2;
@@ -6324,6 +6695,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6324 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 6695 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6325 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; 6696 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6326 } 6697 }
6698 bnx2_init_remote_phy(bp);
6699
6327 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || 6700 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6328 CHIP_NUM(bp) == CHIP_NUM_5708) 6701 CHIP_NUM(bp) == CHIP_NUM_5708)
6329 bp->phy_flags |= PHY_CRC_FIX_FLAG; 6702 bp->phy_flags |= PHY_CRC_FIX_FLAG;
@@ -6374,23 +6747,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6374 } 6747 }
6375 } 6748 }
6376 6749
6377 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; 6750 bnx2_set_default_link(bp);
6378 bp->req_line_speed = 0;
6379 if (bp->phy_flags & PHY_SERDES_FLAG) {
6380 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6381
6382 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6383 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6384 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6385 bp->autoneg = 0;
6386 bp->req_line_speed = bp->line_speed = SPEED_1000;
6387 bp->req_duplex = DUPLEX_FULL;
6388 }
6389 }
6390 else {
6391 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6392 }
6393
6394 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 6751 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6395 6752
6396 init_timer(&bp->timer); 6753 init_timer(&bp->timer);
@@ -6490,10 +6847,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6490 memcpy(dev->perm_addr, bp->mac_addr, 6); 6847 memcpy(dev->perm_addr, bp->mac_addr, 6);
6491 bp->name = board_info[ent->driver_data].name; 6848 bp->name = board_info[ent->driver_data].name;
6492 6849
6850 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6493 if (CHIP_NUM(bp) == CHIP_NUM_5709) 6851 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6494 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 6852 dev->features |= NETIF_F_IPV6_CSUM;
6495 else 6853
6496 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6497#ifdef BCM_VLAN 6854#ifdef BCM_VLAN
6498 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 6855 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6499#endif 6856#endif
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 49a5de253b17..d8cd1afeb23d 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6338,6 +6338,8 @@ struct l2_fhdr {
6338 6338
6339#define RX_COPY_THRESH 92 6339#define RX_COPY_THRESH 92
6340 6340
6341#define BNX2_MISC_ENABLE_DEFAULT 0x7ffffff
6342
6341#define DMA_READ_CHANS 5 6343#define DMA_READ_CHANS 5
6342#define DMA_WRITE_CHANS 3 6344#define DMA_WRITE_CHANS 3
6343 6345
@@ -6537,6 +6539,7 @@ struct bnx2 {
6537#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100 6539#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100
6538#define PHY_INT_MODE_LINK_READY_FLAG 0x200 6540#define PHY_INT_MODE_LINK_READY_FLAG 0x200
6539#define PHY_DIS_EARLY_DAC_FLAG 0x400 6541#define PHY_DIS_EARLY_DAC_FLAG 0x400
6542#define REMOTE_PHY_CAP_FLAG 0x800
6540 6543
6541 u32 mii_bmcr; 6544 u32 mii_bmcr;
6542 u32 mii_bmsr; 6545 u32 mii_bmsr;
@@ -6625,6 +6628,7 @@ struct bnx2 {
6625 u16 req_line_speed; 6628 u16 req_line_speed;
6626 u8 req_duplex; 6629 u8 req_duplex;
6627 6630
6631 u8 phy_port;
6628 u8 link_up; 6632 u8 link_up;
6629 6633
6630 u16 line_speed; 6634 u16 line_speed;
@@ -6656,7 +6660,7 @@ struct bnx2 {
6656 6660
6657 u32 shmem_base; 6661 u32 shmem_base;
6658 6662
6659 u32 fw_ver; 6663 char fw_version[32];
6660 6664
6661 int pm_cap; 6665 int pm_cap;
6662 int pcix_cap; 6666 int pcix_cap;
@@ -6770,7 +6774,7 @@ struct fw_info {
6770 * the firmware has timed out, the driver will assume there is no firmware 6774 * the firmware has timed out, the driver will assume there is no firmware
6771 * running and there won't be any firmware-driver synchronization during a 6775 * running and there won't be any firmware-driver synchronization during a
6772 * driver reset. */ 6776 * driver reset. */
6773#define FW_ACK_TIME_OUT_MS 100 6777#define FW_ACK_TIME_OUT_MS 1000
6774 6778
6775 6779
6776#define BNX2_DRV_RESET_SIGNATURE 0x00000000 6780#define BNX2_DRV_RESET_SIGNATURE 0x00000000
@@ -6788,6 +6792,7 @@ struct fw_info {
6788#define BNX2_DRV_MSG_CODE_DIAG 0x07000000 6792#define BNX2_DRV_MSG_CODE_DIAG 0x07000000
6789#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 6793#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000
6790#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000 6794#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000
6795#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000
6791 6796
6792#define BNX2_DRV_MSG_DATA 0x00ff0000 6797#define BNX2_DRV_MSG_DATA 0x00ff0000
6793#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000 6798#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
@@ -6836,6 +6841,7 @@ struct fw_info {
6836#define BNX2_LINK_STATUS_SERDES_LINK (1<<20) 6841#define BNX2_LINK_STATUS_SERDES_LINK (1<<20)
6837#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21) 6842#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21)
6838#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22) 6843#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22)
6844#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31)
6839 6845
6840#define BNX2_DRV_PULSE_MB 0x00000010 6846#define BNX2_DRV_PULSE_MB 0x00000010
6841#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff 6847#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff
@@ -6845,6 +6851,30 @@ struct fw_info {
6845 * This is used for debugging. */ 6851 * This is used for debugging. */
6846#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000 6852#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000
6847 6853
6854#define BNX2_DRV_MB_ARG0 0x00000014
6855#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0)
6856#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1)
6857#define BNX2_NETLINK_SET_LINK_SPEED_10 \
6858 (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \
6859 BNX2_NETLINK_SET_LINK_SPEED_10FULL)
6860#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2)
6861#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3)
6862#define BNX2_NETLINK_SET_LINK_SPEED_100 \
6863 (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \
6864 BNX2_NETLINK_SET_LINK_SPEED_100FULL)
6865#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4)
6866#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5)
6867#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6)
6868#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7)
6869#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8)
6870#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9)
6871#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10)
6872#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11)
6873#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12)
6874#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13)
6875#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14)
6876#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15)
6877
6848#define BNX2_DEV_INFO_SIGNATURE 0x00000020 6878#define BNX2_DEV_INFO_SIGNATURE 0x00000020
6849#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900 6879#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900
6850#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00 6880#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00
@@ -7006,6 +7036,8 @@ struct fw_info {
7006#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff 7036#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff
7007#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000 7037#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000
7008 7038
7039#define BNX2_MFW_VER_PTR 0x00000014c
7040
7009#define BNX2_BC_STATE_RESET_TYPE 0x000001c0 7041#define BNX2_BC_STATE_RESET_TYPE 0x000001c0
7010#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254 7042#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254
7011#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff 7043#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff
@@ -7059,12 +7091,42 @@ struct fw_info {
7059#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600) 7091#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600)
7060#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700) 7092#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700)
7061 7093
7094#define BNX2_BC_STATE_CONDITION 0x000001c8
7095#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000
7096#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000
7097#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000
7098#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
7099#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
7100#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
7101
7062#define BNX2_BC_STATE_DEBUG_CMD 0x1dc 7102#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
7063#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 7103#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
7064#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000 7104#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000
7065#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff 7105#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff
7066#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff 7106#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff
7067 7107
7108#define BNX2_FW_EVT_CODE_MB 0x354
7109#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000
7110#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001
7111
7112#define BNX2_DRV_ACK_CAP_MB 0x364
7113#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000
7114#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000
7115
7116#define BNX2_FW_CAP_MB 0x368
7117#define BNX2_FW_CAP_SIGNATURE 0xaa550000
7118#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000
7119#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000
7120#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001
7121#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002
7122
7123#define BNX2_RPHY_SIGNATURE 0x36c
7124#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a
7125
7126#define BNX2_RPHY_FLAGS 0x370
7127#define BNX2_RPHY_SERDES_LINK 0x374
7128#define BNX2_RPHY_COPPER_LINK 0x378
7129
7068#define HOST_VIEW_SHMEM_BASE 0x167c00 7130#define HOST_VIEW_SHMEM_BASE 0x167c00
7069 7131
7070#endif 7132#endif
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 74ec64a1625d..a4ace071f1cb 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -866,9 +866,9 @@ receive_packet (struct net_device *dev)
866 PCI_DMA_FROMDEVICE); 866 PCI_DMA_FROMDEVICE);
867 /* 16 byte align the IP header */ 867 /* 16 byte align the IP header */
868 skb_reserve (skb, 2); 868 skb_reserve (skb, 2);
869 eth_copy_and_sum (skb, 869 skb_copy_to_linear_data (skb,
870 np->rx_skbuff[entry]->data, 870 np->rx_skbuff[entry]->data,
871 pkt_len, 0); 871 pkt_len);
872 skb_put (skb, pkt_len); 872 skb_put (skb, pkt_len);
873 pci_dma_sync_single_for_device(np->pdev, 873 pci_dma_sync_single_for_device(np->pdev,
874 desc->fraginfo & 874 desc->fraginfo &
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 60673bc292c0..756a6bcb038d 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -34,11 +34,12 @@
34#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/rtnetlink.h>
38#include <net/rtnetlink.h>
37 39
38static int numdummies = 1; 40static int numdummies = 1;
39 41
40static int dummy_xmit(struct sk_buff *skb, struct net_device *dev); 42static int dummy_xmit(struct sk_buff *skb, struct net_device *dev);
41static struct net_device_stats *dummy_get_stats(struct net_device *dev);
42 43
43static int dummy_set_address(struct net_device *dev, void *p) 44static int dummy_set_address(struct net_device *dev, void *p)
44{ 45{
@@ -56,13 +57,13 @@ static void set_multicast_list(struct net_device *dev)
56{ 57{
57} 58}
58 59
59static void __init dummy_setup(struct net_device *dev) 60static void dummy_setup(struct net_device *dev)
60{ 61{
61 /* Initialize the device structure. */ 62 /* Initialize the device structure. */
62 dev->get_stats = dummy_get_stats;
63 dev->hard_start_xmit = dummy_xmit; 63 dev->hard_start_xmit = dummy_xmit;
64 dev->set_multicast_list = set_multicast_list; 64 dev->set_multicast_list = set_multicast_list;
65 dev->set_mac_address = dummy_set_address; 65 dev->set_mac_address = dummy_set_address;
66 dev->destructor = free_netdev;
66 67
67 /* Fill in device structure with ethernet-generic values. */ 68 /* Fill in device structure with ethernet-generic values. */
68 ether_setup(dev); 69 ether_setup(dev);
@@ -76,77 +77,80 @@ static void __init dummy_setup(struct net_device *dev)
76 77
77static int dummy_xmit(struct sk_buff *skb, struct net_device *dev) 78static int dummy_xmit(struct sk_buff *skb, struct net_device *dev)
78{ 79{
79 struct net_device_stats *stats = netdev_priv(dev); 80 dev->stats.tx_packets++;
80 81 dev->stats.tx_bytes += skb->len;
81 stats->tx_packets++;
82 stats->tx_bytes+=skb->len;
83 82
84 dev_kfree_skb(skb); 83 dev_kfree_skb(skb);
85 return 0; 84 return 0;
86} 85}
87 86
88static struct net_device_stats *dummy_get_stats(struct net_device *dev) 87static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
89{ 88{
90 return netdev_priv(dev); 89 if (tb[IFLA_ADDRESS]) {
90 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
91 return -EINVAL;
92 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
93 return -EADDRNOTAVAIL;
94 }
95 return 0;
91} 96}
92 97
93static struct net_device **dummies; 98static struct rtnl_link_ops dummy_link_ops __read_mostly = {
99 .kind = "dummy",
100 .setup = dummy_setup,
101 .validate = dummy_validate,
102};
94 103
95/* Number of dummy devices to be set up by this module. */ 104/* Number of dummy devices to be set up by this module. */
96module_param(numdummies, int, 0); 105module_param(numdummies, int, 0);
97MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices"); 106MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
98 107
99static int __init dummy_init_one(int index) 108static int __init dummy_init_one(void)
100{ 109{
101 struct net_device *dev_dummy; 110 struct net_device *dev_dummy;
102 int err; 111 int err;
103 112
104 dev_dummy = alloc_netdev(sizeof(struct net_device_stats), 113 dev_dummy = alloc_netdev(0, "dummy%d", dummy_setup);
105 "dummy%d", dummy_setup);
106
107 if (!dev_dummy) 114 if (!dev_dummy)
108 return -ENOMEM; 115 return -ENOMEM;
109 116
110 if ((err = register_netdev(dev_dummy))) { 117 err = dev_alloc_name(dev_dummy, dev_dummy->name);
111 free_netdev(dev_dummy); 118 if (err < 0)
112 dev_dummy = NULL; 119 goto err;
113 } else {
114 dummies[index] = dev_dummy;
115 }
116 120
117 return err; 121 dev_dummy->rtnl_link_ops = &dummy_link_ops;
118} 122 err = register_netdevice(dev_dummy);
123 if (err < 0)
124 goto err;
125 return 0;
119 126
120static void dummy_free_one(int index) 127err:
121{ 128 free_netdev(dev_dummy);
122 unregister_netdev(dummies[index]); 129 return err;
123 free_netdev(dummies[index]);
124} 130}
125 131
126static int __init dummy_init_module(void) 132static int __init dummy_init_module(void)
127{ 133{
128 int i, err = 0; 134 int i, err = 0;
129 dummies = kmalloc(numdummies * sizeof(void *), GFP_KERNEL); 135
130 if (!dummies) 136 rtnl_lock();
131 return -ENOMEM; 137 err = __rtnl_link_register(&dummy_link_ops);
138
132 for (i = 0; i < numdummies && !err; i++) 139 for (i = 0; i < numdummies && !err; i++)
133 err = dummy_init_one(i); 140 err = dummy_init_one();
134 if (err) { 141 if (err < 0)
135 i--; 142 __rtnl_link_unregister(&dummy_link_ops);
136 while (--i >= 0) 143 rtnl_unlock();
137 dummy_free_one(i); 144
138 }
139 return err; 145 return err;
140} 146}
141 147
142static void __exit dummy_cleanup_module(void) 148static void __exit dummy_cleanup_module(void)
143{ 149{
144 int i; 150 rtnl_link_unregister(&dummy_link_ops);
145 for (i = 0; i < numdummies; i++)
146 dummy_free_one(i);
147 kfree(dummies);
148} 151}
149 152
150module_init(dummy_init_module); 153module_init(dummy_init_module);
151module_exit(dummy_cleanup_module); 154module_exit(dummy_cleanup_module);
152MODULE_LICENSE("GPL"); 155MODULE_LICENSE("GPL");
156MODULE_ALIAS_RTNL_LINK("dummy");
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9800341956a2..9afa47edfc58 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1801,7 +1801,7 @@ speedo_rx(struct net_device *dev)
1801 1801
1802#if 1 || USE_IP_CSUM 1802#if 1 || USE_IP_CSUM
1803 /* Packet is in one chunk -- we can copy + cksum. */ 1803 /* Packet is in one chunk -- we can copy + cksum. */
1804 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); 1804 skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
1805 skb_put(skb, pkt_len); 1805 skb_put(skb, pkt_len);
1806#else 1806#else
1807 skb_copy_from_linear_data(sp->rx_skbuff[entry], 1807 skb_copy_from_linear_data(sp->rx_skbuff[entry],
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 5e517946f46a..119778401e48 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1201,7 +1201,7 @@ static int epic_rx(struct net_device *dev, int budget)
1201 ep->rx_ring[entry].bufaddr, 1201 ep->rx_ring[entry].bufaddr,
1202 ep->rx_buf_sz, 1202 ep->rx_buf_sz,
1203 PCI_DMA_FROMDEVICE); 1203 PCI_DMA_FROMDEVICE);
1204 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0); 1204 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
1205 skb_put(skb, pkt_len); 1205 skb_put(skb, pkt_len);
1206 pci_dma_sync_single_for_device(ep->pci_dev, 1206 pci_dma_sync_single_for_device(ep->pci_dev,
1207 ep->rx_ring[entry].bufaddr, 1207 ep->rx_ring[entry].bufaddr,
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index abe9b089c610..ff9f177d7157 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1727,8 +1727,8 @@ static int netdev_rx(struct net_device *dev)
1727 /* Call copy + cksum if available. */ 1727 /* Call copy + cksum if available. */
1728 1728
1729#if ! defined(__alpha__) 1729#if ! defined(__alpha__)
1730 eth_copy_and_sum(skb, 1730 skb_copy_to_linear_data(skb,
1731 np->cur_rx->skbuff->data, pkt_len, 0); 1731 np->cur_rx->skbuff->data, pkt_len);
1732 skb_put(skb, pkt_len); 1732 skb_put(skb, pkt_len);
1733#else 1733#else
1734 memcpy(skb_put(skb, pkt_len), 1734 memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 255b09124e11..03023dd17829 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -648,7 +648,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
648 fep->stats.rx_dropped++; 648 fep->stats.rx_dropped++;
649 } else { 649 } else {
650 skb_put(skb,pkt_len-4); /* Make room */ 650 skb_put(skb,pkt_len-4); /* Make room */
651 eth_copy_and_sum(skb, data, pkt_len-4, 0); 651 skb_copy_to_linear_data(skb, data, pkt_len-4);
652 skb->protocol=eth_type_trans(skb,dev); 652 skb->protocol=eth_type_trans(skb,dev);
653 netif_rx(skb); 653 netif_rx(skb);
654 } 654 }
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 2521b111b3a5..15254dc7876a 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1575,8 +1575,8 @@ static int hamachi_rx(struct net_device *dev)
1575 PCI_DMA_FROMDEVICE); 1575 PCI_DMA_FROMDEVICE);
1576 /* Call copy + cksum if available. */ 1576 /* Call copy + cksum if available. */
1577#if 1 || USE_IP_COPYSUM 1577#if 1 || USE_IP_COPYSUM
1578 eth_copy_and_sum(skb, 1578 skb_copy_to_linear_data(skb,
1579 hmp->rx_skbuff[entry]->data, pkt_len, 0); 1579 hmp->rx_skbuff[entry]->data, pkt_len);
1580 skb_put(skb, pkt_len); 1580 skb_put(skb, pkt_len);
1581#else 1581#else
1582 memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma 1582 memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 07b4c0d7a75c..f5c3598e59af 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -136,13 +136,14 @@ resched:
136 136
137} 137}
138 138
139static void __init ifb_setup(struct net_device *dev) 139static void ifb_setup(struct net_device *dev)
140{ 140{
141 /* Initialize the device structure. */ 141 /* Initialize the device structure. */
142 dev->get_stats = ifb_get_stats; 142 dev->get_stats = ifb_get_stats;
143 dev->hard_start_xmit = ifb_xmit; 143 dev->hard_start_xmit = ifb_xmit;
144 dev->open = &ifb_open; 144 dev->open = &ifb_open;
145 dev->stop = &ifb_close; 145 dev->stop = &ifb_close;
146 dev->destructor = free_netdev;
146 147
147 /* Fill in device structure with ethernet-generic values. */ 148 /* Fill in device structure with ethernet-generic values. */
148 ether_setup(dev); 149 ether_setup(dev);
@@ -197,12 +198,6 @@ static struct net_device_stats *ifb_get_stats(struct net_device *dev)
197 return stats; 198 return stats;
198} 199}
199 200
200static struct net_device **ifbs;
201
202/* Number of ifb devices to be set up by this module. */
203module_param(numifbs, int, 0);
204MODULE_PARM_DESC(numifbs, "Number of ifb devices");
205
206static int ifb_close(struct net_device *dev) 201static int ifb_close(struct net_device *dev)
207{ 202{
208 struct ifb_private *dp = netdev_priv(dev); 203 struct ifb_private *dp = netdev_priv(dev);
@@ -226,6 +221,28 @@ static int ifb_open(struct net_device *dev)
226 return 0; 221 return 0;
227} 222}
228 223
224static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
225{
226 if (tb[IFLA_ADDRESS]) {
227 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
228 return -EINVAL;
229 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
230 return -EADDRNOTAVAIL;
231 }
232 return 0;
233}
234
235static struct rtnl_link_ops ifb_link_ops __read_mostly = {
236 .kind = "ifb",
237 .priv_size = sizeof(struct ifb_private),
238 .setup = ifb_setup,
239 .validate = ifb_validate,
240};
241
242/* Number of ifb devices to be set up by this module. */
243module_param(numifbs, int, 0);
244MODULE_PARM_DESC(numifbs, "Number of ifb devices");
245
229static int __init ifb_init_one(int index) 246static int __init ifb_init_one(int index)
230{ 247{
231 struct net_device *dev_ifb; 248 struct net_device *dev_ifb;
@@ -237,49 +254,44 @@ static int __init ifb_init_one(int index)
237 if (!dev_ifb) 254 if (!dev_ifb)
238 return -ENOMEM; 255 return -ENOMEM;
239 256
240 if ((err = register_netdev(dev_ifb))) { 257 err = dev_alloc_name(dev_ifb, dev_ifb->name);
241 free_netdev(dev_ifb); 258 if (err < 0)
242 dev_ifb = NULL; 259 goto err;
243 } else {
244 ifbs[index] = dev_ifb;
245 }
246 260
247 return err; 261 dev_ifb->rtnl_link_ops = &ifb_link_ops;
248} 262 err = register_netdevice(dev_ifb);
263 if (err < 0)
264 goto err;
265 return 0;
249 266
250static void ifb_free_one(int index) 267err:
251{ 268 free_netdev(dev_ifb);
252 unregister_netdev(ifbs[index]); 269 return err;
253 free_netdev(ifbs[index]);
254} 270}
255 271
256static int __init ifb_init_module(void) 272static int __init ifb_init_module(void)
257{ 273{
258 int i, err = 0; 274 int i, err;
259 ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL); 275
260 if (!ifbs) 276 rtnl_lock();
261 return -ENOMEM; 277 err = __rtnl_link_register(&ifb_link_ops);
278
262 for (i = 0; i < numifbs && !err; i++) 279 for (i = 0; i < numifbs && !err; i++)
263 err = ifb_init_one(i); 280 err = ifb_init_one(i);
264 if (err) { 281 if (err)
265 i--; 282 __rtnl_link_unregister(&ifb_link_ops);
266 while (--i >= 0) 283 rtnl_unlock();
267 ifb_free_one(i);
268 }
269 284
270 return err; 285 return err;
271} 286}
272 287
273static void __exit ifb_cleanup_module(void) 288static void __exit ifb_cleanup_module(void)
274{ 289{
275 int i; 290 rtnl_link_unregister(&ifb_link_ops);
276
277 for (i = 0; i < numifbs; i++)
278 ifb_free_one(i);
279 kfree(ifbs);
280} 291}
281 292
282module_init(ifb_init_module); 293module_init(ifb_init_module);
283module_exit(ifb_cleanup_module); 294module_exit(ifb_cleanup_module);
284MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
285MODULE_AUTHOR("Jamal Hadi Salim"); 296MODULE_AUTHOR("Jamal Hadi Salim");
297MODULE_ALIAS_RTNL_LINK("ifb");
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 217429122e79..bdd5c979bead 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -4,7 +4,7 @@
4* Version: 0.1.1 4* Version: 0.1.1
5* Description: Irda KingSun/DonShine USB Dongle 5* Description: Irda KingSun/DonShine USB Dongle
6* Status: Experimental 6* Status: Experimental
7* Author: Alex Villac�s Lasso <a_villacis@palosanto.com> 7* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
8* 8*
9* Based on stir4200 and mcs7780 drivers, with (strange?) differences 9* Based on stir4200 and mcs7780 drivers, with (strange?) differences
10* 10*
@@ -652,6 +652,6 @@ static void __exit kingsun_cleanup(void)
652} 652}
653module_exit(kingsun_cleanup); 653module_exit(kingsun_cleanup);
654 654
655MODULE_AUTHOR("Alex Villac�s Lasso <a_villacis@palosanto.com>"); 655MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
656MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine"); 656MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
657MODULE_LICENSE("GPL"); 657MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bf78ef1120ad..0538ca9ce058 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -44,6 +44,7 @@ MODULE_LICENSE("GPL");
44#include <linux/time.h> 44#include <linux/time.h>
45#include <linux/proc_fs.h> 45#include <linux/proc_fs.h>
46#include <linux/seq_file.h> 46#include <linux/seq_file.h>
47#include <linux/mutex.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48#include <asm/byteorder.h> 49#include <asm/byteorder.h>
49 50
@@ -1660,8 +1661,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1660 idev = ndev->priv; 1661 idev = ndev->priv;
1661 1662
1662 spin_lock_init(&idev->lock); 1663 spin_lock_init(&idev->lock);
1663 init_MUTEX(&idev->sem); 1664 mutex_init(&idev->mtx);
1664 down(&idev->sem); 1665 mutex_lock(&idev->mtx);
1665 idev->pdev = pdev; 1666 idev->pdev = pdev;
1666 1667
1667 if (vlsi_irda_init(ndev) < 0) 1668 if (vlsi_irda_init(ndev) < 0)
@@ -1689,12 +1690,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1689 IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name); 1690 IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
1690 1691
1691 pci_set_drvdata(pdev, ndev); 1692 pci_set_drvdata(pdev, ndev);
1692 up(&idev->sem); 1693 mutex_unlock(&idev->mtx);
1693 1694
1694 return 0; 1695 return 0;
1695 1696
1696out_freedev: 1697out_freedev:
1697 up(&idev->sem); 1698 mutex_unlock(&idev->mtx);
1698 free_netdev(ndev); 1699 free_netdev(ndev);
1699out_disable: 1700out_disable:
1700 pci_disable_device(pdev); 1701 pci_disable_device(pdev);
@@ -1716,12 +1717,12 @@ static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
1716 unregister_netdev(ndev); 1717 unregister_netdev(ndev);
1717 1718
1718 idev = ndev->priv; 1719 idev = ndev->priv;
1719 down(&idev->sem); 1720 mutex_lock(&idev->mtx);
1720 if (idev->proc_entry) { 1721 if (idev->proc_entry) {
1721 remove_proc_entry(ndev->name, vlsi_proc_root); 1722 remove_proc_entry(ndev->name, vlsi_proc_root);
1722 idev->proc_entry = NULL; 1723 idev->proc_entry = NULL;
1723 } 1724 }
1724 up(&idev->sem); 1725 mutex_unlock(&idev->mtx);
1725 1726
1726 free_netdev(ndev); 1727 free_netdev(ndev);
1727 1728
@@ -1751,7 +1752,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1751 return 0; 1752 return 0;
1752 } 1753 }
1753 idev = ndev->priv; 1754 idev = ndev->priv;
1754 down(&idev->sem); 1755 mutex_lock(&idev->mtx);
1755 if (pdev->current_state != 0) { /* already suspended */ 1756 if (pdev->current_state != 0) { /* already suspended */
1756 if (state.event > pdev->current_state) { /* simply go deeper */ 1757 if (state.event > pdev->current_state) { /* simply go deeper */
1757 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1758 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1759,7 +1760,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1759 } 1760 }
1760 else 1761 else
1761 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event); 1762 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event);
1762 up(&idev->sem); 1763 mutex_unlock(&idev->mtx);
1763 return 0; 1764 return 0;
1764 } 1765 }
1765 1766
@@ -1775,7 +1776,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1775 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1776 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1776 pdev->current_state = state.event; 1777 pdev->current_state = state.event;
1777 idev->resume_ok = 1; 1778 idev->resume_ok = 1;
1778 up(&idev->sem); 1779 mutex_unlock(&idev->mtx);
1779 return 0; 1780 return 0;
1780} 1781}
1781 1782
@@ -1790,9 +1791,9 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1790 return 0; 1791 return 0;
1791 } 1792 }
1792 idev = ndev->priv; 1793 idev = ndev->priv;
1793 down(&idev->sem); 1794 mutex_lock(&idev->mtx);
1794 if (pdev->current_state == 0) { 1795 if (pdev->current_state == 0) {
1795 up(&idev->sem); 1796 mutex_unlock(&idev->mtx);
1796 IRDA_WARNING("%s - %s: already resumed\n", 1797 IRDA_WARNING("%s - %s: already resumed\n",
1797 __FUNCTION__, pci_name(pdev)); 1798 __FUNCTION__, pci_name(pdev));
1798 return 0; 1799 return 0;
@@ -1814,7 +1815,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1814 * device and independently resume_ok should catch any garbage config. 1815 * device and independently resume_ok should catch any garbage config.
1815 */ 1816 */
1816 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__); 1817 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
1817 up(&idev->sem); 1818 mutex_unlock(&idev->mtx);
1818 return 0; 1819 return 0;
1819 } 1820 }
1820 1821
@@ -1824,7 +1825,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1824 netif_device_attach(ndev); 1825 netif_device_attach(ndev);
1825 } 1826 }
1826 idev->resume_ok = 0; 1827 idev->resume_ok = 0;
1827 up(&idev->sem); 1828 mutex_unlock(&idev->mtx);
1828 return 0; 1829 return 0;
1829} 1830}
1830 1831
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 2d3b773d8e35..ca12a6096419 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -728,7 +728,7 @@ typedef struct vlsi_irda_dev {
728 struct timeval last_rx; 728 struct timeval last_rx;
729 729
730 spinlock_t lock; 730 spinlock_t lock;
731 struct semaphore sem; 731 struct mutex mtx;
732 732
733 u8 resume_ok; 733 u8 resume_ok;
734 struct proc_dir_entry *proc_entry; 734 struct proc_dir_entry *proc_entry;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5f694fc4a21..d9ce1aef148a 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -111,7 +111,7 @@ static int ixpdev_rx(struct net_device *dev, int *budget)
111 skb = dev_alloc_skb(desc->pkt_length + 2); 111 skb = dev_alloc_skb(desc->pkt_length + 2);
112 if (likely(skb != NULL)) { 112 if (likely(skb != NULL)) {
113 skb_reserve(skb, 2); 113 skb_reserve(skb, 2);
114 eth_copy_and_sum(skb, buf, desc->pkt_length, 0); 114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
115 skb_put(skb, desc->pkt_length); 115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
117 117
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 0fe96c85828b..a2f37e52b928 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1186,9 +1186,9 @@ lance_rx(struct net_device *dev)
1186 } 1186 }
1187 skb_reserve(skb,2); /* 16 byte align */ 1187 skb_reserve(skb,2); /* 16 byte align */
1188 skb_put(skb,pkt_len); /* Make room */ 1188 skb_put(skb,pkt_len); /* Make room */
1189 eth_copy_and_sum(skb, 1189 skb_copy_to_linear_data(skb,
1190 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), 1190 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1191 pkt_len,0); 1191 pkt_len);
1192 skb->protocol=eth_type_trans(skb,dev); 1192 skb->protocol=eth_type_trans(skb,dev);
1193 netif_rx(skb); 1193 netif_rx(skb);
1194 dev->last_rx = jiffies; 1194 dev->last_rx = jiffies;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 460a08718c69..3450051ae56b 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2357,8 +2357,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2357 np->rx_dma[entry], 2357 np->rx_dma[entry],
2358 buflen, 2358 buflen,
2359 PCI_DMA_FROMDEVICE); 2359 PCI_DMA_FROMDEVICE);
2360 eth_copy_and_sum(skb, 2360 skb_copy_to_linear_data(skb,
2361 np->rx_skbuff[entry]->data, pkt_len, 0); 2361 np->rx_skbuff[entry]->data, pkt_len);
2362 skb_put(skb, pkt_len); 2362 skb_put(skb, pkt_len);
2363 pci_dma_sync_single_for_device(np->pci_dev, 2363 pci_dma_sync_single_for_device(np->pci_dev,
2364 np->rx_dma[entry], 2364 np->rx_dma[entry],
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 8dbd6d1900b5..5e7999db2096 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -936,7 +936,7 @@ static void ni52_rcv_int(struct net_device *dev)
936 { 936 {
937 skb_reserve(skb,2); 937 skb_reserve(skb,2);
938 skb_put(skb,totlen); 938 skb_put(skb,totlen);
939 eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); 939 skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen);
940 skb->protocol=eth_type_trans(skb,dev); 940 skb->protocol=eth_type_trans(skb,dev);
941 netif_rx(skb); 941 netif_rx(skb);
942 dev->last_rx = jiffies; 942 dev->last_rx = jiffies;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 3818edf0ac18..4ef5fe345191 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1096,7 +1096,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1096#ifdef RCV_VIA_SKB 1096#ifdef RCV_VIA_SKB
1097 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { 1097 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
1098 skb_put(skb,len); 1098 skb_put(skb,len);
1099 eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0); 1099 skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
1100 } 1100 }
1101 else { 1101 else {
1102 struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; 1102 struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
@@ -1108,7 +1108,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1108 } 1108 }
1109#else 1109#else
1110 skb_put(skb,len); 1110 skb_put(skb,len);
1111 eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0); 1111 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
1112#endif 1112#endif
1113 p->stats.rx_packets++; 1113 p->stats.rx_packets++;
1114 p->stats.rx_bytes += len; 1114 p->stats.rx_bytes += len;
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index df8998b4f37e..3cdbe118200b 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1567,7 +1567,7 @@ static void netdrv_rx_interrupt (struct net_device *dev,
1567 if (skb) { 1567 if (skb) {
1568 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 1568 skb_reserve (skb, 2); /* 16 byte align the IP fields. */
1569 1569
1570 eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); 1570 skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
1571 skb_put (skb, pkt_size); 1571 skb_put (skb, pkt_size);
1572 1572
1573 skb->protocol = eth_type_trans (skb, dev); 1573 skb->protocol = eth_type_trans (skb, dev);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 9c171a7390e2..465485a3fbc6 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1235,9 +1235,9 @@ static void pcnet32_rx_entry(struct net_device *dev,
1235 lp->rx_dma_addr[entry], 1235 lp->rx_dma_addr[entry],
1236 pkt_len, 1236 pkt_len,
1237 PCI_DMA_FROMDEVICE); 1237 PCI_DMA_FROMDEVICE);
1238 eth_copy_and_sum(skb, 1238 skb_copy_to_linear_data(skb,
1239 (unsigned char *)(lp->rx_skbuff[entry]->data), 1239 (unsigned char *)(lp->rx_skbuff[entry]->data),
1240 pkt_len, 0); 1240 pkt_len);
1241 pci_dma_sync_single_for_device(lp->pci_dev, 1241 pci_dma_sync_single_for_device(lp->pci_dev,
1242 lp->rx_dma_addr[entry], 1242 lp->rx_dma_addr[entry],
1243 pkt_len, 1243 pkt_len,
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
new file mode 100644
index 000000000000..5891a0fbdc8b
--- /dev/null
+++ b/drivers/net/pppol2tp.c
@@ -0,0 +1,2486 @@
1/*****************************************************************************
2 * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoL2TP --- PPP over L2TP (RFC 2661)
6 *
7 * Version: 1.0.0
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * License:
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24/* This driver handles only L2TP data frames; control frames are handled by a
25 * userspace application.
26 *
27 * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
28 * attaches it to a bound UDP socket with local tunnel_id / session_id and
29 * peer tunnel_id / session_id set. Data can then be sent or received using
30 * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
31 * can be read or modified using ioctl() or [gs]etsockopt() calls.
32 *
33 * When a PPPoL2TP socket is connected with local and peer session_id values
34 * zero, the socket is treated as a special tunnel management socket.
35 *
36 * Here's example userspace code to create a socket for sending/receiving data
37 * over an L2TP session:-
38 *
39 * struct sockaddr_pppol2tp sax;
40 * int fd;
41 * int session_fd;
42 *
43 * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
44 *
45 * sax.sa_family = AF_PPPOX;
46 * sax.sa_protocol = PX_PROTO_OL2TP;
47 * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
48 * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
49 * sax.pppol2tp.addr.sin_port = addr->sin_port;
50 * sax.pppol2tp.addr.sin_family = AF_INET;
51 * sax.pppol2tp.s_tunnel = tunnel_id;
52 * sax.pppol2tp.s_session = session_id;
53 * sax.pppol2tp.d_tunnel = peer_tunnel_id;
54 * sax.pppol2tp.d_session = peer_session_id;
55 *
56 * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
57 *
58 * A pppd plugin that allows PPP traffic to be carried over L2TP using
59 * this driver is available from the OpenL2TP project at
60 * http://openl2tp.sourceforge.net.
61 */
62
63#include <linux/module.h>
64#include <linux/version.h>
65#include <linux/string.h>
66#include <linux/list.h>
67#include <asm/uaccess.h>
68
69#include <linux/kernel.h>
70#include <linux/spinlock.h>
71#include <linux/kthread.h>
72#include <linux/sched.h>
73#include <linux/slab.h>
74#include <linux/errno.h>
75#include <linux/jiffies.h>
76
77#include <linux/netdevice.h>
78#include <linux/net.h>
79#include <linux/inetdevice.h>
80#include <linux/skbuff.h>
81#include <linux/init.h>
82#include <linux/ip.h>
83#include <linux/udp.h>
84#include <linux/if_pppox.h>
85#include <linux/if_pppol2tp.h>
86#include <net/sock.h>
87#include <linux/ppp_channel.h>
88#include <linux/ppp_defs.h>
89#include <linux/if_ppp.h>
90#include <linux/file.h>
91#include <linux/hash.h>
92#include <linux/sort.h>
93#include <linux/proc_fs.h>
94#include <net/dst.h>
95#include <net/ip.h>
96#include <net/udp.h>
97#include <net/xfrm.h>
98
99#include <asm/byteorder.h>
100#include <asm/atomic.h>
101
102
103#define PPPOL2TP_DRV_VERSION "V1.0"
104
105/* L2TP header constants */
106#define L2TP_HDRFLAG_T 0x8000
107#define L2TP_HDRFLAG_L 0x4000
108#define L2TP_HDRFLAG_S 0x0800
109#define L2TP_HDRFLAG_O 0x0200
110#define L2TP_HDRFLAG_P 0x0100
111
112#define L2TP_HDR_VER_MASK 0x000F
113#define L2TP_HDR_VER 0x0002
114
115/* Space for UDP, L2TP and PPP headers */
116#define PPPOL2TP_HEADER_OVERHEAD 40
117
118/* Just some random numbers */
119#define L2TP_TUNNEL_MAGIC 0x42114DDA
120#define L2TP_SESSION_MAGIC 0x0C04EB7D
121
122#define PPPOL2TP_HASH_BITS 4
123#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
124
125/* Default trace flags */
126#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
127
128#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
129 do { \
130 if ((_mask) & (_type)) \
131 printk(_lvl "PPPOL2TP: " _fmt, ##args); \
132 } while(0)
133
134/* Number of bytes to build transmit L2TP headers.
135 * Unfortunately the size is different depending on whether sequence numbers
136 * are enabled.
137 */
138#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
139#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
140
141struct pppol2tp_tunnel;
142
143/* Describes a session. It is the sk_user_data field in the PPPoL2TP
144 * socket. Contains information to determine incoming packets and transmit
145 * outgoing ones.
146 */
147struct pppol2tp_session
148{
149 int magic; /* should be
150 * L2TP_SESSION_MAGIC */
151 int owner; /* pid that opened the socket */
152
153 struct sock *sock; /* Pointer to the session
154 * PPPoX socket */
155 struct sock *tunnel_sock; /* Pointer to the tunnel UDP
156 * socket */
157
158 struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
159
160 struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
161 * context */
162
163 char name[20]; /* "sess xxxxx/yyyyy", where
164 * x=tunnel_id, y=session_id */
165 int mtu;
166 int mru;
167 int flags; /* accessed by PPPIOCGFLAGS.
168 * Unused. */
169 unsigned recv_seq:1; /* expect receive packets with
170 * sequence numbers? */
171 unsigned send_seq:1; /* send packets with sequence
172 * numbers? */
173 unsigned lns_mode:1; /* behave as LNS? LAC enables
174 * sequence numbers under
175 * control of LNS. */
176 int debug; /* bitmask of debug message
177 * categories */
178 int reorder_timeout; /* configured reorder timeout
179 * (in jiffies) */
180 u16 nr; /* session NR state (receive) */
181 u16 ns; /* session NR state (send) */
182 struct sk_buff_head reorder_q; /* receive reorder queue */
183 struct pppol2tp_ioc_stats stats;
184 struct hlist_node hlist; /* Hash list node */
185};
186
187/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
188 * all the associated sessions so incoming packets can be sorted out
189 */
190struct pppol2tp_tunnel
191{
192 int magic; /* Should be L2TP_TUNNEL_MAGIC */
193 rwlock_t hlist_lock; /* protect session_hlist */
194 struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
195 /* hashed list of sessions,
196 * hashed by id */
197 int debug; /* bitmask of debug message
198 * categories */
199 char name[12]; /* "tunl xxxxx" */
200 struct pppol2tp_ioc_stats stats;
201
202 void (*old_sk_destruct)(struct sock *);
203
204 struct sock *sock; /* Parent socket */
205 struct list_head list; /* Keep a list of all open
206 * prepared sockets */
207
208 atomic_t ref_count;
209};
210
211/* Private data stored for received packets in the skb.
212 */
213struct pppol2tp_skb_cb {
214 u16 ns;
215 u16 nr;
216 u16 has_seq;
217 u16 length;
218 unsigned long expires;
219};
220
221#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
222
223static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
224static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
225
226static atomic_t pppol2tp_tunnel_count;
227static atomic_t pppol2tp_session_count;
228static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
229static struct proto_ops pppol2tp_ops;
230static LIST_HEAD(pppol2tp_tunnel_list);
231static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock);
232
233/* Helpers to obtain tunnel/session contexts from sockets.
234 */
235static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
236{
237 struct pppol2tp_session *session;
238
239 if (sk == NULL)
240 return NULL;
241
242 session = (struct pppol2tp_session *)(sk->sk_user_data);
243 if (session == NULL)
244 return NULL;
245
246 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
247
248 return session;
249}
250
251static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
252{
253 struct pppol2tp_tunnel *tunnel;
254
255 if (sk == NULL)
256 return NULL;
257
258 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
259 if (tunnel == NULL)
260 return NULL;
261
262 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
263
264 return tunnel;
265}
266
/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel. The tunnel context is freed (pppol2tp_tunnel_free()) when
 * the count drops to zero.
 */
static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
{
	atomic_inc(&tunnel->ref_count);
}
274
/* Drop a tunnel reference; frees the tunnel context on the last put. */
static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
{
	if (atomic_dec_and_test(&tunnel->ref_count))
		pppol2tp_tunnel_free(tunnel);
}
280
281/* Session hash list.
282 * The session_id SHOULD be random according to RFC2661, but several
283 * L2TP implementations (Cisco and Microsoft) use incrementing
284 * session_ids. So we do a real hash on the session_id, rather than a
285 * simple bitmask.
286 */
287static inline struct hlist_head *
288pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
289{
290 unsigned long hash_val = (unsigned long) session_id;
291 return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
292}
293
294/* Lookup a session by id
295 */
296static struct pppol2tp_session *
297pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
298{
299 struct hlist_head *session_list =
300 pppol2tp_session_id_hash(tunnel, session_id);
301 struct pppol2tp_session *session;
302 struct hlist_node *walk;
303
304 read_lock(&tunnel->hlist_lock);
305 hlist_for_each_entry(session, walk, session_list, hlist) {
306 if (session->tunnel_addr.s_session == session_id) {
307 read_unlock(&tunnel->hlist_lock);
308 return session;
309 }
310 }
311 read_unlock(&tunnel->hlist_lock);
312
313 return NULL;
314}
315
316/* Lookup a tunnel by id
317 */
318static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
319{
320 struct pppol2tp_tunnel *tunnel = NULL;
321
322 read_lock(&pppol2tp_tunnel_list_lock);
323 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
324 if (tunnel->stats.tunnel_id == tunnel_id) {
325 read_unlock(&pppol2tp_tunnel_list_lock);
326 return tunnel;
327 }
328 }
329 read_unlock(&pppol2tp_tunnel_list_lock);
330
331 return NULL;
332}
333
334/*****************************************************************************
335 * Receive data handling
336 *****************************************************************************/
337
/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number. The queue is scanned for the first entry with a higher ns and
 * the skb is inserted before it, keeping reorder_q sorted by ns.
 *
 * NOTE(review): the '>' comparison does not account for 16-bit ns
 * wraparound, so ordering is briefly wrong when ns wraps - confirm this
 * is acceptable for expected session lifetimes.
 */
static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	u16 ns = PPPOL2TP_SKB_CB(skb)->ns;

	/* Insertion must be atomic wrt the dequeue path, so take the
	 * queue's own lock. */
	spin_lock(&session->reorder_q.lock);
	skb_queue_walk(&session->reorder_q, skbp) {
		if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
			__skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
			       "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
			       session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
			       skb_queue_len(&session->reorder_q));
			session->stats.rx_oos_packets++;
			goto out;
		}
	}

	/* No entry with a higher ns: append at the tail */
	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock(&session->reorder_q.lock);
}
364
/* Dequeue a single skb and deliver it: pass it into PPP if the session
 * socket is bound, otherwise drop it. Must be called without the
 * reorder_q lock held. Consumes the skb and drops the session socket
 * reference taken by pppol2tp_recv_core().
 */
static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
{
	struct pppol2tp_tunnel *tunnel = session->tunnel;
	int length = PPPOL2TP_SKB_CB(skb)->length;
	struct sock *session_sock = NULL;

	/* We're about to requeue the skb, so unlink it and return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_unlink(skb, &session->reorder_q);
	skb_orphan(skb);

	/* Count the frame against both tunnel and session */
	tunnel->stats.rx_packets++;
	tunnel->stats.rx_bytes += length;
	session->stats.rx_packets++;
	session->stats.rx_bytes += length;

	if (PPPOL2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr: this frame is now the last in-sequence
		 * frame delivered. */
		session->nr++;
		PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
		       "%s: updated nr to %hu\n", session->name, session->nr);
	}

	/* If the socket is bound, send it in to PPP's input queue. Otherwise
	 * queue it on the session socket.
	 */
	session_sock = session->sock;
	if (session_sock->sk_state & PPPOX_BOUND) {
		struct pppox_sock *po;
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv %d byte data frame, passing to ppp\n",
		       session->name, length);

		/* We need to forget all info related to the L2TP packet
		 * gathered in the skb as we are going to reuse the same
		 * skb for the inner packet.
		 * Namely we need to:
		 * - reset xfrm (IPSec) information as it applies to
		 *   the outer L2TP packet and not to the inner one
		 * - release the dst to force a route lookup on the inner
		 *   IP packet since skb->dst currently points to the dst
		 *   of the UDP tunnel
		 * - reset netfilter information as it doesn't apply
		 *   to the inner packet either
		 */
		secpath_reset(skb);
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);

		po = pppox_sk(session_sock);
		ppp_input(&po->chan, skb);
	} else {
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: socket not bound\n", session->name);

		/* Not bound. Nothing we can do, so discard. */
		session->stats.rx_errors++;
		kfree_skb(skb);
	}

	/* Balance the sock_hold() done when the session was looked up
	 * in pppol2tp_recv_core(). */
	sock_put(session->sock);
}
431
432/* Dequeue skbs from the session's reorder_q, subject to packet order.
433 * Skbs that have been in the queue for too long are simply discarded.
434 */
435static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
436{
437 struct sk_buff *skb;
438 struct sk_buff *tmp;
439
440 /* If the pkt at the head of the queue has the nr that we
441 * expect to send up next, dequeue it and any other
442 * in-sequence packets behind it.
443 */
444 spin_lock(&session->reorder_q.lock);
445 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
446 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
447 session->stats.rx_seq_discards++;
448 session->stats.rx_errors++;
449 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
450 "%s: oos pkt %hu len %d discarded (too old), "
451 "waiting for %hu, reorder_q_len=%d\n",
452 session->name, PPPOL2TP_SKB_CB(skb)->ns,
453 PPPOL2TP_SKB_CB(skb)->length, session->nr,
454 skb_queue_len(&session->reorder_q));
455 __skb_unlink(skb, &session->reorder_q);
456 kfree_skb(skb);
457 continue;
458 }
459
460 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
461 if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
462 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
463 "%s: holding oos pkt %hu len %d, "
464 "waiting for %hu, reorder_q_len=%d\n",
465 session->name, PPPOL2TP_SKB_CB(skb)->ns,
466 PPPOL2TP_SKB_CB(skb)->length, session->nr,
467 skb_queue_len(&session->reorder_q));
468 goto out;
469 }
470 }
471 spin_unlock(&session->reorder_q.lock);
472 pppol2tp_recv_dequeue_skb(session, skb);
473 spin_lock(&session->reorder_q.lock);
474 }
475
476out:
477 spin_unlock(&session->reorder_q.lock);
478}
479
/* Internal receive frame. Do the real work of receiving an L2TP data frame
 * here. The skb is not on a list when we get here.
 * Returns 0 if the packet was a data packet and was successfully passed on
 * (the skb has been queued or freed).
 * Returns 1 if the packet was not a good data packet and could not be
 * forwarded. All such packets are passed up to userspace to deal with;
 * the caller retains ownership of the skb in that case.
 */
static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
{
	struct pppol2tp_session *session = NULL;
	struct pppol2tp_tunnel *tunnel;
	unsigned char *ptr;
	u16 hdrflags;
	u16 tunnel_id, session_id;
	int length;
	struct udphdr *uh;

	tunnel = pppol2tp_sock_to_tunnel(sock);
	if (tunnel == NULL)
		goto error;

	/* Short packet? */
	if (skb->len < sizeof(struct udphdr)) {
		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
		goto error;
	}

	/* Point to L2TP header */
	ptr = skb->data + sizeof(struct udphdr);

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16*)ptr);

	/* Trace packet contents, if enabled */
	if (tunnel->debug & PPPOL2TP_MSG_DATA) {
		printk(KERN_DEBUG "%s: recv: ", tunnel->name);

		for (length = 0; length < 16; length++)
			printk(" %02X", ptr[length]);
		printk("\n");
	}

	/* Get length of L2TP packet */
	uh = (struct udphdr *) skb_transport_header(skb);
	length = ntohs(uh->len) - sizeof(struct udphdr);

	/* Too short? (12 = minimum L2TP data header: flags, tunnel id,
	 * session id plus optional fields) */
	if (length < 12) {
		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length);
		goto error;
	}

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv control packet, len=%d\n", tunnel->name, length);
		goto error;
	}

	/* NOTE(review): from here on, ptr is advanced over the optional
	 * L / S / O fields without re-checking against the end of the skb
	 * data; confirm that callers guarantee enough linear data for the
	 * largest possible header.
	 */

	/* Skip flags */
	ptr += 2;

	/* If length is present, skip it */
	if (hdrflags & L2TP_HDRFLAG_L)
		ptr += 2;

	/* Extract tunnel and session ID */
	tunnel_id = ntohs(*(__be16 *) ptr);
	ptr += 2;
	session_id = ntohs(*(__be16 *) ptr);
	ptr += 2;

	/* Find the session context */
	session = pppol2tp_session_find(tunnel, session_id);
	if (!session) {
		/* Not found? Pass to userspace to deal with */
		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: no socket found (%hu/%hu). Passing up.\n",
		       tunnel->name, tunnel_id, session_id);
		goto error;
	}
	sock_hold(session->sock);

	/* The ref count on the socket was increased by the above call since
	 * we now hold a pointer to the session. Take care to do sock_put()
	 * when exiting this function from now on...
	 */

	/* Handle the optional sequence numbers. If we are the LAC,
	 * enable/disable sequence numbers under the control of the LNS. If
	 * no sequence numbers present but we were expecting them, discard
	 * frame.
	 */
	if (hdrflags & L2TP_HDRFLAG_S) {
		u16 ns, nr;
		ns = ntohs(*(__be16 *) ptr);
		ptr += 2;
		nr = ntohs(*(__be16 *) ptr);
		ptr += 2;

		/* Received a packet with sequence numbers. If we're the LNS,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if ((!session->lns_mode) && (!session->send_seq)) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
			       "%s: requested to enable seq numbers by LNS\n",
			       session->name);
			session->send_seq = -1;
		}

		/* Store L2TP info in the skb */
		PPPOL2TP_SKB_CB(skb)->ns = ns;
		PPPOL2TP_SKB_CB(skb)->nr = nr;
		PPPOL2TP_SKB_CB(skb)->has_seq = 1;

		PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
		       "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
		       session->name, ns, nr, session->nr);
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
			       "%s: recv data has no seq numbers when required. "
			       "Discarding\n", session->name);
			session->stats.rx_seq_discards++;
			session->stats.rx_errors++;
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if ((!session->lns_mode) && (session->send_seq)) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
			       "%s: requested to disable seq numbers by LNS\n",
			       session->name);
			session->send_seq = 0;
		} else if (session->send_seq) {
			PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
			       "%s: recv data has no seq numbers when required. "
			       "Discarding\n", session->name);
			session->stats.rx_seq_discards++;
			session->stats.rx_errors++;
			goto discard;
		}

		/* Store L2TP info in the skb */
		PPPOL2TP_SKB_CB(skb)->has_seq = 0;
	}

	/* If offset bit set, skip it. */
	if (hdrflags & L2TP_HDRFLAG_O)
		ptr += 2 + ntohs(*(__be16 *) ptr);

	skb_pull(skb, ptr - skb->data);

	/* Skip PPP header, if present. In testing, Microsoft L2TP clients
	 * don't send the PPP header (PPP header compression enabled), but
	 * other clients can include the header. So we cope with both cases
	 * here. The PPP header is always FF03 when using L2TP.
	 *
	 * Note that skb->data[] isn't dereferenced from a u16 ptr here since
	 * the field may be unaligned.
	 */
	if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
		skb_pull(skb, 2);

	/* Prepare skb for adding to the session's reorder_q. Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	PPPOL2TP_SKB_CB(skb)->length = length;
	PPPOL2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (PPPOL2TP_SKB_CB(skb)->has_seq) {
		if (session->reorder_timeout != 0) {
			/* Packet reordering enabled. Add skb to session's
			 * reorder queue, in order of ns.
			 */
			pppol2tp_recv_queue_skb(session, skb);
		} else {
			/* Packet reordering disabled. Discard out-of-sequence
			 * packets
			 */
			if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
				session->stats.rx_seq_discards++;
				session->stats.rx_errors++;
				PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: oos pkt %hu len %d discarded, "
				       "waiting for %hu, reorder_q_len=%d\n",
				       session->name, PPPOL2TP_SKB_CB(skb)->ns,
				       PPPOL2TP_SKB_CB(skb)->length, session->nr,
				       skb_queue_len(&session->reorder_q));
				goto discard;
			}
			skb_queue_tail(&session->reorder_q, skb);
		}
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	pppol2tp_recv_dequeue(session);

	return 0;

discard:
	kfree_skb(skb);
	sock_put(session->sock);

	return 0;

error:
	return 1;
}
709
710/* UDP encapsulation receive handler. See net/ipv4/udp.c.
711 * Return codes:
712 * 0 : success.
713 * <0: error
714 * >0: skb should be passed up to userspace as UDP.
715 */
716static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
717{
718 struct pppol2tp_tunnel *tunnel;
719
720 tunnel = pppol2tp_sock_to_tunnel(sk);
721 if (tunnel == NULL)
722 goto pass_up;
723
724 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
725 "%s: received %d bytes\n", tunnel->name, skb->len);
726
727 if (pppol2tp_recv_core(sk, skb))
728 goto pass_up;
729
730 return 0;
731
732pass_up:
733 return 1;
734}
735
/* Receive message. This is the recvmsg for the PPPoL2TP socket. Only
 * used while the socket is not bound to PPP (once bound, frames go to
 * ppp_input() instead). Returns the number of bytes copied, 0 on
 * no data, or a negative errno.
 */
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len,
			    int flags)
{
	int err;
	struct sk_buff *skb;
	struct sock *sk = sock->sk;

	/* Once bound to PPP, data no longer flows through this socket */
	err = -EIO;
	if (sk->sk_state & PPPOX_BOUND)
		goto end;

	msg->msg_namelen = 0;

	err = 0;
	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &err);
	if (skb) {
		err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data,
				     skb->len);
		if (err < 0)
			goto do_skb_free;
		err = skb->len;
	}
do_skb_free:
	/* kfree_skb() is a no-op for NULL, so the no-skb path is safe */
	kfree_skb(skb);
end:
	return err;
}
767
768/************************************************************************
769 * Transmit handling
770 ***********************************************************************/
771
772/* Tell how big L2TP headers are for a particular session. This
773 * depends on whether sequence numbers are being used.
774 */
775static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
776{
777 if (session->send_seq)
778 return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
779
780 return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
781}
782
783/* Build an L2TP header for the session into the buffer provided.
784 */
785static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
786 void *buf)
787{
788 __be16 *bufp = buf;
789 u16 flags = L2TP_HDR_VER;
790
791 if (session->send_seq)
792 flags |= L2TP_HDRFLAG_S;
793
794 /* Setup L2TP header.
795 * FIXME: Can this ever be unaligned? Is direct dereferencing of
796 * 16-bit header fields safe here for all architectures?
797 */
798 *bufp++ = htons(flags);
799 *bufp++ = htons(session->tunnel_addr.d_tunnel);
800 *bufp++ = htons(session->tunnel_addr.d_session);
801 if (session->send_seq) {
802 *bufp++ = htons(session->ns);
803 *bufp++ = 0;
804 session->ns++;
805 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
806 "%s: updated ns to %hu\n", session->name, session->ns);
807 }
808}
809
810/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
811 * when a user application does a sendmsg() on the session socket. L2TP and
812 * PPP headers must be inserted into the user's data.
813 */
814static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
815 size_t total_len)
816{
817 static const unsigned char ppph[2] = { 0xff, 0x03 };
818 struct sock *sk = sock->sk;
819 struct inet_sock *inet;
820 __wsum csum = 0;
821 struct sk_buff *skb;
822 int error;
823 int hdr_len;
824 struct pppol2tp_session *session;
825 struct pppol2tp_tunnel *tunnel;
826 struct udphdr *uh;
827
828 error = -ENOTCONN;
829 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
830 goto error;
831
832 /* Get session and tunnel contexts */
833 error = -EBADF;
834 session = pppol2tp_sock_to_session(sk);
835 if (session == NULL)
836 goto error;
837
838 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
839 if (tunnel == NULL)
840 goto error;
841
842 /* What header length is configured for this session? */
843 hdr_len = pppol2tp_l2tp_header_len(session);
844
845 /* Allocate a socket buffer */
846 error = -ENOMEM;
847 skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
848 sizeof(struct udphdr) + hdr_len +
849 sizeof(ppph) + total_len,
850 0, GFP_KERNEL);
851 if (!skb)
852 goto error;
853
854 /* Reserve space for headers. */
855 skb_reserve(skb, NET_SKB_PAD);
856 skb_reset_network_header(skb);
857 skb_reserve(skb, sizeof(struct iphdr));
858 skb_reset_transport_header(skb);
859
860 /* Build UDP header */
861 inet = inet_sk(session->tunnel_sock);
862 uh = (struct udphdr *) skb->data;
863 uh->source = inet->sport;
864 uh->dest = inet->dport;
865 uh->len = htons(hdr_len + sizeof(ppph) + total_len);
866 uh->check = 0;
867 skb_put(skb, sizeof(struct udphdr));
868
869 /* Build L2TP header */
870 pppol2tp_build_l2tp_header(session, skb->data);
871 skb_put(skb, hdr_len);
872
873 /* Add PPP header */
874 skb->data[0] = ppph[0];
875 skb->data[1] = ppph[1];
876 skb_put(skb, 2);
877
878 /* Copy user data into skb */
879 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
880 if (error < 0) {
881 kfree_skb(skb);
882 goto error;
883 }
884 skb_put(skb, total_len);
885
886 /* Calculate UDP checksum if configured to do so */
887 if (session->tunnel_sock->sk_no_check != UDP_CSUM_NOXMIT)
888 csum = udp_csum_outgoing(sk, skb);
889
890 /* Debug */
891 if (session->send_seq)
892 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
893 "%s: send %Zd bytes, ns=%hu\n", session->name,
894 total_len, session->ns - 1);
895 else
896 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
897 "%s: send %Zd bytes\n", session->name, total_len);
898
899 if (session->debug & PPPOL2TP_MSG_DATA) {
900 int i;
901 unsigned char *datap = skb->data;
902
903 printk(KERN_DEBUG "%s: xmit:", session->name);
904 for (i = 0; i < total_len; i++) {
905 printk(" %02X", *datap++);
906 if (i == 15) {
907 printk(" ...");
908 break;
909 }
910 }
911 printk("\n");
912 }
913
914 /* Queue the packet to IP for output */
915 error = ip_queue_xmit(skb, 1);
916
917 /* Update stats */
918 if (error >= 0) {
919 tunnel->stats.tx_packets++;
920 tunnel->stats.tx_bytes += skb->len;
921 session->stats.tx_packets++;
922 session->stats.tx_bytes += skb->len;
923 } else {
924 tunnel->stats.tx_errors++;
925 session->stats.tx_errors++;
926 }
927
928error:
929 return error;
930}
931
932/* Transmit function called by generic PPP driver. Sends PPP frame
933 * over PPPoL2TP socket.
934 *
935 * This is almost the same as pppol2tp_sendmsg(), but rather than
936 * being called with a msghdr from userspace, it is called with a skb
937 * from the kernel.
938 *
939 * The supplied skb from ppp doesn't have enough headroom for the
940 * insertion of L2TP, UDP and IP headers so we need to allocate more
941 * headroom in the skb. This will create a cloned skb. But we must be
942 * careful in the error case because the caller will expect to free
943 * the skb it supplied, not our cloned skb. So we take care to always
944 * leave the original skb unfreed if we return an error.
945 */
946static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
947{
948 static const u8 ppph[2] = { 0xff, 0x03 };
949 struct sock *sk = (struct sock *) chan->private;
950 struct sock *sk_tun;
951 int hdr_len;
952 struct pppol2tp_session *session;
953 struct pppol2tp_tunnel *tunnel;
954 int rc;
955 int headroom;
956 int data_len = skb->len;
957 struct inet_sock *inet;
958 __wsum csum = 0;
959 struct sk_buff *skb2 = NULL;
960 struct udphdr *uh;
961
962 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
963 goto abort;
964
965 /* Get session and tunnel contexts from the socket */
966 session = pppol2tp_sock_to_session(sk);
967 if (session == NULL)
968 goto abort;
969
970 sk_tun = session->tunnel_sock;
971 if (sk_tun == NULL)
972 goto abort;
973 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
974 if (tunnel == NULL)
975 goto abort;
976
977 /* What header length is configured for this session? */
978 hdr_len = pppol2tp_l2tp_header_len(session);
979
980 /* Check that there's enough headroom in the skb to insert IP,
981 * UDP and L2TP and PPP headers. If not enough, expand it to
982 * make room. Note that a new skb (or a clone) is
983 * allocated. If we return an error from this point on, make
984 * sure we free the new skb but do not free the original skb
985 * since that is done by the caller for the error case.
986 */
987 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
988 sizeof(struct udphdr) + hdr_len + sizeof(ppph);
989 if (skb_headroom(skb) < headroom) {
990 skb2 = skb_realloc_headroom(skb, headroom);
991 if (skb2 == NULL)
992 goto abort;
993 } else
994 skb2 = skb;
995
996 /* Check that the socket has room */
997 if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf)
998 skb_set_owner_w(skb2, sk_tun);
999 else
1000 goto discard;
1001
1002 /* Setup PPP header */
1003 skb_push(skb2, sizeof(ppph));
1004 skb2->data[0] = ppph[0];
1005 skb2->data[1] = ppph[1];
1006
1007 /* Setup L2TP header */
1008 skb_push(skb2, hdr_len);
1009 pppol2tp_build_l2tp_header(session, skb2->data);
1010
1011 /* Setup UDP header */
1012 inet = inet_sk(sk_tun);
1013 skb_push(skb2, sizeof(struct udphdr));
1014 skb_reset_transport_header(skb2);
1015 uh = (struct udphdr *) skb2->data;
1016 uh->source = inet->sport;
1017 uh->dest = inet->dport;
1018 uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len);
1019 uh->check = 0;
1020
1021 /* Calculate UDP checksum if configured to do so */
1022 if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
1023 csum = udp_csum_outgoing(sk_tun, skb2);
1024
1025 /* Debug */
1026 if (session->send_seq)
1027 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1028 "%s: send %d bytes, ns=%hu\n", session->name,
1029 data_len, session->ns - 1);
1030 else
1031 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1032 "%s: send %d bytes\n", session->name, data_len);
1033
1034 if (session->debug & PPPOL2TP_MSG_DATA) {
1035 int i;
1036 unsigned char *datap = skb2->data;
1037
1038 printk(KERN_DEBUG "%s: xmit:", session->name);
1039 for (i = 0; i < data_len; i++) {
1040 printk(" %02X", *datap++);
1041 if (i == 31) {
1042 printk(" ...");
1043 break;
1044 }
1045 }
1046 printk("\n");
1047 }
1048
1049 /* Get routing info from the tunnel socket */
1050 skb2->dst = sk_dst_get(sk_tun);
1051
1052 /* Queue the packet to IP for output */
1053 rc = ip_queue_xmit(skb2, 1);
1054
1055 /* Update stats */
1056 if (rc >= 0) {
1057 tunnel->stats.tx_packets++;
1058 tunnel->stats.tx_bytes += skb2->len;
1059 session->stats.tx_packets++;
1060 session->stats.tx_bytes += skb2->len;
1061 } else {
1062 tunnel->stats.tx_errors++;
1063 session->stats.tx_errors++;
1064 }
1065
1066 /* Free the original skb */
1067 kfree_skb(skb);
1068
1069 return 1;
1070
1071discard:
1072 /* Free the new skb. Caller will free original skb. */
1073 if (skb2 != skb)
1074 kfree_skb(skb2);
1075abort:
1076 return 0;
1077}
1078
1079/*****************************************************************************
1080 * Session (and tunnel control) socket create/destroy.
1081 *****************************************************************************/
1082
/* When the tunnel UDP socket is closed, all the attached sockets need to go
 * too. Each session is unhashed, unbound from PPP and has its queues
 * purged. The hlist lock must be dropped around the per-socket work, so
 * each hash chain is rescanned from its head after every removal.
 */
static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct pppol2tp_session *session;
	struct sock *sk;

	if (tunnel == NULL)
		BUG();

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing all sessions...\n", tunnel->name);

	write_lock(&tunnel->hlist_lock);
	for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct pppol2tp_session, hlist);

			sk = session->sock;

			PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
			       "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			sock_hold(sk);
			write_unlock(&tunnel->hlist_lock);
			lock_sock(sk);

			/* Detach from PPP and mark the socket dead */
			if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
				pppox_unbind_sock(sk);
				sk->sk_state = PPPOX_DEAD;
				sk->sk_state_change(sk);
			}

			/* Purge any queued data */
			skb_queue_purge(&sk->sk_receive_queue);
			skb_queue_purge(&sk->sk_write_queue);
			skb_queue_purge(&session->reorder_q);

			release_sock(sk);
			sock_put(sk);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			write_lock(&tunnel->hlist_lock);
			goto again;
		}
	}
	write_unlock(&tunnel->hlist_lock);
}
1148
1149/* Really kill the tunnel.
1150 * Come here only when all sessions have been cleared from the tunnel.
1151 */
1152static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
1153{
1154 /* Remove from socket list */
1155 write_lock(&pppol2tp_tunnel_list_lock);
1156 list_del_init(&tunnel->list);
1157 write_unlock(&pppol2tp_tunnel_list_lock);
1158
1159 atomic_dec(&pppol2tp_tunnel_count);
1160 kfree(tunnel);
1161}
1162
/* Tunnel UDP socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed. Tears down the UDP encap hooks, restores the original socket
 * destructor and chains to it, then drops the tunnel's base reference.
 */
static void pppol2tp_tunnel_destruct(struct sock *sk)
{
	struct pppol2tp_tunnel *tunnel;

	tunnel = pppol2tp_sock_to_tunnel(sk);
	if (tunnel == NULL)
		goto end;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	pppol2tp_tunnel_closeall(tunnel);

	/* No longer an encapsulation socket. See net/ipv4/udp.c */
	(udp_sk(sk))->encap_type = 0;
	(udp_sk(sk))->encap_rcv = NULL;

	/* Remove hooks into tunnel socket */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call original (UDP) socket destructor */
	if (sk->sk_destruct != NULL)
		(*sk->sk_destruct)(sk);

	/* Drop the reference taken when the tunnel was prepared; frees
	 * the tunnel context if no sessions still hold it. */
	pppol2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}
1199
/* Really kill the session socket. (Called from sock_put() if
 * refcnt == 0.) Unhashes the session from its tunnel, drops the
 * tunnel reference the session held, and frees the session context.
 */
static void pppol2tp_session_destruct(struct sock *sk)
{
	struct pppol2tp_session *session = NULL;

	if (sk->sk_user_data != NULL) {
		struct pppol2tp_tunnel *tunnel;

		session = pppol2tp_sock_to_session(sk);
		if (session == NULL)
			goto out;

		/* Don't use pppol2tp_sock_to_tunnel() here to
		 * get the tunnel context because the tunnel
		 * socket might have already been closed (its
		 * sk->sk_user_data will be NULL) so use the
		 * session's private tunnel ptr instead.
		 */
		tunnel = session->tunnel;
		if (tunnel != NULL) {
			BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

			/* If session_id is zero, this is a null
			 * session context, which was created for a
			 * socket that is being used only to manage
			 * tunnels.
			 */
			if (session->tunnel_addr.s_session != 0) {
				/* Delete the session socket from the
				 * hash
				 */
				write_lock(&tunnel->hlist_lock);
				hlist_del_init(&session->hlist);
				write_unlock(&tunnel->hlist_lock);

				atomic_dec(&pppol2tp_session_count);
			}

			/* This will delete the tunnel context if this
			 * is the last session on the tunnel.
			 */
			session->tunnel = NULL;
			session->tunnel_sock = NULL;
			pppol2tp_tunnel_dec_refcount(tunnel);
		}
	}

	kfree(session);
out:
	return;
}
1253
/* Called when the PPPoX socket (session) is closed. Unbinds from PPP,
 * marks the socket dead, purges its queues and drops the final
 * reference (which triggers pppol2tp_session_destruct() when it hits
 * zero). Returns 0, or -EBADF if the socket was already dead.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge any queued data.
	 * NOTE(review): only the socket's own queues are purged here;
	 * session->reorder_q is purged in pppol2tp_tunnel_closeall() -
	 * confirm no skbs can remain on reorder_q on this path.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}
1294
/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
 * sockets attached to it.
 *
 * @fd identifies a UDP socket opened by the userspace L2TP daemon.
 * After sanity checks (AF_INET SOCK_DGRAM), either the tunnel context
 * already attached to the socket is reused, or a new pppol2tp_tunnel is
 * allocated, the socket's destructor is hooked for cleanup, and the
 * socket is flagged as an L2TP-in-UDP encapsulation socket.
 *
 * Returns the tunnel's struct sock on success (with *error set to 0 on
 * the fresh-prep path), or NULL with *error holding a negative errno.
 */
static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
						   int *error)
{
	int err;
	struct socket *sock = NULL;
	struct sock *sk;
	struct pppol2tp_tunnel *tunnel;
	struct sock *ret = NULL;

	/* Get the tunnel UDP socket from the fd, which was opened by
	 * the userspace L2TP daemon.
	 */
	err = -EBADF;
	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
		       tunnel_id, fd, err);
		goto err;
	}

	/* Quick sanity checks */
	err = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_DGRAM) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: fd %d wrong type, got %d, expected %d\n",
		       tunnel_id, fd, sock->type, SOCK_DGRAM);
		goto err;
	}
	err = -EAFNOSUPPORT;
	if (sock->ops->family != AF_INET) {
		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
		       "tunl %hu: fd %d wrong family, got %d, expected %d\n",
		       tunnel_id, fd, sock->ops->family, AF_INET);
		goto err;
	}

	err = -ENOTCONN;
	sk = sock->sk;

	/* Check if this socket has already been prepped */
	tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
	if (tunnel != NULL) {
		/* User-data field already set */
		err = -EBUSY;
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* This socket has already been prepped: hand back the
		 * existing tunnel sock. NOTE(review): *error is left
		 * untouched on this path; callers only test for a NULL
		 * return, so that appears deliberate — confirm.
		 */
		ret = tunnel->sock;
		goto out;
	}

	/* This socket is available and needs prepping. Create a new tunnel
	 * context and init it.
	 */
	sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
	if (sk->sk_user_data == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);

	tunnel->stats.tunnel_id = tunnel_id;
	tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &pppol2tp_tunnel_destruct;

	tunnel->sock = sk;
	/* NOTE(review): presumably forced to GFP_ATOMIC because this
	 * socket is later used from non-sleeping (softirq) context —
	 * confirm against the xmit path.
	 */
	sk->sk_allocation = GFP_ATOMIC;

	/* Misc init */
	rwlock_init(&tunnel->hlist_lock);

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	write_lock(&pppol2tp_tunnel_list_lock);
	list_add(&tunnel->list, &pppol2tp_tunnel_list);
	write_unlock(&pppol2tp_tunnel_list_lock);
	atomic_inc(&pppol2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero.
	 */
	pppol2tp_tunnel_inc_refcount(tunnel);

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	(udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
	(udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;

	ret = tunnel->sock;

	*error = 0;
out:
	/* Drop the fd reference taken by sockfd_lookup(); the tunnel
	 * keeps the socket alive via its own sk reference semantics.
	 */
	if (sock)
		sockfd_put(sock);

	return ret;

err:
	*error = err;
	goto out;
}
1406
/* Protocol descriptor for PPPoL2TP session sockets. obj_size covers
 * the enclosing pppox_sock so pppox_sk() works on these sockets.
 */
static struct proto pppol2tp_sk_proto = {
	.name	  = "PPPOL2TP",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};
1412
1413/* socket() handler. Initialize a new struct sock.
1414 */
1415static int pppol2tp_create(struct socket *sock)
1416{
1417 int error = -ENOMEM;
1418 struct sock *sk;
1419
1420 sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1);
1421 if (!sk)
1422 goto out;
1423
1424 sock_init_data(sock, sk);
1425
1426 sock->state = SS_UNCONNECTED;
1427 sock->ops = &pppol2tp_ops;
1428
1429 sk->sk_backlog_rcv = pppol2tp_recv_core;
1430 sk->sk_protocol = PX_PROTO_OL2TP;
1431 sk->sk_family = PF_PPPOX;
1432 sk->sk_state = PPPOX_NONE;
1433 sk->sk_type = SOCK_STREAM;
1434 sk->sk_destruct = pppol2tp_session_destruct;
1435
1436 error = 0;
1437
1438out:
1439 return error;
1440}
1441
1442/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
1443 */
1444static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1445 int sockaddr_len, int flags)
1446{
1447 struct sock *sk = sock->sk;
1448 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
1449 struct pppox_sock *po = pppox_sk(sk);
1450 struct sock *tunnel_sock = NULL;
1451 struct pppol2tp_session *session = NULL;
1452 struct pppol2tp_tunnel *tunnel;
1453 struct dst_entry *dst;
1454 int error = 0;
1455
1456 lock_sock(sk);
1457
1458 error = -EINVAL;
1459 if (sp->sa_protocol != PX_PROTO_OL2TP)
1460 goto end;
1461
1462 /* Check for already bound sockets */
1463 error = -EBUSY;
1464 if (sk->sk_state & PPPOX_CONNECTED)
1465 goto end;
1466
1467 /* We don't supporting rebinding anyway */
1468 error = -EALREADY;
1469 if (sk->sk_user_data)
1470 goto end; /* socket is already attached */
1471
1472 /* Don't bind if s_tunnel is 0 */
1473 error = -EINVAL;
1474 if (sp->pppol2tp.s_tunnel == 0)
1475 goto end;
1476
1477 /* Special case: prepare tunnel socket if s_session and
1478 * d_session is 0. Otherwise look up tunnel using supplied
1479 * tunnel id.
1480 */
1481 if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
1482 tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd,
1483 sp->pppol2tp.s_tunnel,
1484 &error);
1485 if (tunnel_sock == NULL)
1486 goto end;
1487
1488 tunnel = tunnel_sock->sk_user_data;
1489 } else {
1490 tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel);
1491
1492 /* Error if we can't find the tunnel */
1493 error = -ENOENT;
1494 if (tunnel == NULL)
1495 goto end;
1496
1497 tunnel_sock = tunnel->sock;
1498 }
1499
1500 /* Check that this session doesn't already exist */
1501 error = -EEXIST;
1502 session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
1503 if (session != NULL)
1504 goto end;
1505
1506 /* Allocate and initialize a new session context. */
1507 session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
1508 if (session == NULL) {
1509 error = -ENOMEM;
1510 goto end;
1511 }
1512
1513 skb_queue_head_init(&session->reorder_q);
1514
1515 session->magic = L2TP_SESSION_MAGIC;
1516 session->owner = current->pid;
1517 session->sock = sk;
1518 session->tunnel = tunnel;
1519 session->tunnel_sock = tunnel_sock;
1520 session->tunnel_addr = sp->pppol2tp;
1521 sprintf(&session->name[0], "sess %hu/%hu",
1522 session->tunnel_addr.s_tunnel,
1523 session->tunnel_addr.s_session);
1524
1525 session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
1526 session->stats.session_id = session->tunnel_addr.s_session;
1527
1528 INIT_HLIST_NODE(&session->hlist);
1529
1530 /* Inherit debug options from tunnel */
1531 session->debug = tunnel->debug;
1532
1533 /* Default MTU must allow space for UDP/L2TP/PPP
1534 * headers.
1535 */
1536 session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
1537
1538 /* If PMTU discovery was enabled, use the MTU that was discovered */
1539 dst = sk_dst_get(sk);
1540 if (dst != NULL) {
1541 u32 pmtu = dst_mtu(__sk_dst_get(sk));
1542 if (pmtu != 0)
1543 session->mtu = session->mru = pmtu -
1544 PPPOL2TP_HEADER_OVERHEAD;
1545 dst_release(dst);
1546 }
1547
1548 /* Special case: if source & dest session_id == 0x0000, this socket is
1549 * being created to manage the tunnel. Don't add the session to the
1550 * session hash list, just set up the internal context for use by
1551 * ioctl() and sockopt() handlers.
1552 */
1553 if ((session->tunnel_addr.s_session == 0) &&
1554 (session->tunnel_addr.d_session == 0)) {
1555 error = 0;
1556 sk->sk_user_data = session;
1557 goto out_no_ppp;
1558 }
1559
1560 /* Get tunnel context from the tunnel socket */
1561 tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
1562 if (tunnel == NULL) {
1563 error = -EBADF;
1564 goto end;
1565 }
1566
1567 /* Right now, because we don't have a way to push the incoming skb's
1568 * straight through the UDP layer, the only header we need to worry
1569 * about is the L2TP header. This size is different depending on
1570 * whether sequence numbers are enabled for the data channel.
1571 */
1572 po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
1573
1574 po->chan.private = sk;
1575 po->chan.ops = &pppol2tp_chan_ops;
1576 po->chan.mtu = session->mtu;
1577
1578 error = ppp_register_channel(&po->chan);
1579 if (error)
1580 goto end;
1581
1582 /* This is how we get the session context from the socket. */
1583 sk->sk_user_data = session;
1584
1585 /* Add session to the tunnel's hash list */
1586 write_lock(&tunnel->hlist_lock);
1587 hlist_add_head(&session->hlist,
1588 pppol2tp_session_id_hash(tunnel,
1589 session->tunnel_addr.s_session));
1590 write_unlock(&tunnel->hlist_lock);
1591
1592 atomic_inc(&pppol2tp_session_count);
1593
1594out_no_ppp:
1595 pppol2tp_tunnel_inc_refcount(tunnel);
1596 sk->sk_state = PPPOX_CONNECTED;
1597 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1598 "%s: created\n", session->name);
1599
1600end:
1601 release_sock(sk);
1602
1603 if (error != 0)
1604 PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
1605 "%s: connect failed: %d\n", session->name, error);
1606
1607 return error;
1608}
1609
1610/* getname() support.
1611 */
1612static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
1613 int *usockaddr_len, int peer)
1614{
1615 int len = sizeof(struct sockaddr_pppol2tp);
1616 struct sockaddr_pppol2tp sp;
1617 int error = 0;
1618 struct pppol2tp_session *session;
1619
1620 error = -ENOTCONN;
1621 if (sock->sk->sk_state != PPPOX_CONNECTED)
1622 goto end;
1623
1624 session = pppol2tp_sock_to_session(sock->sk);
1625 if (session == NULL) {
1626 error = -EBADF;
1627 goto end;
1628 }
1629
1630 sp.sa_family = AF_PPPOX;
1631 sp.sa_protocol = PX_PROTO_OL2TP;
1632 memcpy(&sp.pppol2tp, &session->tunnel_addr,
1633 sizeof(struct pppol2tp_addr));
1634
1635 memcpy(uaddr, &sp, len);
1636
1637 *usockaddr_len = len;
1638
1639 error = 0;
1640
1641end:
1642 return error;
1643}
1644
1645/****************************************************************************
1646 * ioctl() handlers.
1647 *
1648 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1649 * sockets. However, in order to control kernel tunnel features, we allow
1650 * userspace to create a special "tunnel" PPPoX socket which is used for
1651 * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
1652 * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
1653 * calls.
1654 ****************************************************************************/
1655
1656/* Session ioctl helper.
1657 */
1658static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
1659 unsigned int cmd, unsigned long arg)
1660{
1661 struct ifreq ifr;
1662 int err = 0;
1663 struct sock *sk = session->sock;
1664 int val = (int) arg;
1665
1666 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
1667 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
1668 session->name, cmd, arg);
1669
1670 sock_hold(sk);
1671
1672 switch (cmd) {
1673 case SIOCGIFMTU:
1674 err = -ENXIO;
1675 if (!(sk->sk_state & PPPOX_CONNECTED))
1676 break;
1677
1678 err = -EFAULT;
1679 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1680 break;
1681 ifr.ifr_mtu = session->mtu;
1682 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
1683 break;
1684
1685 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1686 "%s: get mtu=%d\n", session->name, session->mtu);
1687 err = 0;
1688 break;
1689
1690 case SIOCSIFMTU:
1691 err = -ENXIO;
1692 if (!(sk->sk_state & PPPOX_CONNECTED))
1693 break;
1694
1695 err = -EFAULT;
1696 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1697 break;
1698
1699 session->mtu = ifr.ifr_mtu;
1700
1701 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1702 "%s: set mtu=%d\n", session->name, session->mtu);
1703 err = 0;
1704 break;
1705
1706 case PPPIOCGMRU:
1707 err = -ENXIO;
1708 if (!(sk->sk_state & PPPOX_CONNECTED))
1709 break;
1710
1711 err = -EFAULT;
1712 if (put_user(session->mru, (int __user *) arg))
1713 break;
1714
1715 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1716 "%s: get mru=%d\n", session->name, session->mru);
1717 err = 0;
1718 break;
1719
1720 case PPPIOCSMRU:
1721 err = -ENXIO;
1722 if (!(sk->sk_state & PPPOX_CONNECTED))
1723 break;
1724
1725 err = -EFAULT;
1726 if (get_user(val,(int __user *) arg))
1727 break;
1728
1729 session->mru = val;
1730 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1731 "%s: set mru=%d\n", session->name, session->mru);
1732 err = 0;
1733 break;
1734
1735 case PPPIOCGFLAGS:
1736 err = -EFAULT;
1737 if (put_user(session->flags, (int __user *) arg))
1738 break;
1739
1740 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1741 "%s: get flags=%d\n", session->name, session->flags);
1742 err = 0;
1743 break;
1744
1745 case PPPIOCSFLAGS:
1746 err = -EFAULT;
1747 if (get_user(val, (int __user *) arg))
1748 break;
1749 session->flags = val;
1750 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1751 "%s: set flags=%d\n", session->name, session->flags);
1752 err = 0;
1753 break;
1754
1755 case PPPIOCGL2TPSTATS:
1756 err = -ENXIO;
1757 if (!(sk->sk_state & PPPOX_CONNECTED))
1758 break;
1759
1760 if (copy_to_user((void __user *) arg, &session->stats,
1761 sizeof(session->stats)))
1762 break;
1763 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1764 "%s: get L2TP stats\n", session->name);
1765 err = 0;
1766 break;
1767
1768 default:
1769 err = -ENOSYS;
1770 break;
1771 }
1772
1773 sock_put(sk);
1774
1775 return err;
1776}
1777
/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 *
 * Returns 0 on success, -ENXIO if not connected, -EFAULT on failed
 * user copies, -EBADR for an unknown session_id, -ENOSYS for unknown
 * commands.
 */
static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk = tunnel->sock;
	struct pppol2tp_ioc_stats stats_req;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
	       cmd, arg);

	/* Keep the tunnel socket alive while we use it */
	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		if (copy_from_user(&stats_req, (void __user *) arg,
				   sizeof(stats_req))) {
			err = -EFAULT;
			break;
		}
		if (stats_req.session_id != 0) {
			/* resend to session ioctl handler */
			struct pppol2tp_session *session =
				pppol2tp_session_find(tunnel, stats_req.session_id);
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd, arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		/* Report whether an IPsec policy is installed on the
		 * tunnel socket.
		 */
		tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		if (copy_to_user((void __user *) arg, &tunnel->stats,
				 sizeof(tunnel->stats))) {
			err = -EFAULT;
			break;
		}
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
1840
/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 *
 * A socket whose session_id is zero is a tunnel-management socket and
 * gets the tunnel handler; any other connected session socket gets the
 * session handler. Returns 0 for a NULL sk (nothing to do) or the
 * helper's result; -EBADF/-ENOTCONN for dead or unattached sockets.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pppol2tp_session *session;
	struct pppol2tp_tunnel *tunnel;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	if ((session->tunnel_addr.s_session == 0) &&
	    (session->tunnel_addr.d_session == 0)) {
		err = -EBADF;
		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
		if (tunnel == NULL)
			goto end;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		goto end;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end:
	return err;
}
1889
1890/*****************************************************************************
1891 * setsockopt() / getsockopt() support.
1892 *
1893 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1894 * sockets. In order to control kernel tunnel features, we allow userspace to
1895 * create a special "tunnel" PPPoX socket which is used for control only.
1896 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
1897 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
1898 *****************************************************************************/
1899
1900/* Tunnel setsockopt() helper.
1901 */
1902static int pppol2tp_tunnel_setsockopt(struct sock *sk,
1903 struct pppol2tp_tunnel *tunnel,
1904 int optname, int val)
1905{
1906 int err = 0;
1907
1908 switch (optname) {
1909 case PPPOL2TP_SO_DEBUG:
1910 tunnel->debug = val;
1911 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1912 "%s: set debug=%x\n", tunnel->name, tunnel->debug);
1913 break;
1914
1915 default:
1916 err = -ENOPROTOOPT;
1917 break;
1918 }
1919
1920 return err;
1921}
1922
/* Session setsockopt helper.
 *
 * Boolean options (RECVSEQ/SENDSEQ/LNSMODE) accept only 0 or 1 and are
 * stored internally as 0 or -1 (all bits set). Returns 0 on success,
 * -EINVAL for out-of-range values, -ENOPROTOOPT for unknown options.
 */
static int pppol2tp_session_setsockopt(struct sock *sk,
				       struct pppol2tp_session *session,
				       int optname, int val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->recv_seq = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set recv_seq=%d\n", session->name,
		       session->recv_seq);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->send_seq = val ? -1 : 0;
		/* Keep the ppp channel's header reservation in step:
		 * the L2TP data header is larger when sequence numbers
		 * are transmitted.
		 */
		{
			struct sock *ssk      = session->sock;
			struct pppox_sock *po = pppox_sk(ssk);
			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
		}
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set send_seq=%d\n", session->name, session->send_seq);
		break;

	case PPPOL2TP_SO_LNSMODE:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->lns_mode = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set lns_mode=%d\n", session->name,
		       session->lns_mode);
		break;

	case PPPOL2TP_SO_DEBUG:
		session->debug = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set debug=%x\n", session->name, session->debug);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* val is in milliseconds; stored in jiffies */
		session->reorder_timeout = msecs_to_jiffies(val);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set reorder_timeout=%d\n", session->name,
		       session->reorder_timeout);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
1990
1991/* Main setsockopt() entry point.
1992 * Does API checks, then calls either the tunnel or session setsockopt
 * handler, according to whether the PPPoL2TP socket is for a regular
1994 * session or the special tunnel type.
1995 */
1996static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1997 char __user *optval, int optlen)
1998{
1999 struct sock *sk = sock->sk;
2000 struct pppol2tp_session *session = sk->sk_user_data;
2001 struct pppol2tp_tunnel *tunnel;
2002 int val;
2003 int err;
2004
2005 if (level != SOL_PPPOL2TP)
2006 return udp_prot.setsockopt(sk, level, optname, optval, optlen);
2007
2008 if (optlen < sizeof(int))
2009 return -EINVAL;
2010
2011 if (get_user(val, (int __user *)optval))
2012 return -EFAULT;
2013
2014 err = -ENOTCONN;
2015 if (sk->sk_user_data == NULL)
2016 goto end;
2017
2018 /* Get session context from the socket */
2019 err = -EBADF;
2020 session = pppol2tp_sock_to_session(sk);
2021 if (session == NULL)
2022 goto end;
2023
2024 /* Special case: if session_id == 0x0000, treat as operation on tunnel
2025 */
2026 if ((session->tunnel_addr.s_session == 0) &&
2027 (session->tunnel_addr.d_session == 0)) {
2028 err = -EBADF;
2029 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2030 if (tunnel == NULL)
2031 goto end;
2032
2033 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
2034 } else
2035 err = pppol2tp_session_setsockopt(sk, session, optname, val);
2036
2037 err = 0;
2038
2039end:
2040 return err;
2041}
2042
2043/* Tunnel getsockopt helper. Called with sock locked.
2044 */
2045static int pppol2tp_tunnel_getsockopt(struct sock *sk,
2046 struct pppol2tp_tunnel *tunnel,
2047 int optname, int __user *val)
2048{
2049 int err = 0;
2050
2051 switch (optname) {
2052 case PPPOL2TP_SO_DEBUG:
2053 *val = tunnel->debug;
2054 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2055 "%s: get debug=%x\n", tunnel->name, tunnel->debug);
2056 break;
2057
2058 default:
2059 err = -ENOPROTOOPT;
2060 break;
2061 }
2062
2063 return err;
2064}
2065
/* Session getsockopt helper. Called with sock locked.
 *
 * Writes the option value into *val (a kernel-space int despite the
 * __user annotation inherited from the caller's prototype). Returns 0
 * on success or -ENOPROTOOPT for unknown options.
 */
static int pppol2tp_session_getsockopt(struct sock *sk,
				       struct pppol2tp_session *session,
				       int optname, int __user *val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		*val = session->recv_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get recv_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		*val = session->send_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get send_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_LNSMODE:
		*val = session->lns_mode;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get lns_mode=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_DEBUG:
		*val = session->debug;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get debug=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* stored in jiffies; reported in milliseconds */
		*val = (int) jiffies_to_msecs(session->reorder_timeout);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get reorder_timeout=%d\n", session->name, *val);
		break;

	default:
		err = -ENOPROTOOPT;
	}

	return err;
}
2111
2112/* Main getsockopt() entry point.
2113 * Does API checks, then calls either the tunnel or session getsockopt
 * handler, according to whether the PPPoX socket is for a regular session
2115 * or the special tunnel type.
2116 */
2117static int pppol2tp_getsockopt(struct socket *sock, int level,
2118 int optname, char __user *optval, int __user *optlen)
2119{
2120 struct sock *sk = sock->sk;
2121 struct pppol2tp_session *session = sk->sk_user_data;
2122 struct pppol2tp_tunnel *tunnel;
2123 int val, len;
2124 int err;
2125
2126 if (level != SOL_PPPOL2TP)
2127 return udp_prot.getsockopt(sk, level, optname, optval, optlen);
2128
2129 if (get_user(len, (int __user *) optlen))
2130 return -EFAULT;
2131
2132 len = min_t(unsigned int, len, sizeof(int));
2133
2134 if (len < 0)
2135 return -EINVAL;
2136
2137 err = -ENOTCONN;
2138 if (sk->sk_user_data == NULL)
2139 goto end;
2140
2141 /* Get the session context */
2142 err = -EBADF;
2143 session = pppol2tp_sock_to_session(sk);
2144 if (session == NULL)
2145 goto end;
2146
2147 /* Special case: if session_id == 0x0000, treat as operation on tunnel */
2148 if ((session->tunnel_addr.s_session == 0) &&
2149 (session->tunnel_addr.d_session == 0)) {
2150 err = -EBADF;
2151 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2152 if (tunnel == NULL)
2153 goto end;
2154
2155 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
2156 } else
2157 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
2158
2159 err = -EFAULT;
2160 if (put_user(len, (int __user *) optlen))
2161 goto end;
2162
2163 if (copy_to_user((void __user *) optval, &val, len))
2164 goto end;
2165
2166 err = 0;
2167end:
2168 return err;
2169}
2170
2171/*****************************************************************************
2172 * /proc filesystem for debug
2173 *****************************************************************************/
2174
2175#ifdef CONFIG_PROC_FS
2176
2177#include <linux/seq_file.h>
2178
/* Iterator state for the /proc file, kept in seq_file->private across
 * successive ->start() calls so each read pass resumes where the
 * previous one stopped.
 */
struct pppol2tp_seq_data {
	struct pppol2tp_tunnel *tunnel;		/* current tunnel */
	struct pppol2tp_session *session;	/* NULL means get first session in tunnel */
};
2183
/* Return the session following @curr in @tunnel's hash table, or the
 * first session when @curr is NULL; NULL when @curr was the last one.
 * Scans every hash bucket under the tunnel's hlist read lock.
 * NOTE(review): assumes bucket/chain order is stable between calls —
 * sessions added or removed mid-iteration may be skipped or repeated.
 */
static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
{
	struct pppol2tp_session *session = NULL;
	struct hlist_node *walk;
	int found = 0;
	int next = 0;	/* set once @curr has been seen */
	int i;

	read_lock(&tunnel->hlist_lock);
	for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
		hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
			if (curr == NULL) {
				/* no current session: return the first */
				found = 1;
				goto out;
			}
			if (session == curr) {
				next = 1;
				continue;
			}
			if (next) {
				/* first session after @curr */
				found = 1;
				goto out;
			}
		}
	}
out:
	read_unlock(&tunnel->hlist_lock);
	/* session still holds the last iterated entry; discard it if the
	 * walk fell off the end without a match.
	 */
	if (!found)
		session = NULL;

	return session;
}
2216
2217static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
2218{
2219 struct pppol2tp_tunnel *tunnel = NULL;
2220
2221 read_lock(&pppol2tp_tunnel_list_lock);
2222 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
2223 goto out;
2224 }
2225 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
2226out:
2227 read_unlock(&pppol2tp_tunnel_list_lock);
2228
2229 return tunnel;
2230}
2231
/* seq_file ->start(). Position 0 emits the header (SEQ_START_TOKEN);
 * later positions advance the tunnel/session cursor kept in
 * m->private, which persists across read() calls. Returns NULL at end
 * of the tunnel list.
 */
static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;

	if (pd->tunnel == NULL) {
		/* first real object: start at the head of the tunnel list */
		if (!list_empty(&pppol2tp_tunnel_list))
			pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
	} else {
		/* advance to the next session, or to the next tunnel
		 * when the current tunnel's sessions are exhausted
		 */
		pd->session = next_session(pd->tunnel, pd->session);
		if (pd->session == NULL) {
			pd->tunnel = next_tunnel(pd->tunnel);
		}
	}

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}
2260
2261static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
2262{
2263 (*pos)++;
2264 return NULL;
2265}
2266
/* seq_file ->stop(). No iteration state to release: the list/hash
 * locks are taken and dropped inside next_session()/next_tunnel().
 */
static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}
2271
/* Emit one tunnel line pair: name, whether sk_user_data still points
 * back at this tunnel ('Y'/'N'), hold count minus our own reference
 * (the session-count column of the header), then debug mask and
 * tx/rx counters.
 */
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct pppol2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
		   tunnel->debug,
		   tunnel->stats.tx_packets, tunnel->stats.tx_bytes,
		   tunnel->stats.tx_errors,
		   tunnel->stats.rx_packets, tunnel->stats.rx_bytes,
		   tunnel->stats.rx_errors);
}
2287
/* Emit one session entry: peer address/port, source and destination
 * tunnel/session ids, socket state, sk_user_data consistency flag,
 * then MTU/MRU, sequencing flags, LNS/LAC role, debug mask, reorder
 * timeout (ms), and nr/ns sequence state with tx/rx counters.
 */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct pppol2tp_session *session = v;

	seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name,
		   ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
		   ntohs(session->tunnel_addr.addr.sin_port),
		   session->tunnel_addr.s_tunnel,
		   session->tunnel_addr.s_session,
		   session->tunnel_addr.d_tunnel,
		   session->tunnel_addr.d_session,
		   session->sock->sk_state,
		   (session == session->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   session->stats.tx_packets,
		   session->stats.tx_bytes,
		   session->stats.tx_errors,
		   session->stats.rx_packets,
		   session->stats.rx_bytes,
		   session->stats.rx_errors);
}
2320
2321static int pppol2tp_seq_show(struct seq_file *m, void *v)
2322{
2323 struct pppol2tp_seq_data *pd = v;
2324
2325 /* display header on line 1 */
2326 if (v == SEQ_START_TOKEN) {
2327 seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
2328 seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
2329 seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
2330 seq_puts(m, " SESSION name, addr/port src-tid/sid "
2331 "dest-tid/sid state user-data-ok\n");
2332 seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
2333 seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
2334 goto out;
2335 }
2336
2337 /* Show the tunnel or session context.
2338 */
2339 if (pd->session == NULL)
2340 pppol2tp_seq_tunnel_show(m, pd->tunnel);
2341 else
2342 pppol2tp_seq_session_show(m, pd->session);
2343
2344out:
2345 return 0;
2346}
2347
/* seq_file iterator ops for /proc/net/pppol2tp */
static struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};
2354
2355/* Called when our /proc file is opened. We allocate data for use when
2356 * iterating our tunnel / session contexts and store it in the private
2357 * data of the seq_file.
2358 */
2359static int pppol2tp_proc_open(struct inode *inode, struct file *file)
2360{
2361 struct seq_file *m;
2362 struct pppol2tp_seq_data *pd;
2363 int ret = 0;
2364
2365 ret = seq_open(file, &pppol2tp_seq_ops);
2366 if (ret < 0)
2367 goto out;
2368
2369 m = file->private_data;
2370
2371 /* Allocate and fill our proc_data for access later */
2372 ret = -ENOMEM;
2373 m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL);
2374 if (m->private == NULL)
2375 goto out;
2376
2377 pd = m->private;
2378 ret = 0;
2379
2380out:
2381 return ret;
2382}
2383
2384/* Called when /proc file access completes.
2385 */
2386static int pppol2tp_proc_release(struct inode *inode, struct file *file)
2387{
2388 struct seq_file *m = (struct seq_file *)file->private_data;
2389
2390 kfree(m->private);
2391 m->private = NULL;
2392
2393 return seq_release(inode, file);
2394}
2395
/* File operations for /proc/net/pppol2tp; open/release wrap the
 * seq_file helpers to manage the per-open iterator state.
 */
static struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= pppol2tp_proc_release,
};
2403
2404static struct proc_dir_entry *pppol2tp_proc;
2405
2406#endif /* CONFIG_PROC_FS */
2407
2408/*****************************************************************************
2409 * Init and cleanup
2410 *****************************************************************************/
2411
/* proto_ops for PPPoL2TP session sockets. Unsupported operations use
 * the sock_no_* stubs; .ioctl goes through pppox_ioctl, which
 * dispatches to our handler registered via pppol2tp_proto below.
 */
static struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};
2431
/* pppox protocol hooks registered for PX_PROTO_OL2TP */
static struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};
2436
/* Module init: register the sock proto, hook into the pppox layer for
 * PX_PROTO_OL2TP, and create /proc/net/pppol2tp. Unwinds in reverse
 * order via the labelled error paths on failure.
 */
static int __init pppol2tp_init(void)
{
	int err;

	err = proto_register(&pppol2tp_sk_proto, 0);
	if (err)
		goto out;
	err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
	if (err)
		goto out_unregister_pppol2tp_proto;

#ifdef CONFIG_PROC_FS
	pppol2tp_proc = create_proc_entry("pppol2tp", 0, proc_net);
	if (!pppol2tp_proc) {
		err = -ENOMEM;
		goto out_unregister_pppox_proto;
	}
	pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
#endif /* CONFIG_PROC_FS */
	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
	       PPPOL2TP_DRV_VERSION);

out:
	return err;

	/* error unwind: fall through from pppox to proto unregistration */
out_unregister_pppox_proto:
	unregister_pppox_proto(PX_PROTO_OL2TP);
out_unregister_pppol2tp_proto:
	proto_unregister(&pppol2tp_sk_proto);
	goto out;
}
2468
/* Module exit: tear down in reverse of pppol2tp_init() — detach from
 * the pppox layer first so no new sockets can be created, then remove
 * the /proc entry and unregister the sock proto.
 */
static void __exit pppol2tp_exit(void)
{
	unregister_pppox_proto(PX_PROTO_OL2TP);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("pppol2tp", proc_net);
#endif
	proto_unregister(&pppol2tp_sk_proto);
}
2478
2479module_init(pppol2tp_init);
2480module_exit(pppol2tp_exit);
2481
2482MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>,"
2483 "James Chapman <jchapman@katalix.com>");
2484MODULE_DESCRIPTION("PPP over L2TP over UDP");
2485MODULE_LICENSE("GPL");
2486MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index ad94358ece89..451486b32f23 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -690,9 +690,9 @@ static int lan_saa9730_rx(struct net_device *dev)
690 lp->stats.rx_packets++; 690 lp->stats.rx_packets++;
691 skb_reserve(skb, 2); /* 16 byte align */ 691 skb_reserve(skb, 2); /* 16 byte align */
692 skb_put(skb, len); /* make room */ 692 skb_put(skb, len); /* make room */
693 eth_copy_and_sum(skb, 693 skb_copy_to_linear_data(skb,
694 (unsigned char *) pData, 694 (unsigned char *) pData,
695 len, 0); 695 len);
696 skb->protocol = eth_type_trans(skb, dev); 696 skb->protocol = eth_type_trans(skb, dev);
697 netif_rx(skb); 697 netif_rx(skb);
698 dev->last_rx = jiffies; 698 dev->last_rx = jiffies;
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 2106becf6990..384b4685e977 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -320,7 +320,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
320 skb_put(skb, len); 320 skb_put(skb, len);
321 321
322 /* Copy out of kseg1 to avoid silly cache flush. */ 322 /* Copy out of kseg1 to avoid silly cache flush. */
323 eth_copy_and_sum(skb, pkt_pointer + 2, len, 0); 323 skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
324 skb->protocol = eth_type_trans(skb, dev); 324 skb->protocol = eth_type_trans(skb, dev);
325 325
326 /* We don't want to receive our own packets */ 326 /* We don't want to receive our own packets */
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index bc8de48da313..ec2ad9f0efa2 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -548,7 +548,7 @@ static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
548 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); 548 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
549 if (skb) { 549 if (skb) {
550 skb_reserve(skb, NET_IP_ALIGN); 550 skb_reserve(skb, NET_IP_ALIGN);
551 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); 551 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
552 *sk_buff = skb; 552 *sk_buff = skb;
553 sis190_give_to_asic(desc, rx_buf_sz); 553 sis190_give_to_asic(desc, rx_buf_sz);
554 ret = 0; 554 ret = 0;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 786d4b9c07ec..f2e101967204 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1456,7 +1456,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1456 pci_dma_sync_single_for_cpu(np->pci_dev, 1456 pci_dma_sync_single_for_cpu(np->pci_dev,
1457 np->rx_info[entry].mapping, 1457 np->rx_info[entry].mapping,
1458 pkt_len, PCI_DMA_FROMDEVICE); 1458 pkt_len, PCI_DMA_FROMDEVICE);
1459 eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0); 1459 skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1460 pci_dma_sync_single_for_device(np->pci_dev, 1460 pci_dma_sync_single_for_device(np->pci_dev,
1461 np->rx_info[entry].mapping, 1461 np->rx_info[entry].mapping,
1462 pkt_len, PCI_DMA_FROMDEVICE); 1462 pkt_len, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index a123ea87893b..b77ab6e8fd35 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -777,7 +777,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
777 { 777 {
778 skb_reserve(skb,2); 778 skb_reserve(skb,2);
779 skb_put(skb,totlen); 779 skb_put(skb,totlen);
780 eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); 780 skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
781 skb->protocol=eth_type_trans(skb,dev); 781 skb->protocol=eth_type_trans(skb,dev);
782 netif_rx(skb); 782 netif_rx(skb);
783 p->stats.rx_packets++; 783 p->stats.rx_packets++;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 791e081fdc15..f1548c033327 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -853,10 +853,9 @@ static int lance_rx( struct net_device *dev )
853 853
854 skb_reserve( skb, 2 ); /* 16 byte align */ 854 skb_reserve( skb, 2 ); /* 16 byte align */
855 skb_put( skb, pkt_len ); /* Make room */ 855 skb_put( skb, pkt_len ); /* Make room */
856// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); 856 skb_copy_to_linear_data(skb,
857 eth_copy_and_sum(skb,
858 PKTBUF_ADDR(head), 857 PKTBUF_ADDR(head),
859 pkt_len, 0); 858 pkt_len);
860 859
861 skb->protocol = eth_type_trans( skb, dev ); 860 skb->protocol = eth_type_trans( skb, dev );
862 netif_rx( skb ); 861 netif_rx( skb );
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 2ad8d58dee3b..b3e0158def4f 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -860,7 +860,7 @@ static void bigmac_rx(struct bigmac *bp)
860 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, 860 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
861 this->rx_addr, len, 861 this->rx_addr, len,
862 SBUS_DMA_FROMDEVICE); 862 SBUS_DMA_FROMDEVICE);
863 eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0); 863 skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
864 sbus_dma_sync_single_for_device(bp->bigmac_sdev, 864 sbus_dma_sync_single_for_device(bp->bigmac_sdev,
865 this->rx_addr, len, 865 this->rx_addr, len,
866 SBUS_DMA_FROMDEVICE); 866 SBUS_DMA_FROMDEVICE);
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index e1f912d04043..c8ba534c17bf 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1313,7 +1313,7 @@ static void rx_poll(unsigned long data)
1313 np->rx_buf_sz, 1313 np->rx_buf_sz,
1314 PCI_DMA_FROMDEVICE); 1314 PCI_DMA_FROMDEVICE);
1315 1315
1316 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); 1316 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1317 pci_dma_sync_single_for_device(np->pci_dev, 1317 pci_dma_sync_single_for_device(np->pci_dev,
1318 desc->frag[0].addr, 1318 desc->frag[0].addr,
1319 np->rx_buf_sz, 1319 np->rx_buf_sz,
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 42722530ab24..053b7cb0d944 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -549,9 +549,9 @@ static void lance_rx_dvma(struct net_device *dev)
549 549
550 skb_reserve(skb, 2); /* 16 byte align */ 550 skb_reserve(skb, 2); /* 16 byte align */
551 skb_put(skb, len); /* make room */ 551 skb_put(skb, len); /* make room */
552 eth_copy_and_sum(skb, 552 skb_copy_to_linear_data(skb,
553 (unsigned char *)&(ib->rx_buf [entry][0]), 553 (unsigned char *)&(ib->rx_buf [entry][0]),
554 len, 0); 554 len);
555 skb->protocol = eth_type_trans(skb, dev); 555 skb->protocol = eth_type_trans(skb, dev);
556 netif_rx(skb); 556 netif_rx(skb);
557 dev->last_rx = jiffies; 557 dev->last_rx = jiffies;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index fa70e0b78af7..1b65ae8a1c7c 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -439,8 +439,8 @@ static void qe_rx(struct sunqe *qep)
439 } else { 439 } else {
440 skb_reserve(skb, 2); 440 skb_reserve(skb, 2);
441 skb_put(skb, len); 441 skb_put(skb, len);
442 eth_copy_and_sum(skb, (unsigned char *) this_qbuf, 442 skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
443 len, 0); 443 len);
444 skb->protocol = eth_type_trans(skb, qep->dev); 444 skb->protocol = eth_type_trans(skb, qep->dev);
445 netif_rx(skb); 445 netif_rx(skb);
446 qep->dev->last_rx = jiffies; 446 qep->dev->last_rx = jiffies;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 2f3184184ad9..3245f16baabc 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 66#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.77" 67#define DRV_MODULE_VERSION "3.78"
68#define DRV_MODULE_RELDATE "May 31, 2007" 68#define DRV_MODULE_RELDATE "July 11, 2007"
69 69
70#define TG3_DEF_MAC_MODE 0 70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 71#define TG3_DEF_RX_MODE 0
@@ -721,6 +721,44 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
721 return ret; 721 return ret;
722} 722}
723 723
724static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
725{
726 u32 phy;
727
728 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
729 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
730 return;
731
732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
733 u32 ephy;
734
735 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
736 tg3_writephy(tp, MII_TG3_EPHY_TEST,
737 ephy | MII_TG3_EPHY_SHADOW_EN);
738 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
739 if (enable)
740 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
741 else
742 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
743 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
744 }
745 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
746 }
747 } else {
748 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
749 MII_TG3_AUXCTL_SHDWSEL_MISC;
750 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
751 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
752 if (enable)
753 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
754 else
755 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
756 phy |= MII_TG3_AUXCTL_MISC_WREN;
757 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
758 }
759 }
760}
761
724static void tg3_phy_set_wirespeed(struct tg3 *tp) 762static void tg3_phy_set_wirespeed(struct tg3 *tp)
725{ 763{
726 u32 val; 764 u32 val;
@@ -1045,23 +1083,11 @@ out:
1045 } 1083 }
1046 1084
1047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 1085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1048 u32 phy_reg;
1049
1050 /* adjust output voltage */ 1086 /* adjust output voltage */
1051 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); 1087 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1052
1053 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
1054 u32 phy_reg2;
1055
1056 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1057 phy_reg | MII_TG3_EPHY_SHADOW_EN);
1058 /* Enable auto-MDIX */
1059 if (!tg3_readphy(tp, 0x10, &phy_reg2))
1060 tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
1061 tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
1062 }
1063 } 1088 }
1064 1089
1090 tg3_phy_toggle_automdix(tp, 1);
1065 tg3_phy_set_wirespeed(tp); 1091 tg3_phy_set_wirespeed(tp);
1066 return 0; 1092 return 0;
1067} 1093}
@@ -1162,6 +1188,19 @@ static void tg3_frob_aux_power(struct tg3 *tp)
1162 } 1188 }
1163} 1189}
1164 1190
1191static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1192{
1193 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1194 return 1;
1195 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1196 if (speed != SPEED_10)
1197 return 1;
1198 } else if (speed == SPEED_10)
1199 return 1;
1200
1201 return 0;
1202}
1203
1165static int tg3_setup_phy(struct tg3 *, int); 1204static int tg3_setup_phy(struct tg3 *, int);
1166 1205
1167#define RESET_KIND_SHUTDOWN 0 1206#define RESET_KIND_SHUTDOWN 0
@@ -1320,9 +1359,17 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1320 else 1359 else
1321 mac_mode = MAC_MODE_PORT_MODE_MII; 1360 mac_mode = MAC_MODE_PORT_MODE_MII;
1322 1361
1323 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 || 1362 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1324 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)) 1363 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1325 mac_mode |= MAC_MODE_LINK_POLARITY; 1364 ASIC_REV_5700) {
1365 u32 speed = (tp->tg3_flags &
1366 TG3_FLAG_WOL_SPEED_100MB) ?
1367 SPEED_100 : SPEED_10;
1368 if (tg3_5700_link_polarity(tp, speed))
1369 mac_mode |= MAC_MODE_LINK_POLARITY;
1370 else
1371 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1372 }
1326 } else { 1373 } else {
1327 mac_mode = MAC_MODE_PORT_MODE_TBI; 1374 mac_mode = MAC_MODE_PORT_MODE_TBI;
1328 } 1375 }
@@ -1990,15 +2037,12 @@ relink:
1990 if (tp->link_config.active_duplex == DUPLEX_HALF) 2037 if (tp->link_config.active_duplex == DUPLEX_HALF)
1991 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 2038 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1992 2039
1993 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { 2040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1995 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) || 2041 if (current_link_up == 1 &&
1996 (current_link_up == 1 && 2042 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1997 tp->link_config.active_speed == SPEED_10))
1998 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1999 } else {
2000 if (current_link_up == 1)
2001 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 2043 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2044 else
2045 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2002 } 2046 }
2003 2047
2004 /* ??? Without this setting Netgear GA302T PHY does not 2048 /* ??? Without this setting Netgear GA302T PHY does not
@@ -2639,6 +2683,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2639 2683
2640 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 2684 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2641 udelay(40); 2685 udelay(40);
2686
2687 tw32_f(MAC_MODE, tp->mac_mode);
2688 udelay(40);
2642 } 2689 }
2643 2690
2644out: 2691out:
@@ -2698,10 +2745,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2698 else 2745 else
2699 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 2746 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2700 2747
2701 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2702 tw32_f(MAC_MODE, tp->mac_mode);
2703 udelay(40);
2704
2705 tp->hw_status->status = 2748 tp->hw_status->status =
2706 (SD_STATUS_UPDATED | 2749 (SD_STATUS_UPDATED |
2707 (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); 2750 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
@@ -3512,9 +3555,9 @@ static inline int tg3_irq_sync(struct tg3 *tp)
3512 */ 3555 */
3513static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 3556static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3514{ 3557{
3558 spin_lock_bh(&tp->lock);
3515 if (irq_sync) 3559 if (irq_sync)
3516 tg3_irq_quiesce(tp); 3560 tg3_irq_quiesce(tp);
3517 spin_lock_bh(&tp->lock);
3518} 3561}
3519 3562
3520static inline void tg3_full_unlock(struct tg3 *tp) 3563static inline void tg3_full_unlock(struct tg3 *tp)
@@ -6444,6 +6487,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6444 6487
6445 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 6488 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6446 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 6489 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6490 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6491 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6492 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6493 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6447 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 6494 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6448 udelay(40); 6495 udelay(40);
6449 6496
@@ -8805,7 +8852,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8805 return 0; 8852 return 0;
8806 8853
8807 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 8854 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8808 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY; 8855 MAC_MODE_PORT_INT_LPBACK;
8856 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8857 mac_mode |= MAC_MODE_LINK_POLARITY;
8809 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 8858 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8810 mac_mode |= MAC_MODE_PORT_MODE_MII; 8859 mac_mode |= MAC_MODE_PORT_MODE_MII;
8811 else 8860 else
@@ -8824,19 +8873,18 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8824 phytest | MII_TG3_EPHY_SHADOW_EN); 8873 phytest | MII_TG3_EPHY_SHADOW_EN);
8825 if (!tg3_readphy(tp, 0x1b, &phy)) 8874 if (!tg3_readphy(tp, 0x1b, &phy))
8826 tg3_writephy(tp, 0x1b, phy & ~0x20); 8875 tg3_writephy(tp, 0x1b, phy & ~0x20);
8827 if (!tg3_readphy(tp, 0x10, &phy))
8828 tg3_writephy(tp, 0x10, phy & ~0x4000);
8829 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest); 8876 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
8830 } 8877 }
8831 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; 8878 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
8832 } else 8879 } else
8833 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; 8880 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
8834 8881
8882 tg3_phy_toggle_automdix(tp, 0);
8883
8835 tg3_writephy(tp, MII_BMCR, val); 8884 tg3_writephy(tp, MII_BMCR, val);
8836 udelay(40); 8885 udelay(40);
8837 8886
8838 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 8887 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
8839 MAC_MODE_LINK_POLARITY;
8840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 8888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8841 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); 8889 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
8842 mac_mode |= MAC_MODE_PORT_MODE_MII; 8890 mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -8849,8 +8897,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8849 udelay(10); 8897 udelay(10);
8850 tw32_f(MAC_RX_MODE, tp->rx_mode); 8898 tw32_f(MAC_RX_MODE, tp->rx_mode);
8851 } 8899 }
8852 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 8900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
8853 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8901 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8902 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8903 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
8904 mac_mode |= MAC_MODE_LINK_POLARITY;
8854 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8905 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8855 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8906 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8856 } 8907 }
@@ -9116,10 +9167,10 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9116 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ 9167 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9117 __tg3_set_rx_mode(dev); 9168 __tg3_set_rx_mode(dev);
9118 9169
9119 tg3_full_unlock(tp);
9120
9121 if (netif_running(dev)) 9170 if (netif_running(dev))
9122 tg3_netif_start(tp); 9171 tg3_netif_start(tp);
9172
9173 tg3_full_unlock(tp);
9123} 9174}
9124#endif 9175#endif
9125 9176
@@ -9410,11 +9461,13 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9410 case FLASH_5755VENDOR_ATMEL_FLASH_1: 9461 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9411 case FLASH_5755VENDOR_ATMEL_FLASH_2: 9462 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9412 case FLASH_5755VENDOR_ATMEL_FLASH_3: 9463 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9464 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9413 tp->nvram_jedecnum = JEDEC_ATMEL; 9465 tp->nvram_jedecnum = JEDEC_ATMEL;
9414 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 9466 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9415 tp->tg3_flags2 |= TG3_FLG2_FLASH; 9467 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9416 tp->nvram_pagesize = 264; 9468 tp->nvram_pagesize = 264;
9417 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1) 9469 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9470 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9418 tp->nvram_size = (protect ? 0x3e200 : 0x80000); 9471 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9419 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 9472 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9420 tp->nvram_size = (protect ? 0x1f200 : 0x40000); 9473 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
@@ -11944,12 +11997,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11944 * checksumming. 11997 * checksumming.
11945 */ 11998 */
11946 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { 11999 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12000 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
11947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 12001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 12002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11949 dev->features |= NETIF_F_HW_CSUM; 12003 dev->features |= NETIF_F_IPV6_CSUM;
11950 else 12004
11951 dev->features |= NETIF_F_IP_CSUM;
11952 dev->features |= NETIF_F_SG;
11953 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 12005 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11954 } else 12006 } else
11955 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; 12007 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bd9f4f428e5b..d84e75e7365d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1467,6 +1467,7 @@
1467#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 1467#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
1468#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 1468#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
1469#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 1469#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003
1470#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003
1470#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 1471#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003
1471#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 1472#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002
1472#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 1473#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
@@ -1642,6 +1643,11 @@
1642 1643
1643#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */ 1644#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */
1644 1645
1646#define MII_TG3_AUXCTL_MISC_WREN 0x8000
1647#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
1648#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
1649#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
1650
1645#define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */ 1651#define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */
1646#define MII_TG3_AUX_STAT_LPASS 0x0004 1652#define MII_TG3_AUX_STAT_LPASS 0x0004
1647#define MII_TG3_AUX_STAT_SPDMASK 0x0700 1653#define MII_TG3_AUX_STAT_SPDMASK 0x0700
@@ -1667,6 +1673,9 @@
1667#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ 1673#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
1668#define MII_TG3_EPHY_SHADOW_EN 0x80 1674#define MII_TG3_EPHY_SHADOW_EN 0x80
1669 1675
1676#define MII_TG3_EPHYTST_MISCCTRL 0x10 /* 5906 EPHY misc ctrl shadow register */
1677#define MII_TG3_EPHYTST_MISCCTRL_MDIX 0x4000
1678
1670#define MII_TG3_TEST1 0x1e 1679#define MII_TG3_TEST1 0x1e
1671#define MII_TG3_TEST1_TRIM_EN 0x0010 1680#define MII_TG3_TEST1_TRIM_EN 0x0010
1672#define MII_TG3_TEST1_CRC_EN 0x8000 1681#define MII_TG3_TEST1_CRC_EN 0x8000
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index ea896777bcaf..53efd6694e75 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -197,8 +197,8 @@ int tulip_poll(struct net_device *dev, int *budget)
197 tp->rx_buffers[entry].mapping, 197 tp->rx_buffers[entry].mapping,
198 pkt_len, PCI_DMA_FROMDEVICE); 198 pkt_len, PCI_DMA_FROMDEVICE);
199#if ! defined(__alpha__) 199#if ! defined(__alpha__)
200 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, 200 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
201 pkt_len, 0); 201 pkt_len);
202 skb_put(skb, pkt_len); 202 skb_put(skb, pkt_len);
203#else 203#else
204 memcpy(skb_put(skb, pkt_len), 204 memcpy(skb_put(skb, pkt_len),
@@ -420,8 +420,8 @@ static int tulip_rx(struct net_device *dev)
420 tp->rx_buffers[entry].mapping, 420 tp->rx_buffers[entry].mapping,
421 pkt_len, PCI_DMA_FROMDEVICE); 421 pkt_len, PCI_DMA_FROMDEVICE);
422#if ! defined(__alpha__) 422#if ! defined(__alpha__)
423 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, 423 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
424 pkt_len, 0); 424 pkt_len);
425 skb_put(skb, pkt_len); 425 skb_put(skb, pkt_len);
426#else 426#else
427 memcpy(skb_put(skb, pkt_len), 427 memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 38f3b99716b8..5824f6a35495 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1232,7 +1232,7 @@ static int netdev_rx(struct net_device *dev)
1232 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], 1232 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1233 np->rx_skbuff[entry]->len, 1233 np->rx_skbuff[entry]->len,
1234 PCI_DMA_FROMDEVICE); 1234 PCI_DMA_FROMDEVICE);
1235 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); 1235 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1236 skb_put(skb, pkt_len); 1236 skb_put(skb, pkt_len);
1237 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], 1237 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1238 np->rx_skbuff[entry]->len, 1238 np->rx_skbuff[entry]->len,
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 2470b1ee33c0..37e35cd277a1 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1208,7 +1208,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1208 goto out; 1208 goto out;
1209 } 1209 }
1210 skb_reserve(skb, 2); 1210 skb_reserve(skb, 2);
1211 eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); 1211 skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
1212 skb_put(skb, pkt_len); 1212 skb_put(skb, pkt_len);
1213 skb->protocol = eth_type_trans(skb, dev); 1213 skb->protocol = eth_type_trans(skb, dev);
1214 netif_rx(skb); 1214 netif_rx(skb);
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index f64172927377..f984fbde8b23 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -1242,8 +1242,8 @@ xircom_rx(struct net_device *dev)
1242 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1242 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1243 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1243 skb_reserve(skb, 2); /* 16 byte align the IP header */
1244#if ! defined(__alpha__) 1244#if ! defined(__alpha__)
1245 eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), 1245 skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
1246 pkt_len, 0); 1246 pkt_len);
1247 skb_put(skb, pkt_len); 1247 skb_put(skb, pkt_len);
1248#else 1248#else
1249 memcpy(skb_put(skb, pkt_len), 1249 memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a2c6caaaae93..62b2b3005019 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -432,6 +432,7 @@ static void tun_setup(struct net_device *dev)
432 init_waitqueue_head(&tun->read_wait); 432 init_waitqueue_head(&tun->read_wait);
433 433
434 tun->owner = -1; 434 tun->owner = -1;
435 tun->group = -1;
435 436
436 SET_MODULE_OWNER(dev); 437 SET_MODULE_OWNER(dev);
437 dev->open = tun_net_open; 438 dev->open = tun_net_open;
@@ -467,8 +468,11 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
467 return -EBUSY; 468 return -EBUSY;
468 469
469 /* Check permissions */ 470 /* Check permissions */
470 if (tun->owner != -1 && 471 if (((tun->owner != -1 &&
471 current->euid != tun->owner && !capable(CAP_NET_ADMIN)) 472 current->euid != tun->owner) ||
473 (tun->group != -1 &&
474 current->egid != tun->group)) &&
475 !capable(CAP_NET_ADMIN))
472 return -EPERM; 476 return -EPERM;
473 } 477 }
474 else if (__dev_get_by_name(ifr->ifr_name)) 478 else if (__dev_get_by_name(ifr->ifr_name))
@@ -610,6 +614,13 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
610 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); 614 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
611 break; 615 break;
612 616
617 case TUNSETGROUP:
618 /* Set group of the device */
619 tun->group= (gid_t) arg;
620
621 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
622 break;
623
613 case TUNSETLINK: 624 case TUNSETLINK:
614 /* Only allow setting the type when the interface is down */ 625 /* Only allow setting the type when the interface is down */
615 if (tun->dev->flags & IFF_UP) { 626 if (tun->dev->flags & IFF_UP) {
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 15b2fb8aa492..df524548d531 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1703,7 +1703,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
1703 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, 1703 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1704 PKT_BUF_SZ, 1704 PKT_BUF_SZ,
1705 PCI_DMA_FROMDEVICE); 1705 PCI_DMA_FROMDEVICE);
1706 eth_copy_and_sum(new_skb, skb->data, pkt_len, 0); 1706 skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
1707 pci_dma_sync_single_for_device(tp->pdev, dma_addr, 1707 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1708 PKT_BUF_SZ, 1708 PKT_BUF_SZ,
1709 PCI_DMA_FROMDEVICE); 1709 PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 86e90c59d551..76752d84a30f 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -255,7 +255,7 @@ static void catc_rx_done(struct urb *urb)
255 if (!(skb = dev_alloc_skb(pkt_len))) 255 if (!(skb = dev_alloc_skb(pkt_len)))
256 return; 256 return;
257 257
258 eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); 258 skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
259 skb_put(skb, pkt_len); 259 skb_put(skb, pkt_len);
260 260
261 skb->protocol = eth_type_trans(skb, catc->netdev); 261 skb->protocol = eth_type_trans(skb, catc->netdev);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 60d29440f316..524dc5f5e46d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -635,7 +635,7 @@ static void kaweth_usb_receive(struct urb *urb)
635 635
636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
637 637
638 eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); 638 skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
639 639
640 skb_put(skb, pkt_len); 640 skb_put(skb, pkt_len);
641 641
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index adea290a9d5e..565f6cc185ce 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1492,9 +1492,9 @@ static int rhine_rx(struct net_device *dev, int limit)
1492 rp->rx_buf_sz, 1492 rp->rx_buf_sz,
1493 PCI_DMA_FROMDEVICE); 1493 PCI_DMA_FROMDEVICE);
1494 1494
1495 eth_copy_and_sum(skb, 1495 skb_copy_to_linear_data(skb,
1496 rp->rx_skbuff[entry]->data, 1496 rp->rx_skbuff[entry]->data,
1497 pkt_len, 0); 1497 pkt_len);
1498 skb_put(skb, pkt_len); 1498 skb_put(skb, pkt_len);
1499 pci_dma_sync_single_for_device(rp->pdev, 1499 pci_dma_sync_single_for_device(rp->pdev,
1500 rp->rx_skbuff_dma[entry], 1500 rp->rx_skbuff_dma[entry],
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ce9230b2f630..c8b5c2271938 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1011,7 +1011,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
1011 } else { 1011 } else {
1012 skb->dev = dev; 1012 skb->dev = dev;
1013 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ 1013 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
1014 eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0); 1014 skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
1015 wl3501_receive(this, skb->data, pkt_len); 1015 wl3501_receive(this, skb->data, pkt_len);
1016 skb_put(skb, pkt_len); 1016 skb_put(skb, pkt_len);
1017 skb->protocol = eth_type_trans(skb, dev); 1017 skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index f2a90a7fa2d6..870c5393c21a 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1137,7 +1137,7 @@ static int yellowfin_rx(struct net_device *dev)
1137 if (skb == NULL) 1137 if (skb == NULL)
1138 break; 1138 break;
1139 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1139 skb_reserve(skb, 2); /* 16 byte align the IP header */
1140 eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); 1140 skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1141 skb_put(skb, pkt_len); 1141 skb_put(skb, pkt_len);
1142 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr, 1142 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
1143 yp->rx_buf_sz, 1143 yp->rx_buf_sz,
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index afae306b177c..127d2d192b5a 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -91,7 +91,6 @@ header-y += in6.h
91header-y += in_route.h 91header-y += in_route.h
92header-y += ioctl.h 92header-y += ioctl.h
93header-y += ipmi_msgdefs.h 93header-y += ipmi_msgdefs.h
94header-y += ip_mp_alg.h
95header-y += ipsec.h 94header-y += ipsec.h
96header-y += ipx.h 95header-y += ipx.h
97header-y += irda.h 96header-y += irda.h
@@ -226,6 +225,7 @@ unifdef-y += if_fddi.h
226unifdef-y += if_frad.h 225unifdef-y += if_frad.h
227unifdef-y += if_ltalk.h 226unifdef-y += if_ltalk.h
228unifdef-y += if_link.h 227unifdef-y += if_link.h
228unifdef-y += if_pppol2tp.h
229unifdef-y += if_pppox.h 229unifdef-y += if_pppox.h
230unifdef-y += if_shaper.h 230unifdef-y += if_shaper.h
231unifdef-y += if_tr.h 231unifdef-y += if_tr.h
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 071c67abed86..6cdb97365e47 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -39,13 +39,8 @@ extern void eth_header_cache_update(struct hh_cache *hh, struct net_device *dev
39extern int eth_header_cache(struct neighbour *neigh, 39extern int eth_header_cache(struct neighbour *neigh,
40 struct hh_cache *hh); 40 struct hh_cache *hh);
41 41
42extern struct net_device *alloc_etherdev(int sizeof_priv); 42extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
43static inline void eth_copy_and_sum (struct sk_buff *dest, 43#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
44 const unsigned char *src,
45 int len, int base)
46{
47 memcpy (dest->data, src, len);
48}
49 44
50/** 45/**
51 * is_zero_ether_addr - Determine if give Ethernet address is all zeros. 46 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 604c2434f71c..422084d18ce1 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -76,6 +76,8 @@ enum
76#define IFLA_WEIGHT IFLA_WEIGHT 76#define IFLA_WEIGHT IFLA_WEIGHT
77 IFLA_OPERSTATE, 77 IFLA_OPERSTATE,
78 IFLA_LINKMODE, 78 IFLA_LINKMODE,
79 IFLA_LINKINFO,
80#define IFLA_LINKINFO IFLA_LINKINFO
79 __IFLA_MAX 81 __IFLA_MAX
80}; 82};
81 83
@@ -140,4 +142,49 @@ struct ifla_cacheinfo
140 __u32 retrans_time; 142 __u32 retrans_time;
141}; 143};
142 144
145enum
146{
147 IFLA_INFO_UNSPEC,
148 IFLA_INFO_KIND,
149 IFLA_INFO_DATA,
150 IFLA_INFO_XSTATS,
151 __IFLA_INFO_MAX,
152};
153
154#define IFLA_INFO_MAX (__IFLA_INFO_MAX - 1)
155
156/* VLAN section */
157
158enum
159{
160 IFLA_VLAN_UNSPEC,
161 IFLA_VLAN_ID,
162 IFLA_VLAN_FLAGS,
163 IFLA_VLAN_EGRESS_QOS,
164 IFLA_VLAN_INGRESS_QOS,
165 __IFLA_VLAN_MAX,
166};
167
168#define IFLA_VLAN_MAX (__IFLA_VLAN_MAX - 1)
169
170struct ifla_vlan_flags {
171 __u32 flags;
172 __u32 mask;
173};
174
175enum
176{
177 IFLA_VLAN_QOS_UNSPEC,
178 IFLA_VLAN_QOS_MAPPING,
179 __IFLA_VLAN_QOS_MAX
180};
181
182#define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1)
183
184struct ifla_vlan_qos_mapping
185{
186 __u32 from;
187 __u32 to;
188};
189
143#endif /* _LINUX_IF_LINK_H */ 190#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_ppp.h b/include/linux/if_ppp.h
index 768372f07caa..0f2f70d4e48c 100644
--- a/include/linux/if_ppp.h
+++ b/include/linux/if_ppp.h
@@ -110,6 +110,21 @@ struct ifpppcstatsreq {
110 struct ppp_comp_stats stats; 110 struct ppp_comp_stats stats;
111}; 111};
112 112
113/* For PPPIOCGL2TPSTATS */
114struct pppol2tp_ioc_stats {
115 __u16 tunnel_id; /* redundant */
116 __u16 session_id; /* if zero, get tunnel stats */
117 __u32 using_ipsec:1; /* valid only for session_id == 0 */
118 aligned_u64 tx_packets;
119 aligned_u64 tx_bytes;
120 aligned_u64 tx_errors;
121 aligned_u64 rx_packets;
122 aligned_u64 rx_bytes;
123 aligned_u64 rx_seq_discards;
124 aligned_u64 rx_oos_packets;
125 aligned_u64 rx_errors;
126};
127
113#define ifr__name b.ifr_ifrn.ifrn_name 128#define ifr__name b.ifr_ifrn.ifrn_name
114#define stats_ptr b.ifr_ifru.ifru_data 129#define stats_ptr b.ifr_ifru.ifru_data
115 130
@@ -146,6 +161,7 @@ struct ifpppcstatsreq {
146#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */ 161#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */
147#define PPPIOCATTCHAN _IOW('t', 56, int) /* attach to ppp channel */ 162#define PPPIOCATTCHAN _IOW('t', 56, int) /* attach to ppp channel */
148#define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */ 163#define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */
164#define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
149 165
150#define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0) 166#define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0)
151#define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */ 167#define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
new file mode 100644
index 000000000000..516203b6fdeb
--- /dev/null
+++ b/include/linux/if_pppol2tp.h
@@ -0,0 +1,69 @@
1/***************************************************************************
2 * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661)
3 *
4 * This file supplies definitions required by the PPP over L2TP driver
5 * (pppol2tp.c). All version information wrt this file is located in pppol2tp.c
6 *
7 * License:
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15#ifndef __LINUX_IF_PPPOL2TP_H
16#define __LINUX_IF_PPPOL2TP_H
17
18#include <asm/types.h>
19
20#ifdef __KERNEL__
21#include <linux/in.h>
22#endif
23
24/* Structure used to connect() the socket to a particular tunnel UDP
25 * socket.
26 */
27struct pppol2tp_addr
28{
29 pid_t pid; /* pid that owns the fd.
30 * 0 => current */
31 int fd; /* FD of UDP socket to use */
32
33 struct sockaddr_in addr; /* IP address and port to send to */
34
35 __be16 s_tunnel, s_session; /* For matching incoming packets */
36 __be16 d_tunnel, d_session; /* For sending outgoing packets */
37};
38
39/* Socket options:
40 * DEBUG - bitmask of debug message categories
41 * SENDSEQ - 0 => don't send packets with sequence numbers
42 * 1 => send packets with sequence numbers
43 * RECVSEQ - 0 => receive packet sequence numbers are optional
44 * 1 => drop receive packets without sequence numbers
45 * LNSMODE - 0 => act as LAC.
46 * 1 => act as LNS.
47 * REORDERTO - reorder timeout (in millisecs). If 0, don't try to reorder.
48 */
49enum {
50 PPPOL2TP_SO_DEBUG = 1,
51 PPPOL2TP_SO_RECVSEQ = 2,
52 PPPOL2TP_SO_SENDSEQ = 3,
53 PPPOL2TP_SO_LNSMODE = 4,
54 PPPOL2TP_SO_REORDERTO = 5,
55};
56
57/* Debug message categories for the DEBUG socket option */
58enum {
59 PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
60 * compiled in) */
61 PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
62 * interface */
63 PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
64 PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */
65};
66
67
68
69#endif
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 6f987be60fe2..25652545ba6e 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -27,6 +27,7 @@
27#include <asm/semaphore.h> 27#include <asm/semaphore.h>
28#include <linux/ppp_channel.h> 28#include <linux/ppp_channel.h>
29#endif /* __KERNEL__ */ 29#endif /* __KERNEL__ */
30#include <linux/if_pppol2tp.h>
30 31
31/* For user-space programs to pick up these definitions 32/* For user-space programs to pick up these definitions
32 * which they wouldn't get otherwise without defining __KERNEL__ 33 * which they wouldn't get otherwise without defining __KERNEL__
@@ -50,8 +51,9 @@ struct pppoe_addr{
50 * Protocols supported by AF_PPPOX 51 * Protocols supported by AF_PPPOX
51 */ 52 */
52#define PX_PROTO_OE 0 /* Currently just PPPoE */ 53#define PX_PROTO_OE 0 /* Currently just PPPoE */
53#define PX_MAX_PROTO 1 54#define PX_PROTO_OL2TP 1 /* Now L2TP also */
54 55#define PX_MAX_PROTO 2
56
55struct sockaddr_pppox { 57struct sockaddr_pppox {
56 sa_family_t sa_family; /* address family, AF_PPPOX */ 58 sa_family_t sa_family; /* address family, AF_PPPOX */
57 unsigned int sa_protocol; /* protocol identifier */ 59 unsigned int sa_protocol; /* protocol identifier */
@@ -60,6 +62,16 @@ struct sockaddr_pppox {
60 }sa_addr; 62 }sa_addr;
61}__attribute__ ((packed)); 63}__attribute__ ((packed));
62 64
65/* The use of the above union isn't viable because the size of this
66 * struct must stay fixed over time -- applications use sizeof(struct
67 * sockaddr_pppox) to fill it. We use a protocol specific sockaddr
68 * type instead.
69 */
70struct sockaddr_pppol2tp {
71 sa_family_t sa_family; /* address family, AF_PPPOX */
72 unsigned int sa_protocol; /* protocol identifier */
73 struct pppol2tp_addr pppol2tp;
74}__attribute__ ((packed));
63 75
64/********************************************************************* 76/*********************************************************************
65 * 77 *
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 88aef7b86ef4..42eb6945b93e 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -36,6 +36,7 @@ struct tun_struct {
36 unsigned long flags; 36 unsigned long flags;
37 int attached; 37 int attached;
38 uid_t owner; 38 uid_t owner;
39 gid_t group;
39 40
40 wait_queue_head_t read_wait; 41 wait_queue_head_t read_wait;
41 struct sk_buff_head readq; 42 struct sk_buff_head readq;
@@ -78,6 +79,7 @@ struct tun_struct {
78#define TUNSETPERSIST _IOW('T', 203, int) 79#define TUNSETPERSIST _IOW('T', 203, int)
79#define TUNSETOWNER _IOW('T', 204, int) 80#define TUNSETOWNER _IOW('T', 204, int)
80#define TUNSETLINK _IOW('T', 205, int) 81#define TUNSETLINK _IOW('T', 205, int)
82#define TUNSETGROUP _IOW('T', 206, int)
81 83
82/* TUNSETIFF ifr flags */ 84/* TUNSETIFF ifr flags */
83#define IFF_TUN 0x0001 85#define IFF_TUN 0x0001
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 81e9bc93569b..61a57dc2ac99 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -99,7 +99,7 @@ static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
99} 99}
100 100
101struct vlan_priority_tci_mapping { 101struct vlan_priority_tci_mapping {
102 unsigned long priority; 102 u32 priority;
103 unsigned short vlan_qos; /* This should be shifted when first set, so we only do it 103 unsigned short vlan_qos; /* This should be shifted when first set, so we only do it
104 * at provisioning time. 104 * at provisioning time.
105 * ((skb->priority << 13) & 0xE000) 105 * ((skb->priority << 13) & 0xE000)
@@ -112,7 +112,10 @@ struct vlan_dev_info {
112 /** This will be the mapping that correlates skb->priority to 112 /** This will be the mapping that correlates skb->priority to
113 * 3 bits of VLAN QOS tags... 113 * 3 bits of VLAN QOS tags...
114 */ 114 */
115 unsigned long ingress_priority_map[8]; 115 unsigned int nr_ingress_mappings;
116 u32 ingress_priority_map[8];
117
118 unsigned int nr_egress_mappings;
116 struct vlan_priority_tci_mapping *egress_priority_map[16]; /* hash table */ 119 struct vlan_priority_tci_mapping *egress_priority_map[16]; /* hash table */
117 120
118 unsigned short vlan_id; /* The VLAN Identifier for this interface. */ 121 unsigned short vlan_id; /* The VLAN Identifier for this interface. */
@@ -132,6 +135,7 @@ struct vlan_dev_info {
132 int old_allmulti; /* similar to above. */ 135 int old_allmulti; /* similar to above. */
133 int old_promiscuity; /* similar to above. */ 136 int old_promiscuity; /* similar to above. */
134 struct net_device *real_dev; /* the underlying device/interface */ 137 struct net_device *real_dev; /* the underlying device/interface */
138 unsigned char real_dev_addr[ETH_ALEN];
135 struct proc_dir_entry *dent; /* Holds the proc data */ 139 struct proc_dir_entry *dent; /* Holds the proc data */
136 unsigned long cnt_inc_headroom_on_tx; /* How many times did we have to grow the skb on TX. */ 140 unsigned long cnt_inc_headroom_on_tx; /* How many times did we have to grow the skb on TX. */
137 unsigned long cnt_encap_on_xmit; /* How many times did we have to encapsulate the skb on TX. */ 141 unsigned long cnt_encap_on_xmit; /* How many times did we have to encapsulate the skb on TX. */
@@ -395,6 +399,10 @@ enum vlan_ioctl_cmds {
395 GET_VLAN_VID_CMD /* Get the VID of this VLAN (specified by name) */ 399 GET_VLAN_VID_CMD /* Get the VID of this VLAN (specified by name) */
396}; 400};
397 401
402enum vlan_flags {
403 VLAN_FLAG_REORDER_HDR = 0x1,
404};
405
398enum vlan_name_types { 406enum vlan_name_types {
399 VLAN_NAME_TYPE_PLUS_VID, /* Name will look like: vlan0005 */ 407 VLAN_NAME_TYPE_PLUS_VID, /* Name will look like: vlan0005 */
400 VLAN_NAME_TYPE_RAW_PLUS_VID, /* name will look like: eth1.0005 */ 408 VLAN_NAME_TYPE_RAW_PLUS_VID, /* name will look like: eth1.0005 */
diff --git a/include/linux/ip_mp_alg.h b/include/linux/ip_mp_alg.h
deleted file mode 100644
index e234e2008f5d..000000000000
--- a/include/linux/ip_mp_alg.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/* ip_mp_alg.h: IPV4 multipath algorithm support, user-visible values.
2 *
3 * Copyright (C) 2004, 2005 Einar Lueck <elueck@de.ibm.com>
4 * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
5 */
6
7#ifndef _LINUX_IP_MP_ALG_H
8#define _LINUX_IP_MP_ALG_H
9
10enum ip_mp_alg {
11 IP_MP_ALG_NONE,
12 IP_MP_ALG_RR,
13 IP_MP_ALG_DRR,
14 IP_MP_ALG_RANDOM,
15 IP_MP_ALG_WRANDOM,
16 __IP_MP_ALG_MAX
17};
18
19#define IP_MP_ALG_MAX (__IP_MP_ALG_MAX - 1)
20
21#endif /* _LINUX_IP_MP_ALG_H */
22
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 648bd1f0912d..97983dc9df13 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -27,8 +27,8 @@ struct in6_ifreq {
27 int ifr6_ifindex; 27 int ifr6_ifindex;
28}; 28};
29 29
30#define IPV6_SRCRT_STRICT 0x01 /* this hop must be a neighbor */ 30#define IPV6_SRCRT_STRICT 0x01 /* Deprecated; will be removed */
31#define IPV6_SRCRT_TYPE_0 0 /* IPv6 type 0 Routing Header */ 31#define IPV6_SRCRT_TYPE_0 0 /* Deprecated; will be removed */
32#define IPV6_SRCRT_TYPE_2 2 /* IPv6 type 2 Routing Header */ 32#define IPV6_SRCRT_TYPE_2 2 /* IPv6 type 2 Routing Header */
33 33
34/* 34/*
@@ -247,7 +247,7 @@ struct inet6_skb_parm {
247 __u16 lastopt; 247 __u16 lastopt;
248 __u32 nhoff; 248 __u32 nhoff;
249 __u16 flags; 249 __u16 flags;
250#ifdef CONFIG_IPV6_MIP6 250#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
251 __u16 dsthao; 251 __u16 dsthao;
252#endif 252#endif
253 253
@@ -299,8 +299,8 @@ struct ipv6_pinfo {
299 /* pktoption flags */ 299 /* pktoption flags */
300 union { 300 union {
301 struct { 301 struct {
302 __u16 srcrt:2, 302 __u16 srcrt:1,
303 osrcrt:2, 303 osrcrt:1,
304 rxinfo:1, 304 rxinfo:1,
305 rxoinfo:1, 305 rxoinfo:1,
306 rxhlim:1, 306 rxhlim:1,
diff --git a/include/linux/irda.h b/include/linux/irda.h
index 945ba3110874..8e3735714c1c 100644
--- a/include/linux/irda.h
+++ b/include/linux/irda.h
@@ -216,6 +216,34 @@ struct if_irda_req {
216#define ifr_dtr ifr_ifru.ifru_line.dtr 216#define ifr_dtr ifr_ifru.ifru_line.dtr
217#define ifr_rts ifr_ifru.ifru_line.rts 217#define ifr_rts ifr_ifru.ifru_line.rts
218 218
219
220/* IrDA netlink definitions */
221#define IRDA_NL_NAME "irda"
222#define IRDA_NL_VERSION 1
223
224enum irda_nl_commands {
225 IRDA_NL_CMD_UNSPEC,
226 IRDA_NL_CMD_SET_MODE,
227 IRDA_NL_CMD_GET_MODE,
228
229 __IRDA_NL_CMD_AFTER_LAST
230};
231#define IRDA_NL_CMD_MAX (__IRDA_NL_CMD_AFTER_LAST - 1)
232
233enum nl80211_attrs {
234 IRDA_NL_ATTR_UNSPEC,
235 IRDA_NL_ATTR_IFNAME,
236 IRDA_NL_ATTR_MODE,
237
238 __IRDA_NL_ATTR_AFTER_LAST
239};
240#define IRDA_NL_ATTR_MAX (__IRDA_NL_ATTR_AFTER_LAST - 1)
241
242/* IrDA modes */
243#define IRDA_MODE_PRIMARY 0x1
244#define IRDA_MODE_SECONDARY 0x2
245#define IRDA_MODE_MONITOR 0x4
246
219#endif /* KERNEL_IRDA_H */ 247#endif /* KERNEL_IRDA_H */
220 248
221 249
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2b139f66027f..dae7143644fe 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -279,6 +279,16 @@ static inline s64 ktime_to_us(const ktime_t kt)
279 return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec; 279 return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
280} 280}
281 281
282static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
283{
284 return ktime_to_us(ktime_sub(later, earlier));
285}
286
287static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
288{
289 return ktime_add_ns(kt, usec * 1000);
290}
291
282/* 292/*
283 * The resolution of the clocks. The resolution value is returned in 293 * The resolution of the clocks. The resolution value is returned in
284 * the clock_getres() system call to give application programmers an 294 * the clock_getres() system call to give application programmers an
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3a70f553b28f..79cc3dab4be7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -108,6 +108,14 @@ struct wireless_dev;
108#define MAX_HEADER (LL_MAX_HEADER + 48) 108#define MAX_HEADER (LL_MAX_HEADER + 48)
109#endif 109#endif
110 110
111struct net_device_subqueue
112{
113 /* Give a control state for each queue. This struct may contain
114 * per-queue locks in the future.
115 */
116 unsigned long state;
117};
118
111/* 119/*
112 * Network device statistics. Akin to the 2.0 ether stats but 120 * Network device statistics. Akin to the 2.0 ether stats but
113 * with byte counters. 121 * with byte counters.
@@ -177,19 +185,24 @@ struct netif_rx_stats
177 185
178DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); 186DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
179 187
188struct dev_addr_list
189{
190 struct dev_addr_list *next;
191 u8 da_addr[MAX_ADDR_LEN];
192 u8 da_addrlen;
193 int da_users;
194 int da_gusers;
195};
180 196
181/* 197/*
182 * We tag multicasts with these structures. 198 * We tag multicasts with these structures.
183 */ 199 */
184 200
185struct dev_mc_list 201#define dev_mc_list dev_addr_list
186{ 202#define dmi_addr da_addr
187 struct dev_mc_list *next; 203#define dmi_addrlen da_addrlen
188 __u8 dmi_addr[MAX_ADDR_LEN]; 204#define dmi_users da_users
189 unsigned char dmi_addrlen; 205#define dmi_gusers da_gusers
190 int dmi_users;
191 int dmi_gusers;
192};
193 206
194struct hh_cache 207struct hh_cache
195{ 208{
@@ -248,6 +261,8 @@ enum netdev_state_t
248 __LINK_STATE_LINKWATCH_PENDING, 261 __LINK_STATE_LINKWATCH_PENDING,
249 __LINK_STATE_DORMANT, 262 __LINK_STATE_DORMANT,
250 __LINK_STATE_QDISC_RUNNING, 263 __LINK_STATE_QDISC_RUNNING,
264 /* Set by the netpoll NAPI code */
265 __LINK_STATE_POLL_LIST_FROZEN,
251}; 266};
252 267
253 268
@@ -314,9 +329,10 @@ struct net_device
314 /* Net device features */ 329 /* Net device features */
315 unsigned long features; 330 unsigned long features;
316#define NETIF_F_SG 1 /* Scatter/gather IO. */ 331#define NETIF_F_SG 1 /* Scatter/gather IO. */
317#define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ 332#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
318#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ 333#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
319#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ 334#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
335#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
320#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ 336#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
321#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ 337#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
322#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ 338#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
@@ -325,6 +341,7 @@ struct net_device
325#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ 341#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
326#define NETIF_F_GSO 2048 /* Enable software GSO. */ 342#define NETIF_F_GSO 2048 /* Enable software GSO. */
327#define NETIF_F_LLTX 4096 /* LockLess TX */ 343#define NETIF_F_LLTX 4096 /* LockLess TX */
344#define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */
328 345
329 /* Segmentation offload features */ 346 /* Segmentation offload features */
330#define NETIF_F_GSO_SHIFT 16 347#define NETIF_F_GSO_SHIFT 16
@@ -338,8 +355,11 @@ struct net_device
338 /* List of features with software fallbacks. */ 355 /* List of features with software fallbacks. */
339#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) 356#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
340 357
358
341#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 359#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
342#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) 360#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
361#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
362#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
343 363
344 struct net_device *next_sched; 364 struct net_device *next_sched;
345 365
@@ -388,7 +408,10 @@ struct net_device
388 unsigned char addr_len; /* hardware address length */ 408 unsigned char addr_len; /* hardware address length */
389 unsigned short dev_id; /* for shared network cards */ 409 unsigned short dev_id; /* for shared network cards */
390 410
391 struct dev_mc_list *mc_list; /* Multicast mac addresses */ 411 struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */
412 int uc_count; /* Number of installed ucasts */
413 int uc_promisc;
414 struct dev_addr_list *mc_list; /* Multicast mac addresses */
392 int mc_count; /* Number of installed mcasts */ 415 int mc_count; /* Number of installed mcasts */
393 int promiscuity; 416 int promiscuity;
394 int allmulti; 417 int allmulti;
@@ -493,6 +516,8 @@ struct net_device
493 void *saddr, 516 void *saddr,
494 unsigned len); 517 unsigned len);
495 int (*rebuild_header)(struct sk_buff *skb); 518 int (*rebuild_header)(struct sk_buff *skb);
519#define HAVE_SET_RX_MODE
520 void (*set_rx_mode)(struct net_device *dev);
496#define HAVE_MULTICAST 521#define HAVE_MULTICAST
497 void (*set_multicast_list)(struct net_device *dev); 522 void (*set_multicast_list)(struct net_device *dev);
498#define HAVE_SET_MAC_ADDR 523#define HAVE_SET_MAC_ADDR
@@ -540,17 +565,22 @@ struct net_device
540 struct device dev; 565 struct device dev;
541 /* space for optional statistics and wireless sysfs groups */ 566 /* space for optional statistics and wireless sysfs groups */
542 struct attribute_group *sysfs_groups[3]; 567 struct attribute_group *sysfs_groups[3];
568
569 /* rtnetlink link ops */
570 const struct rtnl_link_ops *rtnl_link_ops;
571
572 /* The TX queue control structures */
573 unsigned int egress_subqueue_count;
574 struct net_device_subqueue egress_subqueue[0];
543}; 575};
544#define to_net_dev(d) container_of(d, struct net_device, dev) 576#define to_net_dev(d) container_of(d, struct net_device, dev)
545 577
546#define NETDEV_ALIGN 32 578#define NETDEV_ALIGN 32
547#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) 579#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
548 580
549static inline void *netdev_priv(struct net_device *dev) 581static inline void *netdev_priv(const struct net_device *dev)
550{ 582{
551 return (char *)dev + ((sizeof(struct net_device) 583 return dev->priv;
552 + NETDEV_ALIGN_CONST)
553 & ~NETDEV_ALIGN_CONST);
554} 584}
555 585
556#define SET_MODULE_OWNER(dev) do { } while (0) 586#define SET_MODULE_OWNER(dev) do { } while (0)
@@ -702,6 +732,62 @@ static inline int netif_running(const struct net_device *dev)
702 return test_bit(__LINK_STATE_START, &dev->state); 732 return test_bit(__LINK_STATE_START, &dev->state);
703} 733}
704 734
735/*
736 * Routines to manage the subqueues on a device. We only need start
737 * stop, and a check if it's stopped. All other device management is
738 * done at the overall netdevice level.
739 * Also test the device if we're multiqueue.
740 */
741static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
742{
743#ifdef CONFIG_NETDEVICES_MULTIQUEUE
744 clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
745#endif
746}
747
748static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
749{
750#ifdef CONFIG_NETDEVICES_MULTIQUEUE
751#ifdef CONFIG_NETPOLL_TRAP
752 if (netpoll_trap())
753 return;
754#endif
755 set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
756#endif
757}
758
759static inline int netif_subqueue_stopped(const struct net_device *dev,
760 u16 queue_index)
761{
762#ifdef CONFIG_NETDEVICES_MULTIQUEUE
763 return test_bit(__LINK_STATE_XOFF,
764 &dev->egress_subqueue[queue_index].state);
765#else
766 return 0;
767#endif
768}
769
770static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
771{
772#ifdef CONFIG_NETDEVICES_MULTIQUEUE
773#ifdef CONFIG_NETPOLL_TRAP
774 if (netpoll_trap())
775 return;
776#endif
777 if (test_and_clear_bit(__LINK_STATE_XOFF,
778 &dev->egress_subqueue[queue_index].state))
779 __netif_schedule(dev);
780#endif
781}
782
783static inline int netif_is_multiqueue(const struct net_device *dev)
784{
785#ifdef CONFIG_NETDEVICES_MULTIQUEUE
786 return (!!(NETIF_F_MULTI_QUEUE & dev->features));
787#else
788 return 0;
789#endif
790}
705 791
706/* Use this variant when it is known for sure that it 792/* Use this variant when it is known for sure that it
707 * is executing from interrupt context. 793 * is executing from interrupt context.
@@ -930,6 +1016,14 @@ static inline void netif_rx_complete(struct net_device *dev)
930{ 1016{
931 unsigned long flags; 1017 unsigned long flags;
932 1018
1019#ifdef CONFIG_NETPOLL
1020 /* Prevent race with netpoll - yes, this is a kludge.
1021 * But at least it doesn't penalize the non-netpoll
1022 * code path. */
1023 if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
1024 return;
1025#endif
1026
933 local_irq_save(flags); 1027 local_irq_save(flags);
934 __netif_rx_complete(dev); 1028 __netif_rx_complete(dev);
935 local_irq_restore(flags); 1029 local_irq_restore(flags);
@@ -992,15 +1086,24 @@ static inline void netif_tx_disable(struct net_device *dev)
992extern void ether_setup(struct net_device *dev); 1086extern void ether_setup(struct net_device *dev);
993 1087
994/* Support for loadable net-drivers */ 1088/* Support for loadable net-drivers */
995extern struct net_device *alloc_netdev(int sizeof_priv, const char *name, 1089extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
996 void (*setup)(struct net_device *)); 1090 void (*setup)(struct net_device *),
1091 unsigned int queue_count);
1092#define alloc_netdev(sizeof_priv, name, setup) \
1093 alloc_netdev_mq(sizeof_priv, name, setup, 1)
997extern int register_netdev(struct net_device *dev); 1094extern int register_netdev(struct net_device *dev);
998extern void unregister_netdev(struct net_device *dev); 1095extern void unregister_netdev(struct net_device *dev);
999/* Functions used for multicast support */ 1096/* Functions used for secondary unicast and multicast support */
1000extern void dev_mc_upload(struct net_device *dev); 1097extern void dev_set_rx_mode(struct net_device *dev);
1098extern void __dev_set_rx_mode(struct net_device *dev);
1099extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
1100extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
1001extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); 1101extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
1002extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); 1102extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
1003extern void dev_mc_discard(struct net_device *dev); 1103extern void dev_mc_discard(struct net_device *dev);
1104extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
1105extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
1106extern void __dev_addr_discard(struct dev_addr_list **list);
1004extern void dev_set_promiscuity(struct net_device *dev, int inc); 1107extern void dev_set_promiscuity(struct net_device *dev, int inc);
1005extern void dev_set_allmulti(struct net_device *dev, int inc); 1108extern void dev_set_allmulti(struct net_device *dev, int inc);
1006extern void netdev_state_change(struct net_device *dev); 1109extern void netdev_state_change(struct net_device *dev);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 10b5c6275706..0eed0b7ab2df 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -275,7 +275,8 @@ struct nf_queue_handler {
275}; 275};
276extern int nf_register_queue_handler(int pf, 276extern int nf_register_queue_handler(int pf,
277 struct nf_queue_handler *qh); 277 struct nf_queue_handler *qh);
278extern int nf_unregister_queue_handler(int pf); 278extern int nf_unregister_queue_handler(int pf,
279 struct nf_queue_handler *qh);
279extern void nf_unregister_queue_handlers(struct nf_queue_handler *qh); 280extern void nf_unregister_queue_handlers(struct nf_queue_handler *qh);
280extern void nf_reinject(struct sk_buff *skb, 281extern void nf_reinject(struct sk_buff *skb,
281 struct nf_info *info, 282 struct nf_info *info,
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index 9d8144a488cd..c93061f33144 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -4,6 +4,8 @@
4 4
5#include <linux/netfilter/nf_conntrack_common.h> 5#include <linux/netfilter/nf_conntrack_common.h>
6 6
7extern const char *pptp_msg_name[];
8
7/* state of the control session */ 9/* state of the control session */
8enum pptp_ctrlsess_state { 10enum pptp_ctrlsess_state {
9 PPTP_SESSION_NONE, /* no session present */ 11 PPTP_SESSION_NONE, /* no session present */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 7e733a6ba4f6..64f425a855bb 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -141,22 +141,22 @@ struct xt_match
141 /* Arguments changed since 2.6.9, as this must now handle 141 /* Arguments changed since 2.6.9, as this must now handle
142 non-linear skb, using skb_header_pointer and 142 non-linear skb, using skb_header_pointer and
143 skb_ip_make_writable. */ 143 skb_ip_make_writable. */
144 int (*match)(const struct sk_buff *skb, 144 bool (*match)(const struct sk_buff *skb,
145 const struct net_device *in, 145 const struct net_device *in,
146 const struct net_device *out, 146 const struct net_device *out,
147 const struct xt_match *match, 147 const struct xt_match *match,
148 const void *matchinfo, 148 const void *matchinfo,
149 int offset, 149 int offset,
150 unsigned int protoff, 150 unsigned int protoff,
151 int *hotdrop); 151 bool *hotdrop);
152 152
153 /* Called when user tries to insert an entry of this type. */ 153 /* Called when user tries to insert an entry of this type. */
154 /* Should return true or false. */ 154 /* Should return true or false. */
155 int (*checkentry)(const char *tablename, 155 bool (*checkentry)(const char *tablename,
156 const void *ip, 156 const void *ip,
157 const struct xt_match *match, 157 const struct xt_match *match,
158 void *matchinfo, 158 void *matchinfo,
159 unsigned int hook_mask); 159 unsigned int hook_mask);
160 160
161 /* Called when entry of this type deleted. */ 161 /* Called when entry of this type deleted. */
162 void (*destroy)(const struct xt_match *match, void *matchinfo); 162 void (*destroy)(const struct xt_match *match, void *matchinfo);
@@ -202,11 +202,11 @@ struct xt_target
202 hook_mask is a bitmask of hooks from which it can be 202 hook_mask is a bitmask of hooks from which it can be
203 called. */ 203 called. */
204 /* Should return true or false. */ 204 /* Should return true or false. */
205 int (*checkentry)(const char *tablename, 205 bool (*checkentry)(const char *tablename,
206 const void *entry, 206 const void *entry,
207 const struct xt_target *target, 207 const struct xt_target *target,
208 void *targinfo, 208 void *targinfo,
209 unsigned int hook_mask); 209 unsigned int hook_mask);
210 210
211 /* Called when entry of this type deleted. */ 211 /* Called when entry of this type deleted. */
212 void (*destroy)(const struct xt_target *target, void *targinfo); 212 void (*destroy)(const struct xt_target *target, void *targinfo);
diff --git a/include/linux/netfilter/xt_u32.h b/include/linux/netfilter/xt_u32.h
new file mode 100644
index 000000000000..9947f56cdbdd
--- /dev/null
+++ b/include/linux/netfilter/xt_u32.h
@@ -0,0 +1,40 @@
1#ifndef _XT_U32_H
2#define _XT_U32_H 1
3
4enum xt_u32_ops {
5 XT_U32_AND,
6 XT_U32_LEFTSH,
7 XT_U32_RIGHTSH,
8 XT_U32_AT,
9};
10
11struct xt_u32_location_element {
12 u_int32_t number;
13 u_int8_t nextop;
14};
15
16struct xt_u32_value_element {
17 u_int32_t min;
18 u_int32_t max;
19};
20
21/*
22 * Any way to allow for an arbitrary number of elements?
23 * For now, I settle with a limit of 10 each.
24 */
25#define XT_U32_MAXSIZE 10
26
27struct xt_u32_test {
28 struct xt_u32_location_element location[XT_U32_MAXSIZE+1];
29 struct xt_u32_value_element value[XT_U32_MAXSIZE+1];
30 u_int8_t nnums;
31 u_int8_t nvalues;
32};
33
34struct xt_u32 {
35 struct xt_u32_test tests[XT_U32_MAXSIZE+1];
36 u_int8_t ntests;
37 u_int8_t invert;
38};
39
40#endif /* _XT_U32_H */
diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
index d9bceedfb3dc..daf50be22c9d 100644
--- a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
+++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
@@ -18,13 +18,13 @@ struct clusterip_config;
18struct ipt_clusterip_tgt_info { 18struct ipt_clusterip_tgt_info {
19 19
20 u_int32_t flags; 20 u_int32_t flags;
21 21
22 /* only relevant for new ones */ 22 /* only relevant for new ones */
23 u_int8_t clustermac[6]; 23 u_int8_t clustermac[6];
24 u_int16_t num_total_nodes; 24 u_int16_t num_total_nodes;
25 u_int16_t num_local_nodes; 25 u_int16_t num_local_nodes;
26 u_int16_t local_nodes[CLUSTERIP_MAX_NODES]; 26 u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
27 enum clusterip_hashmode hash_mode; 27 u_int32_t hash_mode;
28 u_int32_t hash_initval; 28 u_int32_t hash_initval;
29 29
30 struct clusterip_config *config; 30 struct clusterip_config *config;
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 4686f8342cbd..9a720f05888f 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -44,8 +44,14 @@ struct ip6t_ip6 {
44 char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; 44 char iniface[IFNAMSIZ], outiface[IFNAMSIZ];
45 unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; 45 unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];
46 46
47 /* ARGH, HopByHop uses 0, so can't do 0 = ANY, 47 /* Upper protocol number
48 instead IP6T_F_NOPROTO must be set */ 48 * - The allowed value is 0 (any) or protocol number of last parsable
49 * header, which is 50 (ESP), 59 (No Next Header), 135 (MH), or
50 * the non IPv6 extension headers.
51 * - The protocol numbers of IPv6 extension headers except of ESP and
52 * MH do not match any packets.
53 * - You also need to set IP6T_FLAGS_PROTO to "flags" to check protocol.
54 */
49 u_int16_t proto; 55 u_int16_t proto;
50 /* TOS to match iff flags & IP6T_F_TOS */ 56 /* TOS to match iff flags & IP6T_F_TOS */
51 u_int8_t tos; 57 u_int8_t tos;
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index c3f01b3085a4..30b8571e6b34 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -403,16 +403,13 @@ enum
403 * 1..32767 Reserved for ematches inside kernel tree 403 * 1..32767 Reserved for ematches inside kernel tree
404 * 32768..65535 Free to use, not reliable 404 * 32768..65535 Free to use, not reliable
405 */ 405 */
406enum 406#define TCF_EM_CONTAINER 0
407{ 407#define TCF_EM_CMP 1
408 TCF_EM_CONTAINER, 408#define TCF_EM_NBYTE 2
409 TCF_EM_CMP, 409#define TCF_EM_U32 3
410 TCF_EM_NBYTE, 410#define TCF_EM_META 4
411 TCF_EM_U32, 411#define TCF_EM_TEXT 5
412 TCF_EM_META, 412#define TCF_EM_MAX 5
413 TCF_EM_TEXT,
414 __TCF_EM_MAX
415};
416 413
417enum 414enum
418{ 415{
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index d10f35338507..268c51599eb8 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -101,6 +101,15 @@ struct tc_prio_qopt
101 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ 101 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
102}; 102};
103 103
104enum
105{
106 TCA_PRIO_UNSPEC,
107 TCA_PRIO_MQ,
108 __TCA_PRIO_MAX
109};
110
111#define TCA_PRIO_MAX (__TCA_PRIO_MAX - 1)
112
104/* TBF section */ 113/* TBF section */
105 114
106struct tc_tbf_qopt 115struct tc_tbf_qopt
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 1fae30af91f3..c91476ce314a 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -261,7 +261,7 @@ enum rtattr_type_t
261 RTA_FLOW, 261 RTA_FLOW,
262 RTA_CACHEINFO, 262 RTA_CACHEINFO,
263 RTA_SESSION, 263 RTA_SESSION,
264 RTA_MP_ALGO, 264 RTA_MP_ALGO, /* no longer used */
265 RTA_TABLE, 265 RTA_TABLE,
266 __RTA_MAX 266 __RTA_MAX
267}; 267};
@@ -570,10 +570,16 @@ static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
570} 570}
571 571
572extern int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len); 572extern int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len);
573extern int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr,
574 struct rtattr *rta, int len);
573 575
574#define rtattr_parse_nested(tb, max, rta) \ 576#define rtattr_parse_nested(tb, max, rta) \
575 rtattr_parse((tb), (max), RTA_DATA((rta)), RTA_PAYLOAD((rta))) 577 rtattr_parse((tb), (max), RTA_DATA((rta)), RTA_PAYLOAD((rta)))
576 578
579#define rtattr_parse_nested_compat(tb, max, rta, data, len) \
580({ data = RTA_PAYLOAD(rta) >= len ? RTA_DATA(rta) : NULL; \
581 __rtattr_parse_nested_compat(tb, max, rta, len); })
582
577extern int rtnetlink_send(struct sk_buff *skb, u32 pid, u32 group, int echo); 583extern int rtnetlink_send(struct sk_buff *skb, u32 pid, u32 group, int echo);
578extern int rtnl_unicast(struct sk_buff *skb, u32 pid); 584extern int rtnl_unicast(struct sk_buff *skb, u32 pid);
579extern int rtnl_notify(struct sk_buff *skb, u32 pid, u32 group, 585extern int rtnl_notify(struct sk_buff *skb, u32 pid, u32 group,
@@ -638,6 +644,18 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
638({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ 644({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
639 (skb)->len; }) 645 (skb)->len; })
640 646
647#define RTA_NEST_COMPAT(skb, type, attrlen, data) \
648({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
649 RTA_PUT(skb, type, attrlen, data); \
650 RTA_NEST(skb, type); \
651 __start; })
652
653#define RTA_NEST_COMPAT_END(skb, start) \
654({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \
655 (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
656 RTA_NEST_END(skb, __nest); \
657 (skb)->len; })
658
641#define RTA_NEST_CANCEL(skb, start) \ 659#define RTA_NEST_CANCEL(skb, start) \
642({ if (start) \ 660({ if (start) \
643 skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ 661 skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6f0b2f7d0010..9391e4a4c344 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -65,13 +65,20 @@
65 * is able to produce some skb->csum, it MUST use COMPLETE, 65 * is able to produce some skb->csum, it MUST use COMPLETE,
66 * not UNNECESSARY. 66 * not UNNECESSARY.
67 * 67 *
68 * PARTIAL: identical to the case for output below. This may occur
69 * on a packet received directly from another Linux OS, e.g.,
70 * a virtualised Linux kernel on the same host. The packet can
71 * be treated in the same way as UNNECESSARY except that on
72 * output (i.e., forwarding) the checksum must be filled in
73 * by the OS or the hardware.
74 *
68 * B. Checksumming on output. 75 * B. Checksumming on output.
69 * 76 *
70 * NONE: skb is checksummed by protocol or csum is not required. 77 * NONE: skb is checksummed by protocol or csum is not required.
71 * 78 *
72 * PARTIAL: device is required to csum packet as seen by hard_start_xmit 79 * PARTIAL: device is required to csum packet as seen by hard_start_xmit
73 * from skb->transport_header to the end and to record the checksum 80 * from skb->csum_start to the end and to record the checksum
74 * at skb->transport_header + skb->csum. 81 * at skb->csum_start + skb->csum_offset.
75 * 82 *
76 * Device must show its capabilities in dev->features, set 83 * Device must show its capabilities in dev->features, set
77 * at device setup time. 84 * at device setup time.
@@ -82,6 +89,7 @@
82 * TCP/UDP over IPv4. Sigh. Vendors like this 89 * TCP/UDP over IPv4. Sigh. Vendors like this
83 * way by an unknown reason. Though, see comment above 90 * way by an unknown reason. Though, see comment above
84 * about CHECKSUM_UNNECESSARY. 8) 91 * about CHECKSUM_UNNECESSARY. 8)
92 * NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
85 * 93 *
86 * Any questions? No questions, good. --ANK 94 * Any questions? No questions, good. --ANK
87 */ 95 */
@@ -147,8 +155,8 @@ struct skb_shared_info {
147 155
148/* We divide dataref into two halves. The higher 16 bits hold references 156/* We divide dataref into two halves. The higher 16 bits hold references
149 * to the payload part of skb->data. The lower 16 bits hold references to 157 * to the payload part of skb->data. The lower 16 bits hold references to
150 * the entire skb->data. It is up to the users of the skb to agree on 158 * the entire skb->data. A clone of a headerless skb holds the length of
151 * where the payload starts. 159 * the header in skb->hdr_len.
152 * 160 *
153 * All users must obey the rule that the skb->data reference count must be 161 * All users must obey the rule that the skb->data reference count must be
154 * greater than or equal to the payload reference count. 162 * greater than or equal to the payload reference count.
@@ -196,7 +204,6 @@ typedef unsigned char *sk_buff_data_t;
196 * @sk: Socket we are owned by 204 * @sk: Socket we are owned by
197 * @tstamp: Time we arrived 205 * @tstamp: Time we arrived
198 * @dev: Device we arrived on/are leaving by 206 * @dev: Device we arrived on/are leaving by
199 * @iif: ifindex of device we arrived on
200 * @transport_header: Transport layer header 207 * @transport_header: Transport layer header
201 * @network_header: Network layer header 208 * @network_header: Network layer header
202 * @mac_header: Link layer header 209 * @mac_header: Link layer header
@@ -206,6 +213,7 @@ typedef unsigned char *sk_buff_data_t;
206 * @len: Length of actual data 213 * @len: Length of actual data
207 * @data_len: Data length 214 * @data_len: Data length
208 * @mac_len: Length of link layer header 215 * @mac_len: Length of link layer header
216 * @hdr_len: writable header length of cloned skb
209 * @csum: Checksum (must include start/offset pair) 217 * @csum: Checksum (must include start/offset pair)
210 * @csum_start: Offset from skb->head where checksumming should start 218 * @csum_start: Offset from skb->head where checksumming should start
211 * @csum_offset: Offset from csum_start where checksum should be stored 219 * @csum_offset: Offset from csum_start where checksum should be stored
@@ -227,9 +235,12 @@ typedef unsigned char *sk_buff_data_t;
227 * @mark: Generic packet mark 235 * @mark: Generic packet mark
228 * @nfct: Associated connection, if any 236 * @nfct: Associated connection, if any
229 * @ipvs_property: skbuff is owned by ipvs 237 * @ipvs_property: skbuff is owned by ipvs
238 * @nf_trace: netfilter packet trace flag
230 * @nfctinfo: Relationship of this skb to the connection 239 * @nfctinfo: Relationship of this skb to the connection
231 * @nfct_reasm: netfilter conntrack re-assembly pointer 240 * @nfct_reasm: netfilter conntrack re-assembly pointer
232 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 241 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
242 * @iif: ifindex of device we arrived on
243 * @queue_mapping: Queue mapping for multiqueue devices
233 * @tc_index: Traffic control index 244 * @tc_index: Traffic control index
234 * @tc_verd: traffic control verdict 245 * @tc_verd: traffic control verdict
235 * @dma_cookie: a cookie to one of several possible DMA operations 246 * @dma_cookie: a cookie to one of several possible DMA operations
@@ -245,8 +256,6 @@ struct sk_buff {
245 struct sock *sk; 256 struct sock *sk;
246 ktime_t tstamp; 257 ktime_t tstamp;
247 struct net_device *dev; 258 struct net_device *dev;
248 int iif;
249 /* 4 byte hole on 64 bit*/
250 259
251 struct dst_entry *dst; 260 struct dst_entry *dst;
252 struct sec_path *sp; 261 struct sec_path *sp;
@@ -260,8 +269,9 @@ struct sk_buff {
260 char cb[48]; 269 char cb[48];
261 270
262 unsigned int len, 271 unsigned int len,
263 data_len, 272 data_len;
264 mac_len; 273 __u16 mac_len,
274 hdr_len;
265 union { 275 union {
266 __wsum csum; 276 __wsum csum;
267 struct { 277 struct {
@@ -277,7 +287,8 @@ struct sk_buff {
277 nfctinfo:3; 287 nfctinfo:3;
278 __u8 pkt_type:3, 288 __u8 pkt_type:3,
279 fclone:2, 289 fclone:2,
280 ipvs_property:1; 290 ipvs_property:1,
291 nf_trace:1;
281 __be16 protocol; 292 __be16 protocol;
282 293
283 void (*destructor)(struct sk_buff *skb); 294 void (*destructor)(struct sk_buff *skb);
@@ -288,12 +299,18 @@ struct sk_buff {
288#ifdef CONFIG_BRIDGE_NETFILTER 299#ifdef CONFIG_BRIDGE_NETFILTER
289 struct nf_bridge_info *nf_bridge; 300 struct nf_bridge_info *nf_bridge;
290#endif 301#endif
302
303 int iif;
304 __u16 queue_mapping;
305
291#ifdef CONFIG_NET_SCHED 306#ifdef CONFIG_NET_SCHED
292 __u16 tc_index; /* traffic control index */ 307 __u16 tc_index; /* traffic control index */
293#ifdef CONFIG_NET_CLS_ACT 308#ifdef CONFIG_NET_CLS_ACT
294 __u16 tc_verd; /* traffic control verdict */ 309 __u16 tc_verd; /* traffic control verdict */
295#endif 310#endif
296#endif 311#endif
312 /* 2 byte hole */
313
297#ifdef CONFIG_NET_DMA 314#ifdef CONFIG_NET_DMA
298 dma_cookie_t dma_cookie; 315 dma_cookie_t dma_cookie;
299#endif 316#endif
@@ -1322,6 +1339,20 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1322} 1339}
1323 1340
1324/** 1341/**
1342 * skb_clone_writable - is the header of a clone writable
1343 * @skb: buffer to check
1344 * @len: length up to which to write
1345 *
1346 * Returns true if modifying the header part of the cloned buffer
1347 * does not requires the data to be copied.
1348 */
1349static inline int skb_clone_writable(struct sk_buff *skb, int len)
1350{
1351 return !skb_header_cloned(skb) &&
1352 skb_headroom(skb) + len <= skb->hdr_len;
1353}
1354
1355/**
1325 * skb_cow - copy header of skb when it is required 1356 * skb_cow - copy header of skb when it is required
1326 * @skb: buffer to cow 1357 * @skb: buffer to cow
1327 * @headroom: needed headroom 1358 * @headroom: needed headroom
@@ -1709,6 +1740,20 @@ static inline void skb_init_secmark(struct sk_buff *skb)
1709{ } 1740{ }
1710#endif 1741#endif
1711 1742
1743static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1744{
1745#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1746 skb->queue_mapping = queue_mapping;
1747#endif
1748}
1749
1750static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1751{
1752#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1753 to->queue_mapping = from->queue_mapping;
1754#endif
1755}
1756
1712static inline int skb_is_gso(const struct sk_buff *skb) 1757static inline int skb_is_gso(const struct sk_buff *skb)
1713{ 1758{
1714 return skb_shinfo(skb)->gso_size; 1759 return skb_shinfo(skb)->gso_size;
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6e7c9483a6a6..fe195c97a89d 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -287,6 +287,7 @@ struct ucred {
287#define SOL_NETLINK 270 287#define SOL_NETLINK 270
288#define SOL_TIPC 271 288#define SOL_TIPC 271
289#define SOL_RXRPC 272 289#define SOL_RXRPC 272
290#define SOL_PPPOL2TP 273
290 291
291/* IPX options */ 292/* IPX options */
292#define IPX_TYPE 1 293#define IPX_TYPE 1
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 6de445c31a64..8ec703f462da 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -42,6 +42,7 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
42/* UDP encapsulation types */ 42/* UDP encapsulation types */
43#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */ 43#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
44#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */ 44#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */
45#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
45 46
46#ifdef __KERNEL__ 47#ifdef __KERNEL__
47#include <linux/types.h> 48#include <linux/types.h>
@@ -70,6 +71,11 @@ struct udp_sock {
70#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */ 71#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
71#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */ 72#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */
72 __u8 pcflag; /* marks socket as UDP-Lite if > 0 */ 73 __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
74 __u8 unused[3];
75 /*
76 * For encapsulation sockets.
77 */
78 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
73}; 79};
74 80
75static inline struct udp_sock *udp_sk(const struct sock *sk) 81static inline struct udp_sock *udp_sk(const struct sock *sk)
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 8b06c2f3657f..2f0273feabd3 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -19,7 +19,6 @@ struct tcf_common {
19 struct gnet_stats_basic tcfc_bstats; 19 struct gnet_stats_basic tcfc_bstats;
20 struct gnet_stats_queue tcfc_qstats; 20 struct gnet_stats_queue tcfc_qstats;
21 struct gnet_stats_rate_est tcfc_rate_est; 21 struct gnet_stats_rate_est tcfc_rate_est;
22 spinlock_t *tcfc_stats_lock;
23 spinlock_t tcfc_lock; 22 spinlock_t tcfc_lock;
24}; 23};
25#define tcf_next common.tcfc_next 24#define tcf_next common.tcfc_next
@@ -32,7 +31,6 @@ struct tcf_common {
32#define tcf_bstats common.tcfc_bstats 31#define tcf_bstats common.tcfc_bstats
33#define tcf_qstats common.tcfc_qstats 32#define tcf_qstats common.tcfc_qstats
34#define tcf_rate_est common.tcfc_rate_est 33#define tcf_rate_est common.tcfc_rate_est
35#define tcf_stats_lock common.tcfc_stats_lock
36#define tcf_lock common.tcfc_lock 34#define tcf_lock common.tcfc_lock
37 35
38struct tcf_police { 36struct tcf_police {
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f3531d0bcd05..33b593e17441 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -61,7 +61,7 @@ extern int addrconf_set_dstaddr(void __user *arg);
61extern int ipv6_chk_addr(struct in6_addr *addr, 61extern int ipv6_chk_addr(struct in6_addr *addr,
62 struct net_device *dev, 62 struct net_device *dev,
63 int strict); 63 int strict);
64#ifdef CONFIG_IPV6_MIP6 64#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
65extern int ipv6_chk_home_addr(struct in6_addr *addr); 65extern int ipv6_chk_home_addr(struct in6_addr *addr);
66#endif 66#endif
67extern struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr, 67extern struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 65f49fd7deff..6de1e9e35c73 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -79,9 +79,10 @@ struct unix_sock {
79 struct mutex readlock; 79 struct mutex readlock;
80 struct sock *peer; 80 struct sock *peer;
81 struct sock *other; 81 struct sock *other;
82 struct sock *gc_tree; 82 struct list_head link;
83 atomic_t inflight; 83 atomic_t inflight;
84 spinlock_t lock; 84 spinlock_t lock;
85 unsigned int gc_candidate : 1;
85 wait_queue_head_t peer_wait; 86 wait_queue_head_t peer_wait;
86}; 87};
87#define unix_sk(__sk) ((struct unix_sock *)__sk) 88#define unix_sk(__sk) ((struct unix_sock *)__sk)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 93ce272a5d27..ebfb96b41106 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -107,14 +107,14 @@ enum {
107#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ 107#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
108#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ 108#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
109 109
110/* HCI Packet types */ 110/* HCI data types */
111#define HCI_COMMAND_PKT 0x01 111#define HCI_COMMAND_PKT 0x01
112#define HCI_ACLDATA_PKT 0x02 112#define HCI_ACLDATA_PKT 0x02
113#define HCI_SCODATA_PKT 0x03 113#define HCI_SCODATA_PKT 0x03
114#define HCI_EVENT_PKT 0x04 114#define HCI_EVENT_PKT 0x04
115#define HCI_VENDOR_PKT 0xff 115#define HCI_VENDOR_PKT 0xff
116 116
117/* HCI Packet types */ 117/* HCI packet types */
118#define HCI_DM1 0x0008 118#define HCI_DM1 0x0008
119#define HCI_DM3 0x0400 119#define HCI_DM3 0x0400
120#define HCI_DM5 0x4000 120#define HCI_DM5 0x4000
@@ -129,6 +129,14 @@ enum {
129#define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3) 129#define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3)
130#define ACL_PTYPE_MASK (~SCO_PTYPE_MASK) 130#define ACL_PTYPE_MASK (~SCO_PTYPE_MASK)
131 131
132/* eSCO packet types */
133#define ESCO_HV1 0x0001
134#define ESCO_HV2 0x0002
135#define ESCO_HV3 0x0004
136#define ESCO_EV3 0x0008
137#define ESCO_EV4 0x0010
138#define ESCO_EV5 0x0020
139
132/* ACL flags */ 140/* ACL flags */
133#define ACL_CONT 0x01 141#define ACL_CONT 0x01
134#define ACL_START 0x02 142#define ACL_START 0x02
@@ -138,6 +146,7 @@ enum {
138/* Baseband links */ 146/* Baseband links */
139#define SCO_LINK 0x00 147#define SCO_LINK 0x00
140#define ACL_LINK 0x01 148#define ACL_LINK 0x01
149#define ESCO_LINK 0x02
141 150
142/* LMP features */ 151/* LMP features */
143#define LMP_3SLOT 0x01 152#define LMP_3SLOT 0x01
@@ -162,6 +171,11 @@ enum {
162#define LMP_PSCHEME 0x02 171#define LMP_PSCHEME 0x02
163#define LMP_PCONTROL 0x04 172#define LMP_PCONTROL 0x04
164 173
174#define LMP_ESCO 0x80
175
176#define LMP_EV4 0x01
177#define LMP_EV5 0x02
178
165#define LMP_SNIFF_SUBR 0x02 179#define LMP_SNIFF_SUBR 0x02
166 180
167/* Connection modes */ 181/* Connection modes */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index c0fc39620f36..8f67c8a7169b 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -78,6 +78,7 @@ struct hci_dev {
78 __u16 voice_setting; 78 __u16 voice_setting;
79 79
80 __u16 pkt_type; 80 __u16 pkt_type;
81 __u16 esco_type;
81 __u16 link_policy; 82 __u16 link_policy;
82 __u16 link_mode; 83 __u16 link_mode;
83 84
@@ -109,6 +110,7 @@ struct hci_dev {
109 struct sk_buff_head cmd_q; 110 struct sk_buff_head cmd_q;
110 111
111 struct sk_buff *sent_cmd; 112 struct sk_buff *sent_cmd;
113 struct sk_buff *reassembly[3];
112 114
113 struct semaphore req_lock; 115 struct semaphore req_lock;
114 wait_queue_head_t req_wait_q; 116 wait_queue_head_t req_wait_q;
@@ -437,6 +439,8 @@ static inline int hci_recv_frame(struct sk_buff *skb)
437 return 0; 439 return 0;
438} 440}
439 441
442int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
443
440int hci_register_sysfs(struct hci_dev *hdev); 444int hci_register_sysfs(struct hci_dev *hdev);
441void hci_unregister_sysfs(struct hci_dev *hdev); 445void hci_unregister_sysfs(struct hci_dev *hdev);
442void hci_conn_add_sysfs(struct hci_conn *conn); 446void hci_conn_add_sysfs(struct hci_conn *conn);
@@ -449,6 +453,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
449#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT) 453#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
450#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF) 454#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
451#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR) 455#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
456#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
452 457
453/* ----- HCI protocols ----- */ 458/* ----- HCI protocols ----- */
454struct hci_proto { 459struct hci_proto {
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 3c563f02907c..25aa575db807 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -323,6 +323,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc
323#define RFCOMM_RELEASE_ONHUP 1 323#define RFCOMM_RELEASE_ONHUP 1
324#define RFCOMM_HANGUP_NOW 2 324#define RFCOMM_HANGUP_NOW 2
325#define RFCOMM_TTY_ATTACHED 3 325#define RFCOMM_TTY_ATTACHED 3
326#define RFCOMM_TTY_RELEASED 4
326 327
327struct rfcomm_dev_req { 328struct rfcomm_dev_req {
328 s16 dev_id; 329 s16 dev_id;
diff --git a/include/net/dn.h b/include/net/dn.h
index ac4ce9091747..627778384c84 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/dn.h> 4#include <linux/dn.h>
5#include <net/sock.h> 5#include <net/sock.h>
6#include <net/tcp.h>
7#include <asm/byteorder.h> 6#include <asm/byteorder.h>
8 7
9#define dn_ntohs(x) le16_to_cpu(x) 8#define dn_ntohs(x) le16_to_cpu(x)
diff --git a/include/net/dst.h b/include/net/dst.h
index 82270f9332db..e9ff4a4caef9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -47,7 +47,6 @@ struct dst_entry
47#define DST_NOXFRM 2 47#define DST_NOXFRM 2
48#define DST_NOPOLICY 4 48#define DST_NOPOLICY 4
49#define DST_NOHASH 8 49#define DST_NOHASH 8
50#define DST_BALANCED 0x10
51 unsigned long expires; 50 unsigned long expires;
52 51
53 unsigned short header_len; /* more space at head required */ 52 unsigned short header_len; /* more space at head required */
diff --git a/include/net/flow.h b/include/net/flow.h
index f3cc1f812619..af59fa5cc1f8 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -67,20 +67,16 @@ struct flowi {
67 67
68 __be32 spi; 68 __be32 spi;
69 69
70#ifdef CONFIG_IPV6_MIP6
71 struct { 70 struct {
72 __u8 type; 71 __u8 type;
73 } mht; 72 } mht;
74#endif
75 } uli_u; 73 } uli_u;
76#define fl_ip_sport uli_u.ports.sport 74#define fl_ip_sport uli_u.ports.sport
77#define fl_ip_dport uli_u.ports.dport 75#define fl_ip_dport uli_u.ports.dport
78#define fl_icmp_type uli_u.icmpt.type 76#define fl_icmp_type uli_u.icmpt.type
79#define fl_icmp_code uli_u.icmpt.code 77#define fl_icmp_code uli_u.icmpt.code
80#define fl_ipsec_spi uli_u.spi 78#define fl_ipsec_spi uli_u.spi
81#ifdef CONFIG_IPV6_MIP6
82#define fl_mh_type uli_u.mht.type 79#define fl_mh_type uli_u.mht.type
83#endif
84 __u32 secid; /* used by xfrm; see secid.txt */ 80 __u32 secid; /* used by xfrm; see secid.txt */
85} __attribute__((__aligned__(BITS_PER_LONG/8))); 81} __attribute__((__aligned__(BITS_PER_LONG/8)));
86 82
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 69252cbe05b0..8cadc77c7df4 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -39,7 +39,6 @@ struct fib_config {
39 int fc_mx_len; 39 int fc_mx_len;
40 int fc_mp_len; 40 int fc_mp_len;
41 u32 fc_flow; 41 u32 fc_flow;
42 u32 fc_mp_alg;
43 u32 fc_nlflags; 42 u32 fc_nlflags;
44 struct nl_info fc_nlinfo; 43 struct nl_info fc_nlinfo;
45 }; 44 };
@@ -86,9 +85,6 @@ struct fib_info {
86#ifdef CONFIG_IP_ROUTE_MULTIPATH 85#ifdef CONFIG_IP_ROUTE_MULTIPATH
87 int fib_power; 86 int fib_power;
88#endif 87#endif
89#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
90 u32 fib_mp_alg;
91#endif
92 struct fib_nh fib_nh[0]; 88 struct fib_nh fib_nh[0];
93#define fib_dev fib_nh[0].nh_dev 89#define fib_dev fib_nh[0].nh_dev
94}; 90};
@@ -103,10 +99,6 @@ struct fib_result {
103 unsigned char nh_sel; 99 unsigned char nh_sel;
104 unsigned char type; 100 unsigned char type;
105 unsigned char scope; 101 unsigned char scope;
106#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
107 __be32 network;
108 __be32 netmask;
109#endif
110 struct fib_info *fi; 102 struct fib_info *fi;
111#ifdef CONFIG_IP_MULTIPLE_TABLES 103#ifdef CONFIG_IP_MULTIPLE_TABLES
112 struct fib_rule *r; 104 struct fib_rule *r;
@@ -145,14 +137,6 @@ struct fib_result_nl {
145#define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev) 137#define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev)
146#define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif) 138#define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif)
147 139
148#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
149#define FIB_RES_NETWORK(res) ((res).network)
150#define FIB_RES_NETMASK(res) ((res).netmask)
151#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
152#define FIB_RES_NETWORK(res) (0)
153#define FIB_RES_NETMASK(res) (0)
154#endif /* CONFIG_IP_ROUTE_MULTIPATH_WRANDOM */
155
156struct fib_table { 140struct fib_table {
157 struct hlist_node tb_hlist; 141 struct hlist_node tb_hlist;
158 u32 tb_id; 142 u32 tb_id;
diff --git a/include/net/ip_mp_alg.h b/include/net/ip_mp_alg.h
deleted file mode 100644
index 25b56571e54b..000000000000
--- a/include/net/ip_mp_alg.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/* ip_mp_alg.h: IPV4 multipath algorithm support.
2 *
3 * Copyright (C) 2004, 2005 Einar Lueck <elueck@de.ibm.com>
4 * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
5 */
6
7#ifndef _NET_IP_MP_ALG_H
8#define _NET_IP_MP_ALG_H
9
10#include <linux/ip_mp_alg.h>
11#include <net/flow.h>
12#include <net/route.h>
13
14struct fib_nh;
15
16struct ip_mp_alg_ops {
17 void (*mp_alg_select_route)(const struct flowi *flp,
18 struct rtable *rth, struct rtable **rp);
19 void (*mp_alg_flush)(void);
20 void (*mp_alg_set_nhinfo)(__be32 network, __be32 netmask,
21 unsigned char prefixlen,
22 const struct fib_nh *nh);
23 void (*mp_alg_remove)(struct rtable *rth);
24};
25
26extern int multipath_alg_register(struct ip_mp_alg_ops *, enum ip_mp_alg);
27extern void multipath_alg_unregister(struct ip_mp_alg_ops *, enum ip_mp_alg);
28
29extern struct ip_mp_alg_ops *ip_mp_alg_table[];
30
31static inline int multipath_select_route(const struct flowi *flp,
32 struct rtable *rth,
33 struct rtable **rp)
34{
35#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
36 struct ip_mp_alg_ops *ops = ip_mp_alg_table[rth->rt_multipath_alg];
37
38 /* mp_alg_select_route _MUST_ be implemented */
39 if (ops && (rth->u.dst.flags & DST_BALANCED)) {
40 ops->mp_alg_select_route(flp, rth, rp);
41 return 1;
42 }
43#endif
44 return 0;
45}
46
47static inline void multipath_flush(void)
48{
49#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
50 int i;
51
52 for (i = IP_MP_ALG_NONE; i <= IP_MP_ALG_MAX; i++) {
53 struct ip_mp_alg_ops *ops = ip_mp_alg_table[i];
54
55 if (ops && ops->mp_alg_flush)
56 ops->mp_alg_flush();
57 }
58#endif
59}
60
61static inline void multipath_set_nhinfo(struct rtable *rth,
62 __be32 network, __be32 netmask,
63 unsigned char prefixlen,
64 const struct fib_nh *nh)
65{
66#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
67 struct ip_mp_alg_ops *ops = ip_mp_alg_table[rth->rt_multipath_alg];
68
69 if (ops && ops->mp_alg_set_nhinfo)
70 ops->mp_alg_set_nhinfo(network, netmask, prefixlen, nh);
71#endif
72}
73
74static inline void multipath_remove(struct rtable *rth)
75{
76#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
77 struct ip_mp_alg_ops *ops = ip_mp_alg_table[rth->rt_multipath_alg];
78
79 if (ops && ops->mp_alg_remove &&
80 (rth->u.dst.flags & DST_BALANCED))
81 ops->mp_alg_remove(rth);
82#endif
83}
84
85static inline int multipath_comparekeys(const struct flowi *flp1,
86 const struct flowi *flp2)
87{
88 return flp1->fl4_dst == flp2->fl4_dst &&
89 flp1->fl4_src == flp2->fl4_src &&
90 flp1->oif == flp2->oif &&
91 flp1->mark == flp2->mark &&
92 !((flp1->fl4_tos ^ flp2->fl4_tos) &
93 (IPTOS_RT_MASK | RTO_ONLINK));
94}
95
96#endif /* _NET_IP_MP_ALG_H */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 78a0d06d98d5..46b9dce82f6e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -512,10 +512,6 @@ extern int ipv6_ext_hdr(u8 nexthdr);
512 512
513extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); 513extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
514 514
515extern struct ipv6_txoptions * ipv6_invert_rthdr(struct sock *sk,
516 struct ipv6_rt_hdr *hdr);
517
518
519/* 515/*
520 * socket options (ipv6_sockglue.c) 516 * socket options (ipv6_sockglue.c)
521 */ 517 */
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index 36bee441aa56..08387553b57e 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -125,6 +125,9 @@ extern void irda_sysctl_unregister(void);
125extern int irsock_init(void); 125extern int irsock_init(void);
126extern void irsock_cleanup(void); 126extern void irsock_cleanup(void);
127 127
128extern int irda_nl_register(void);
129extern void irda_nl_unregister(void);
130
128extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, 131extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
129 struct packet_type *ptype, 132 struct packet_type *ptype,
130 struct net_device *orig_dev); 133 struct net_device *orig_dev);
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
index a3d370efb903..9d0c78ea92f5 100644
--- a/include/net/irda/irlap.h
+++ b/include/net/irda/irlap.h
@@ -208,6 +208,8 @@ struct irlap_cb {
208 int xbofs_delay; /* Nr of XBOF's used to MTT */ 208 int xbofs_delay; /* Nr of XBOF's used to MTT */
209 int bofs_count; /* Negotiated extra BOFs */ 209 int bofs_count; /* Negotiated extra BOFs */
210 int next_bofs; /* Negotiated extra BOFs after next frame */ 210 int next_bofs; /* Negotiated extra BOFs after next frame */
211
212 int mode; /* IrLAP mode (primary, secondary or monitor) */
211}; 213};
212 214
213/* 215/*
diff --git a/include/net/mip6.h b/include/net/mip6.h
index 68263c6d9996..63272610a24a 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -54,8 +54,4 @@ struct ip6_mh {
54#define IP6_MH_TYPE_BERROR 7 /* Binding Error */ 54#define IP6_MH_TYPE_BERROR 7 /* Binding Error */
55#define IP6_MH_TYPE_MAX IP6_MH_TYPE_BERROR 55#define IP6_MH_TYPE_MAX IP6_MH_TYPE_BERROR
56 56
57extern int mip6_init(void);
58extern void mip6_fini(void);
59extern int mip6_mh_filter(struct sock *sk, struct sk_buff *skb);
60
61#endif 57#endif
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 1401ccc051c4..3ed4e14970c5 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -9,29 +9,8 @@
9#ifndef _NF_CONNTRACK_IPV4_H 9#ifndef _NF_CONNTRACK_IPV4_H
10#define _NF_CONNTRACK_IPV4_H 10#define _NF_CONNTRACK_IPV4_H
11 11
12#ifdef CONFIG_NF_NAT_NEEDED
13#include <net/netfilter/nf_nat.h>
14#include <linux/netfilter/nf_conntrack_pptp.h>
15
16/* per conntrack: nat application helper private data */
17union nf_conntrack_nat_help {
18 /* insert nat helper private data here */
19 struct nf_nat_pptp nat_pptp_info;
20};
21
22struct nf_conn_nat {
23 struct nf_nat_info info;
24 union nf_conntrack_nat_help help;
25#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
26 defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
27 int masq_index;
28#endif
29};
30#endif /* CONFIG_NF_NAT_NEEDED */
31
32/* Returns new sk_buff, or NULL */ 12/* Returns new sk_buff, or NULL */
33struct sk_buff * 13struct sk_buff *nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
34nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
35 14
36extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; 15extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
37extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; 16extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 4732432f8eb0..d4f02eb0c66c 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -82,6 +82,8 @@ struct nf_conn_help {
82 82
83 union nf_conntrack_help help; 83 union nf_conntrack_help help;
84 84
85 struct hlist_head expectations;
86
85 /* Current number of expected connections */ 87 /* Current number of expected connections */
86 unsigned int expecting; 88 unsigned int expecting;
87}; 89};
@@ -117,9 +119,6 @@ struct nf_conn
117 /* Unique ID that identifies this conntrack*/ 119 /* Unique ID that identifies this conntrack*/
118 unsigned int id; 120 unsigned int id;
119 121
120 /* features - nat, helper, ... used by allocating system */
121 u_int32_t features;
122
123#if defined(CONFIG_NF_CONNTRACK_MARK) 122#if defined(CONFIG_NF_CONNTRACK_MARK)
124 u_int32_t mark; 123 u_int32_t mark;
125#endif 124#endif
@@ -131,8 +130,8 @@ struct nf_conn
131 /* Storage reserved for other modules: */ 130 /* Storage reserved for other modules: */
132 union nf_conntrack_proto proto; 131 union nf_conntrack_proto proto;
133 132
134 /* features dynamically at the end: helper, nat (both optional) */ 133 /* Extensions */
135 char data[0]; 134 struct nf_ct_ext *ext;
136}; 135};
137 136
138static inline struct nf_conn * 137static inline struct nf_conn *
@@ -175,6 +174,10 @@ static inline void nf_ct_put(struct nf_conn *ct)
175extern int nf_ct_l3proto_try_module_get(unsigned short l3proto); 174extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
176extern void nf_ct_l3proto_module_put(unsigned short l3proto); 175extern void nf_ct_l3proto_module_put(unsigned short l3proto);
177 176
177extern struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced);
178extern void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced,
179 int size);
180
178extern struct nf_conntrack_tuple_hash * 181extern struct nf_conntrack_tuple_hash *
179__nf_conntrack_find(const struct nf_conntrack_tuple *tuple, 182__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
180 const struct nf_conn *ignored_conntrack); 183 const struct nf_conn *ignored_conntrack);
@@ -216,9 +219,6 @@ extern void nf_conntrack_tcp_update(struct sk_buff *skb,
216 struct nf_conn *conntrack, 219 struct nf_conn *conntrack,
217 int dir); 220 int dir);
218 221
219/* Call me when a conntrack is destroyed. */
220extern void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);
221
222/* Fake conntrack entry for untracked connections */ 222/* Fake conntrack entry for untracked connections */
223extern struct nf_conn nf_conntrack_untracked; 223extern struct nf_conn nf_conntrack_untracked;
224 224
@@ -262,60 +262,10 @@ do { \
262 local_bh_enable(); \ 262 local_bh_enable(); \
263} while (0) 263} while (0)
264 264
265/* no helper, no nat */
266#define NF_CT_F_BASIC 0
267/* for helper */
268#define NF_CT_F_HELP 1
269/* for nat. */
270#define NF_CT_F_NAT 2
271#define NF_CT_F_NUM 4
272
273extern int 265extern int
274nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size); 266nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size);
275extern void 267extern void
276nf_conntrack_unregister_cache(u_int32_t features); 268nf_conntrack_unregister_cache(u_int32_t features);
277 269
278/* valid combinations:
279 * basic: nf_conn, nf_conn .. nf_conn_help
280 * nat: nf_conn .. nf_conn_nat, nf_conn .. nf_conn_nat .. nf_conn help
281 */
282#ifdef CONFIG_NF_NAT_NEEDED
283static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
284{
285 unsigned int offset = sizeof(struct nf_conn);
286
287 if (!(ct->features & NF_CT_F_NAT))
288 return NULL;
289
290 offset = ALIGN(offset, __alignof__(struct nf_conn_nat));
291 return (struct nf_conn_nat *) ((void *)ct + offset);
292}
293
294static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
295{
296 unsigned int offset = sizeof(struct nf_conn);
297
298 if (!(ct->features & NF_CT_F_HELP))
299 return NULL;
300 if (ct->features & NF_CT_F_NAT) {
301 offset = ALIGN(offset, __alignof__(struct nf_conn_nat));
302 offset += sizeof(struct nf_conn_nat);
303 }
304
305 offset = ALIGN(offset, __alignof__(struct nf_conn_help));
306 return (struct nf_conn_help *) ((void *)ct + offset);
307}
308#else /* No NAT */
309static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
310{
311 unsigned int offset = sizeof(struct nf_conn);
312
313 if (!(ct->features & NF_CT_F_HELP))
314 return NULL;
315
316 offset = ALIGN(offset, __alignof__(struct nf_conn_help));
317 return (struct nf_conn_help *) ((void *)ct + offset);
318}
319#endif /* CONFIG_NF_NAT_NEEDED */
320#endif /* __KERNEL__ */ 270#endif /* __KERNEL__ */
321#endif /* _NF_CONNTRACK_H */ 271#endif /* _NF_CONNTRACK_H */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 9fb906688ffa..4056f5f08da1 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -30,6 +30,9 @@ extern void nf_conntrack_cleanup(void);
30extern int nf_conntrack_proto_init(void); 30extern int nf_conntrack_proto_init(void);
31extern void nf_conntrack_proto_fini(void); 31extern void nf_conntrack_proto_fini(void);
32 32
33extern int nf_conntrack_helper_init(void);
34extern void nf_conntrack_helper_fini(void);
35
33struct nf_conntrack_l3proto; 36struct nf_conntrack_l3proto;
34extern struct nf_conntrack_l3proto *nf_ct_find_l3proto(u_int16_t pf); 37extern struct nf_conntrack_l3proto *nf_ct_find_l3proto(u_int16_t pf);
35/* Like above, but you already have conntrack read lock. */ 38/* Like above, but you already have conntrack read lock. */
@@ -55,8 +58,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
55 58
56/* Find a connection corresponding to a tuple. */ 59/* Find a connection corresponding to a tuple. */
57extern struct nf_conntrack_tuple_hash * 60extern struct nf_conntrack_tuple_hash *
58nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple, 61nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple);
59 const struct nf_conn *ignored_conntrack);
60 62
61extern int __nf_conntrack_confirm(struct sk_buff **pskb); 63extern int __nf_conntrack_confirm(struct sk_buff **pskb);
62 64
@@ -81,9 +83,8 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
81 struct nf_conntrack_l3proto *l3proto, 83 struct nf_conntrack_l3proto *l3proto,
82 struct nf_conntrack_l4proto *proto); 84 struct nf_conntrack_l4proto *proto);
83 85
84extern struct list_head *nf_conntrack_hash; 86extern struct hlist_head *nf_conntrack_hash;
85extern struct list_head nf_conntrack_expect_list;
86extern rwlock_t nf_conntrack_lock ; 87extern rwlock_t nf_conntrack_lock ;
87extern struct list_head unconfirmed; 88extern struct hlist_head unconfirmed;
88 89
89#endif /* _NF_CONNTRACK_CORE_H */ 90#endif /* _NF_CONNTRACK_CORE_H */
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 811c9073c532..f0b9078235c9 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -49,15 +49,15 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event,
49 atomic_notifier_call_chain(&nf_conntrack_chain, event, ct); 49 atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);
50} 50}
51 51
52extern struct atomic_notifier_head nf_conntrack_expect_chain; 52extern struct atomic_notifier_head nf_ct_expect_chain;
53extern int nf_conntrack_expect_register_notifier(struct notifier_block *nb); 53extern int nf_ct_expect_register_notifier(struct notifier_block *nb);
54extern int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb); 54extern int nf_ct_expect_unregister_notifier(struct notifier_block *nb);
55 55
56static inline void 56static inline void
57nf_conntrack_expect_event(enum ip_conntrack_expect_events event, 57nf_ct_expect_event(enum ip_conntrack_expect_events event,
58 struct nf_conntrack_expect *exp) 58 struct nf_conntrack_expect *exp)
59{ 59{
60 atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp); 60 atomic_notifier_call_chain(&nf_ct_expect_chain, event, exp);
61} 61}
62 62
63#else /* CONFIG_NF_CONNTRACK_EVENTS */ 63#else /* CONFIG_NF_CONNTRACK_EVENTS */
@@ -67,9 +67,8 @@ static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
67static inline void nf_conntrack_event(enum ip_conntrack_events event, 67static inline void nf_conntrack_event(enum ip_conntrack_events event,
68 struct nf_conn *ct) {} 68 struct nf_conn *ct) {}
69static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} 69static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
70static inline void 70static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
71nf_conntrack_expect_event(enum ip_conntrack_expect_events event, 71 struct nf_conntrack_expect *exp) {}
72 struct nf_conntrack_expect *exp) {}
73static inline void nf_ct_event_cache_flush(void) {} 72static inline void nf_ct_event_cache_flush(void) {}
74#endif /* CONFIG_NF_CONNTRACK_EVENTS */ 73#endif /* CONFIG_NF_CONNTRACK_EVENTS */
75 74
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 173c7c1eff23..cae1a0dce365 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -6,17 +6,21 @@
6#define _NF_CONNTRACK_EXPECT_H 6#define _NF_CONNTRACK_EXPECT_H
7#include <net/netfilter/nf_conntrack.h> 7#include <net/netfilter/nf_conntrack.h>
8 8
9extern struct list_head nf_conntrack_expect_list; 9extern struct hlist_head *nf_ct_expect_hash;
10extern struct kmem_cache *nf_conntrack_expect_cachep; 10extern unsigned int nf_ct_expect_hsize;
11extern const struct file_operations exp_file_ops; 11extern unsigned int nf_ct_expect_max;
12 12
13struct nf_conntrack_expect 13struct nf_conntrack_expect
14{ 14{
15 /* Internal linked list (global expectation list) */ 15 /* Conntrack expectation list member */
16 struct list_head list; 16 struct hlist_node lnode;
17
18 /* Hash member */
19 struct hlist_node hnode;
17 20
18 /* We expect this tuple, with the following mask */ 21 /* We expect this tuple, with the following mask */
19 struct nf_conntrack_tuple tuple, mask; 22 struct nf_conntrack_tuple tuple;
23 struct nf_conntrack_tuple_mask mask;
20 24
21 /* Function to call after setup and insertion */ 25 /* Function to call after setup and insertion */
22 void (*expectfn)(struct nf_conn *new, 26 void (*expectfn)(struct nf_conn *new,
@@ -52,29 +56,31 @@ struct nf_conntrack_expect
52 56
53#define NF_CT_EXPECT_PERMANENT 0x1 57#define NF_CT_EXPECT_PERMANENT 0x1
54 58
59int nf_conntrack_expect_init(void);
60void nf_conntrack_expect_fini(void);
55 61
56struct nf_conntrack_expect * 62struct nf_conntrack_expect *
57__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple); 63__nf_ct_expect_find(const struct nf_conntrack_tuple *tuple);
58 64
59struct nf_conntrack_expect * 65struct nf_conntrack_expect *
60nf_conntrack_expect_find_get(const struct nf_conntrack_tuple *tuple); 66nf_ct_expect_find_get(const struct nf_conntrack_tuple *tuple);
61 67
62struct nf_conntrack_expect * 68struct nf_conntrack_expect *
63find_expectation(const struct nf_conntrack_tuple *tuple); 69nf_ct_find_expectation(const struct nf_conntrack_tuple *tuple);
64 70
65void nf_ct_unlink_expect(struct nf_conntrack_expect *exp); 71void nf_ct_unlink_expect(struct nf_conntrack_expect *exp);
66void nf_ct_remove_expectations(struct nf_conn *ct); 72void nf_ct_remove_expectations(struct nf_conn *ct);
67void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp); 73void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
68 74
69/* Allocate space for an expectation: this is mandatory before calling 75/* Allocate space for an expectation: this is mandatory before calling
70 nf_conntrack_expect_related. You will have to call put afterwards. */ 76 nf_ct_expect_related. You will have to call put afterwards. */
71struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me); 77struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me);
72void nf_conntrack_expect_init(struct nf_conntrack_expect *, int, 78void nf_ct_expect_init(struct nf_conntrack_expect *, int,
73 union nf_conntrack_address *, 79 union nf_conntrack_address *,
74 union nf_conntrack_address *, 80 union nf_conntrack_address *,
75 u_int8_t, __be16 *, __be16 *); 81 u_int8_t, __be16 *, __be16 *);
76void nf_conntrack_expect_put(struct nf_conntrack_expect *exp); 82void nf_ct_expect_put(struct nf_conntrack_expect *exp);
77int nf_conntrack_expect_related(struct nf_conntrack_expect *expect); 83int nf_ct_expect_related(struct nf_conntrack_expect *expect);
78 84
79#endif /*_NF_CONNTRACK_EXPECT_H*/ 85#endif /*_NF_CONNTRACK_EXPECT_H*/
80 86
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
new file mode 100644
index 000000000000..73b5711faf32
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -0,0 +1,85 @@
1#ifndef _NF_CONNTRACK_EXTEND_H
2#define _NF_CONNTRACK_EXTEND_H
3
4#include <net/netfilter/nf_conntrack.h>
5
6enum nf_ct_ext_id
7{
8 NF_CT_EXT_HELPER,
9 NF_CT_EXT_NAT,
10 NF_CT_EXT_NUM,
11};
12
13#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
14#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
15
16/* Extensions: optional stuff which isn't permanently in struct. */
17struct nf_ct_ext {
18 u8 offset[NF_CT_EXT_NUM];
19 u8 len;
20 u8 real_len;
21 char data[0];
22};
23
24static inline int nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
25{
26 return (ct->ext && ct->ext->offset[id]);
27}
28
29static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
30{
31 if (!nf_ct_ext_exist(ct, id))
32 return NULL;
33
34 return (void *)ct->ext + ct->ext->offset[id];
35}
36#define nf_ct_ext_find(ext, id) \
37 ((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
38
39/* Destroy all relationships */
40extern void __nf_ct_ext_destroy(struct nf_conn *ct);
41static inline void nf_ct_ext_destroy(struct nf_conn *ct)
42{
43 if (ct->ext)
44 __nf_ct_ext_destroy(ct);
45}
46
47/* Free operation. If you want to free a object referred from private area,
48 * please implement __nf_ct_ext_free() and call it.
49 */
50static inline void nf_ct_ext_free(struct nf_conn *ct)
51{
52 if (ct->ext)
53 kfree(ct->ext);
54}
55
56/* Add this type, returns pointer to data or NULL. */
57void *
58__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
59#define nf_ct_ext_add(ct, id, gfp) \
60 ((id##_TYPE *)__nf_ct_ext_add((ct), (id), (gfp)))
61
62#define NF_CT_EXT_F_PREALLOC 0x0001
63
64struct nf_ct_ext_type
65{
66 /* Destroys relationships (can be NULL). */
67 void (*destroy)(struct nf_conn *ct);
68 /* Called when realloacted (can be NULL).
69 Contents has already been moved. */
70 void (*move)(struct nf_conn *ct, void *old);
71
72 enum nf_ct_ext_id id;
73
74 unsigned int flags;
75
76 /* Length and min alignment. */
77 u8 len;
78 u8 align;
79 /* initial size of nf_ct_ext. */
80 u8 alloc_size;
81};
82
83int nf_ct_extend_register(struct nf_ct_ext_type *type);
84void nf_ct_extend_unregister(struct nf_ct_ext_type *type);
85#endif /* _NF_CONNTRACK_EXTEND_H */
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 8c72ac9f0ab8..d04f99964d94 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -10,12 +10,13 @@
10#ifndef _NF_CONNTRACK_HELPER_H 10#ifndef _NF_CONNTRACK_HELPER_H
11#define _NF_CONNTRACK_HELPER_H 11#define _NF_CONNTRACK_HELPER_H
12#include <net/netfilter/nf_conntrack.h> 12#include <net/netfilter/nf_conntrack.h>
13#include <net/netfilter/nf_conntrack_extend.h>
13 14
14struct module; 15struct module;
15 16
16struct nf_conntrack_helper 17struct nf_conntrack_helper
17{ 18{
18 struct list_head list; /* Internal use. */ 19 struct hlist_node hnode; /* Internal use. */
19 20
20 const char *name; /* name of the module */ 21 const char *name; /* name of the module */
21 struct module *me; /* pointer to self */ 22 struct module *me; /* pointer to self */
@@ -23,10 +24,9 @@ struct nf_conntrack_helper
23 * expected connections */ 24 * expected connections */
24 unsigned int timeout; /* timeout for expecteds */ 25 unsigned int timeout; /* timeout for expecteds */
25 26
26 /* Mask of things we will help (compared against server response) */ 27 /* Tuple of things we will help (compared against server response) */
27 struct nf_conntrack_tuple tuple; 28 struct nf_conntrack_tuple tuple;
28 struct nf_conntrack_tuple mask; 29
29
30 /* Function to call when data passes; return verdict, or -1 to 30 /* Function to call when data passes; return verdict, or -1 to
31 invalidate. */ 31 invalidate. */
32 int (*help)(struct sk_buff **pskb, 32 int (*help)(struct sk_buff **pskb,
@@ -52,4 +52,10 @@ extern void nf_ct_helper_put(struct nf_conntrack_helper *helper);
52extern int nf_conntrack_helper_register(struct nf_conntrack_helper *); 52extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
53extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *); 53extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
54 54
55extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
56
57static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
58{
59 return nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
60}
55#endif /*_NF_CONNTRACK_HELPER_H*/ 61#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 96a58d8e1d3f..890752d7f673 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -64,8 +64,6 @@ struct nf_conntrack_l3proto
64 int (*prepare)(struct sk_buff **pskb, unsigned int hooknum, 64 int (*prepare)(struct sk_buff **pskb, unsigned int hooknum,
65 unsigned int *dataoff, u_int8_t *protonum); 65 unsigned int *dataoff, u_int8_t *protonum);
66 66
67 u_int32_t (*get_features)(const struct nf_conntrack_tuple *tuple);
68
69 int (*tuple_to_nfattr)(struct sk_buff *skb, 67 int (*tuple_to_nfattr)(struct sk_buff *skb,
70 const struct nf_conntrack_tuple *t); 68 const struct nf_conntrack_tuple *t);
71 69
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 5d72b16e876f..040dae5f0c9e 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -100,6 +100,14 @@ struct nf_conntrack_tuple
100 } dst; 100 } dst;
101}; 101};
102 102
103struct nf_conntrack_tuple_mask
104{
105 struct {
106 union nf_conntrack_address u3;
107 union nf_conntrack_man_proto u;
108 } src;
109};
110
103/* This is optimized opposed to a memset of the whole structure. Everything we 111/* This is optimized opposed to a memset of the whole structure. Everything we
104 * really care about is the source/destination unions */ 112 * really care about is the source/destination unions */
105#define NF_CT_TUPLE_U_BLANK(tuple) \ 113#define NF_CT_TUPLE_U_BLANK(tuple) \
@@ -112,11 +120,11 @@ struct nf_conntrack_tuple
112 120
113#ifdef __KERNEL__ 121#ifdef __KERNEL__
114 122
115#define NF_CT_DUMP_TUPLE(tp) \ 123#define NF_CT_DUMP_TUPLE(tp) \
116DEBUGP("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", \ 124pr_debug("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", \
117 (tp), (tp)->src.l3num, (tp)->dst.protonum, \ 125 (tp), (tp)->src.l3num, (tp)->dst.protonum, \
118 NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \ 126 NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \
119 NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all)) 127 NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all))
120 128
121/* If we're the first tuple, it's the original dir. */ 129/* If we're the first tuple, it's the original dir. */
122#define NF_CT_DIRECTION(h) \ 130#define NF_CT_DIRECTION(h) \
@@ -125,8 +133,7 @@ DEBUGP("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", \
125/* Connections have two entries in the hash table: one for each way */ 133/* Connections have two entries in the hash table: one for each way */
126struct nf_conntrack_tuple_hash 134struct nf_conntrack_tuple_hash
127{ 135{
128 struct list_head list; 136 struct hlist_node hnode;
129
130 struct nf_conntrack_tuple tuple; 137 struct nf_conntrack_tuple tuple;
131}; 138};
132 139
@@ -162,31 +169,44 @@ static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
162 return nf_ct_tuple_src_equal(t1, t2) && nf_ct_tuple_dst_equal(t1, t2); 169 return nf_ct_tuple_src_equal(t1, t2) && nf_ct_tuple_dst_equal(t1, t2);
163} 170}
164 171
172static inline int nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
173 const struct nf_conntrack_tuple_mask *m2)
174{
175 return (m1->src.u3.all[0] == m2->src.u3.all[0] &&
176 m1->src.u3.all[1] == m2->src.u3.all[1] &&
177 m1->src.u3.all[2] == m2->src.u3.all[2] &&
178 m1->src.u3.all[3] == m2->src.u3.all[3] &&
179 m1->src.u.all == m2->src.u.all);
180}
181
182static inline int nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
183 const struct nf_conntrack_tuple *t2,
184 const struct nf_conntrack_tuple_mask *mask)
185{
186 int count;
187
188 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
189 if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) &
190 mask->src.u3.all[count])
191 return 0;
192 }
193
194 if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all)
195 return 0;
196
197 if (t1->src.l3num != t2->src.l3num ||
198 t1->dst.protonum != t2->dst.protonum)
199 return 0;
200
201 return 1;
202}
203
165static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t, 204static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
166 const struct nf_conntrack_tuple *tuple, 205 const struct nf_conntrack_tuple *tuple,
167 const struct nf_conntrack_tuple *mask) 206 const struct nf_conntrack_tuple_mask *mask)
168{ 207{
169 int count = 0; 208 return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&
170 209 nf_ct_tuple_dst_equal(t, tuple);
171 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
172 if ((t->src.u3.all[count] ^ tuple->src.u3.all[count]) &
173 mask->src.u3.all[count])
174 return 0;
175 }
176
177 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
178 if ((t->dst.u3.all[count] ^ tuple->dst.u3.all[count]) &
179 mask->dst.u3.all[count])
180 return 0;
181 }
182
183 if ((t->src.u.all ^ tuple->src.u.all) & mask->src.u.all ||
184 (t->dst.u.all ^ tuple->dst.u.all) & mask->dst.u.all ||
185 (t->src.l3num ^ tuple->src.l3num) & mask->src.l3num ||
186 (t->dst.protonum ^ tuple->dst.protonum) & mask->dst.protonum)
187 return 0;
188
189 return 1;
190} 210}
191 211
192#endif /* _NF_CONNTRACK_TUPLE_H */ 212#endif /* _NF_CONNTRACK_TUPLE_H */
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index bc57dd7b9b5c..6ae52f7c9f55 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -51,16 +51,31 @@ struct nf_nat_multi_range_compat
51 51
52#ifdef __KERNEL__ 52#ifdef __KERNEL__
53#include <linux/list.h> 53#include <linux/list.h>
54#include <linux/netfilter/nf_conntrack_pptp.h>
55#include <net/netfilter/nf_conntrack_extend.h>
54 56
55/* The structure embedded in the conntrack structure. */ 57/* per conntrack: nat application helper private data */
56struct nf_nat_info 58union nf_conntrack_nat_help
57{ 59{
58 struct list_head bysource; 60 /* insert nat helper private data here */
59 struct nf_nat_seq seq[IP_CT_DIR_MAX]; 61 struct nf_nat_pptp nat_pptp_info;
60}; 62};
61 63
62struct nf_conn; 64struct nf_conn;
63 65
66/* The structure embedded in the conntrack structure. */
67struct nf_conn_nat
68{
69 struct hlist_node bysource;
70 struct nf_nat_seq seq[IP_CT_DIR_MAX];
71 struct nf_conn *ct;
72 union nf_conntrack_nat_help help;
73#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
74 defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
75 int masq_index;
76#endif
77};
78
64/* Set up the info structure to map into this range. */ 79/* Set up the info structure to map into this range. */
65extern unsigned int nf_nat_setup_info(struct nf_conn *ct, 80extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
66 const struct nf_nat_range *range, 81 const struct nf_nat_range *range,
@@ -70,7 +85,10 @@ extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
70extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, 85extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
71 const struct nf_conn *ignored_conntrack); 86 const struct nf_conn *ignored_conntrack);
72 87
73extern int nf_nat_module_is_loaded; 88static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
89{
90 return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
91}
74 92
75#else /* !__KERNEL__: iptables wants this to compile. */ 93#else /* !__KERNEL__: iptables wants this to compile. */
76#define nf_nat_multi_range nf_nat_multi_range_compat 94#define nf_nat_multi_range nf_nat_multi_range_compat
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 9778ffa93440..c3cd127ba4bb 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -2,6 +2,7 @@
2#define _NF_NAT_CORE_H 2#define _NF_NAT_CORE_H
3#include <linux/list.h> 3#include <linux/list.h>
4#include <net/netfilter/nf_conntrack.h> 4#include <net/netfilter/nf_conntrack.h>
5#include <net/netfilter/nf_nat.h>
5 6
6/* This header used to share core functionality between the standalone 7/* This header used to share core functionality between the standalone
7 NAT module, and the compatibility layer's use of NAT for masquerading. */ 8 NAT module, and the compatibility layer's use of NAT for masquerading. */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 7b510a9edb91..d7b824be5422 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -118,6 +118,9 @@
118 * Nested Attributes Construction: 118 * Nested Attributes Construction:
119 * nla_nest_start(skb, type) start a nested attribute 119 * nla_nest_start(skb, type) start a nested attribute
120 * nla_nest_end(skb, nla) finalize a nested attribute 120 * nla_nest_end(skb, nla) finalize a nested attribute
121 * nla_nest_compat_start(skb, type, start a nested compat attribute
122 * len, data)
123 * nla_nest_compat_end(skb, type) finalize a nested compat attribute
121 * nla_nest_cancel(skb, nla) cancel nested attribute construction 124 * nla_nest_cancel(skb, nla) cancel nested attribute construction
122 * 125 *
123 * Attribute Length Calculations: 126 * Attribute Length Calculations:
@@ -152,6 +155,7 @@
152 * nla_find_nested() find attribute in nested attributes 155 * nla_find_nested() find attribute in nested attributes
153 * nla_parse() parse and validate stream of attrs 156 * nla_parse() parse and validate stream of attrs
154 * nla_parse_nested() parse nested attribuets 157 * nla_parse_nested() parse nested attribuets
158 * nla_parse_nested_compat() parse nested compat attributes
155 * nla_for_each_attr() loop over all attributes 159 * nla_for_each_attr() loop over all attributes
156 * nla_for_each_nested() loop over the nested attributes 160 * nla_for_each_nested() loop over the nested attributes
157 *========================================================================= 161 *=========================================================================
@@ -170,6 +174,7 @@ enum {
170 NLA_FLAG, 174 NLA_FLAG,
171 NLA_MSECS, 175 NLA_MSECS,
172 NLA_NESTED, 176 NLA_NESTED,
177 NLA_NESTED_COMPAT,
173 NLA_NUL_STRING, 178 NLA_NUL_STRING,
174 NLA_BINARY, 179 NLA_BINARY,
175 __NLA_TYPE_MAX, 180 __NLA_TYPE_MAX,
@@ -190,6 +195,7 @@ enum {
190 * NLA_NUL_STRING Maximum length of string (excluding NUL) 195 * NLA_NUL_STRING Maximum length of string (excluding NUL)
191 * NLA_FLAG Unused 196 * NLA_FLAG Unused
192 * NLA_BINARY Maximum length of attribute payload 197 * NLA_BINARY Maximum length of attribute payload
198 * NLA_NESTED_COMPAT Exact length of structure payload
193 * All other Exact length of attribute payload 199 * All other Exact length of attribute payload
194 * 200 *
195 * Example: 201 * Example:
@@ -733,6 +739,39 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
733{ 739{
734 return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy); 740 return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy);
735} 741}
742
743/**
744 * nla_parse_nested_compat - parse nested compat attributes
745 * @tb: destination array with maxtype+1 elements
746 * @maxtype: maximum attribute type to be expected
747 * @nla: attribute containing the nested attributes
748 * @data: pointer to point to contained structure
749 * @len: length of contained structure
750 * @policy: validation policy
751 *
752 * Parse a nested compat attribute. The compat attribute contains a structure
753 * and optionally a set of nested attributes. On success the data pointer
754 * points to the nested data and tb contains the parsed attributes
755 * (see nla_parse).
756 */
757static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
758 struct nlattr *nla,
759 const struct nla_policy *policy,
760 int len)
761{
762 if (nla_len(nla) < len)
763 return -1;
764 if (nla_len(nla) >= NLA_ALIGN(len) + sizeof(struct nlattr))
765 return nla_parse_nested(tb, maxtype,
766 nla_data(nla) + NLA_ALIGN(len),
767 policy);
768 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
769 return 0;
770}
771
772#define nla_parse_nested_compat(tb, maxtype, nla, policy, data, len) \
773({ data = nla_len(nla) >= len ? nla_data(nla) : NULL; \
774 __nla_parse_nested_compat(tb, maxtype, nla, policy, len); })
736/** 775/**
737 * nla_put_u8 - Add a u16 netlink attribute to a socket buffer 776 * nla_put_u8 - Add a u16 netlink attribute to a socket buffer
738 * @skb: socket buffer to add attribute to 777 * @skb: socket buffer to add attribute to
@@ -965,6 +1004,51 @@ static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
965} 1004}
966 1005
967/** 1006/**
1007 * nla_nest_compat_start - Start a new level of nested compat attributes
1008 * @skb: socket buffer to add attributes to
1009 * @attrtype: attribute type of container
1010 * @attrlen: length of structure
1011 * @data: pointer to structure
1012 *
1013 * Start a nested compat attribute that contains both a structure and
1014 * a set of nested attributes.
1015 *
1016 * Returns the container attribute
1017 */
1018static inline struct nlattr *nla_nest_compat_start(struct sk_buff *skb,
1019 int attrtype, int attrlen,
1020 const void *data)
1021{
1022 struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
1023
1024 if (nla_put(skb, attrtype, attrlen, data) < 0)
1025 return NULL;
1026 if (nla_nest_start(skb, attrtype) == NULL) {
1027 nlmsg_trim(skb, start);
1028 return NULL;
1029 }
1030 return start;
1031}
1032
1033/**
1034 * nla_nest_compat_end - Finalize nesting of compat attributes
1035 * @skb: socket buffer the attribtues are stored in
1036 * @start: container attribute
1037 *
1038 * Corrects the container attribute header to include the all
1039 * appeneded attributes.
1040 *
1041 * Returns the total data length of the skb.
1042 */
1043static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start)
1044{
1045 struct nlattr *nest = (void *)start + NLMSG_ALIGN(start->nla_len);
1046
1047 start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
1048 return nla_nest_end(skb, nest);
1049}
1050
1051/**
968 * nla_nest_cancel - Cancel nesting of attributes 1052 * nla_nest_cancel - Cancel nesting of attributes
969 * @skb: socket buffer the message is stored in 1053 * @skb: socket buffer the message is stored in
970 * @start: container attribute 1054 * @start: container attribute
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 4129df708079..6c29920cbe29 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -306,6 +306,8 @@ static inline int tcf_em_tree_match(struct sk_buff *skb,
306 return 1; 306 return 1;
307} 307}
308 308
309#define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
310
309#else /* CONFIG_NET_EMATCH */ 311#else /* CONFIG_NET_EMATCH */
310 312
311struct tcf_ematch_tree 313struct tcf_ematch_tree
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index af8960878ef4..a5819891d525 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -3,6 +3,8 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <net/protocol.h>
7
6#define RAWV6_HTABLE_SIZE MAX_INET_PROTOS 8#define RAWV6_HTABLE_SIZE MAX_INET_PROTOS
7extern struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE]; 9extern struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
8extern rwlock_t raw_v6_lock; 10extern rwlock_t raw_v6_lock;
@@ -23,6 +25,13 @@ extern void rawv6_err(struct sock *sk,
23 int type, int code, 25 int type, int code,
24 int offset, __be32 info); 26 int offset, __be32 info);
25 27
28#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
29int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
30 struct sk_buff *skb));
31int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
32 struct sk_buff *skb));
33#endif
34
26#endif 35#endif
27 36
28#endif 37#endif
diff --git a/include/net/route.h b/include/net/route.h
index 749e4dfe5ff3..f7ce6259f86f 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -62,7 +62,6 @@ struct rtable
62 62
63 unsigned rt_flags; 63 unsigned rt_flags;
64 __u16 rt_type; 64 __u16 rt_type;
65 __u16 rt_multipath_alg;
66 65
67 __be32 rt_dst; /* Path destination */ 66 __be32 rt_dst; /* Path destination */
68 __be32 rt_src; /* Path source */ 67 __be32 rt_src; /* Path source */
@@ -136,7 +135,7 @@ static inline void ip_rt_put(struct rtable * rt)
136 135
137#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3) 136#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
138 137
139extern __u8 ip_tos2prio[16]; 138extern const __u8 ip_tos2prio[16];
140 139
141static inline char rt_tos2priority(u8 tos) 140static inline char rt_tos2priority(u8 tos)
142{ 141{
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3b3d4745618d..3861c05cdf0f 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -22,4 +22,62 @@ static inline int rtnl_msg_family(struct nlmsghdr *nlh)
22 return AF_UNSPEC; 22 return AF_UNSPEC;
23} 23}
24 24
25/**
26 * struct rtnl_link_ops - rtnetlink link operations
27 *
28 * @list: Used internally
29 * @kind: Identifier
30 * @maxtype: Highest device specific netlink attribute number
31 * @policy: Netlink policy for device specific attribute validation
32 * @validate: Optional validation function for netlink/changelink parameters
33 * @priv_size: sizeof net_device private space
34 * @setup: net_device setup function
35 * @newlink: Function for configuring and registering a new device
36 * @changelink: Function for changing parameters of an existing device
37 * @dellink: Function to remove a device
38 * @get_size: Function to calculate required room for dumping device
39 * specific netlink attributes
40 * @fill_info: Function to dump device specific netlink attributes
41 * @get_xstats_size: Function to calculate required room for dumping devic
42 * specific statistics
43 * @fill_xstats: Function to dump device specific statistics
44 */
45struct rtnl_link_ops {
46 struct list_head list;
47
48 const char *kind;
49
50 size_t priv_size;
51 void (*setup)(struct net_device *dev);
52
53 int maxtype;
54 const struct nla_policy *policy;
55 int (*validate)(struct nlattr *tb[],
56 struct nlattr *data[]);
57
58 int (*newlink)(struct net_device *dev,
59 struct nlattr *tb[],
60 struct nlattr *data[]);
61 int (*changelink)(struct net_device *dev,
62 struct nlattr *tb[],
63 struct nlattr *data[]);
64 void (*dellink)(struct net_device *dev);
65
66 size_t (*get_size)(const struct net_device *dev);
67 int (*fill_info)(struct sk_buff *skb,
68 const struct net_device *dev);
69
70 size_t (*get_xstats_size)(const struct net_device *dev);
71 int (*fill_xstats)(struct sk_buff *skb,
72 const struct net_device *dev);
73};
74
75extern int __rtnl_link_register(struct rtnl_link_ops *ops);
76extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
77
78extern int rtnl_link_register(struct rtnl_link_ops *ops);
79extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
80
81#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
82
25#endif 83#endif
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
index 333bba6dc522..cfc4ba46de8f 100644
--- a/include/net/tipc/tipc_port.h
+++ b/include/net/tipc/tipc_port.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports 2 * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
3 * 3 *
4 * Copyright (c) 1994-2006, Ericsson AB 4 * Copyright (c) 1994-2007, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -55,6 +55,7 @@
55 * @conn_unacked: number of unacknowledged messages received from peer port 55 * @conn_unacked: number of unacknowledged messages received from peer port
56 * @published: non-zero if port has one or more associated names 56 * @published: non-zero if port has one or more associated names
57 * @congested: non-zero if cannot send because of link or port congestion 57 * @congested: non-zero if cannot send because of link or port congestion
58 * @max_pkt: maximum packet size "hint" used when building messages sent by port
58 * @ref: unique reference to port in TIPC object registry 59 * @ref: unique reference to port in TIPC object registry
59 * @phdr: preformatted message header used when sending messages 60 * @phdr: preformatted message header used when sending messages
60 */ 61 */
@@ -68,6 +69,7 @@ struct tipc_port {
68 u32 conn_unacked; 69 u32 conn_unacked;
69 int published; 70 int published;
70 u32 congested; 71 u32 congested;
72 u32 max_pkt;
71 u32 ref; 73 u32 ref;
72 struct tipc_msg phdr; 74 struct tipc_msg phdr;
73}; 75};
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 311f25af5e1a..ae959e950174 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -19,9 +19,19 @@
19#include <net/ipv6.h> 19#include <net/ipv6.h>
20#include <net/ip6_fib.h> 20#include <net/ip6_fib.h>
21 21
22#define XFRM_PROTO_ESP 50
23#define XFRM_PROTO_AH 51
24#define XFRM_PROTO_COMP 108
25#define XFRM_PROTO_IPIP 4
26#define XFRM_PROTO_IPV6 41
27#define XFRM_PROTO_ROUTING IPPROTO_ROUTING
28#define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
29
22#define XFRM_ALIGN8(len) (((len) + 7) & ~7) 30#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
23#define MODULE_ALIAS_XFRM_MODE(family, encap) \ 31#define MODULE_ALIAS_XFRM_MODE(family, encap) \
24 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap)) 32 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
33#define MODULE_ALIAS_XFRM_TYPE(family, proto) \
34 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
25 35
26extern struct sock *xfrm_nl; 36extern struct sock *xfrm_nl;
27extern u32 sysctl_xfrm_aevent_etime; 37extern u32 sysctl_xfrm_aevent_etime;
@@ -509,11 +519,9 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
509 case IPPROTO_ICMPV6: 519 case IPPROTO_ICMPV6:
510 port = htons(fl->fl_icmp_type); 520 port = htons(fl->fl_icmp_type);
511 break; 521 break;
512#ifdef CONFIG_IPV6_MIP6
513 case IPPROTO_MH: 522 case IPPROTO_MH:
514 port = htons(fl->fl_mh_type); 523 port = htons(fl->fl_mh_type);
515 break; 524 break;
516#endif
517 default: 525 default:
518 port = 0; /*XXX*/ 526 port = 0; /*XXX*/
519 } 527 }
@@ -920,6 +928,10 @@ extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t
920 struct flowi *fl, struct xfrm_tmpl *tmpl, 928 struct flowi *fl, struct xfrm_tmpl *tmpl,
921 struct xfrm_policy *pol, int *err, 929 struct xfrm_policy *pol, int *err,
922 unsigned short family); 930 unsigned short family);
931extern struct xfrm_state * xfrm_stateonly_find(xfrm_address_t *daddr,
932 xfrm_address_t *saddr,
933 unsigned short family,
934 u8 mode, u8 proto, u32 reqid);
923extern int xfrm_state_check_expire(struct xfrm_state *x); 935extern int xfrm_state_check_expire(struct xfrm_state *x);
924extern void xfrm_state_insert(struct xfrm_state *x); 936extern void xfrm_state_insert(struct xfrm_state *x);
925extern int xfrm_state_add(struct xfrm_state *x); 937extern int xfrm_state_add(struct xfrm_state *x);
@@ -991,7 +1003,7 @@ extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
991 u8 **prevhdr); 1003 u8 **prevhdr);
992 1004
993#ifdef CONFIG_XFRM 1005#ifdef CONFIG_XFRM
994extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type); 1006extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
995extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen); 1007extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
996extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family); 1008extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family);
997#else 1009#else
@@ -1000,12 +1012,13 @@ static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optv
1000 return -ENOPROTOOPT; 1012 return -ENOPROTOOPT;
1001} 1013}
1002 1014
1003static inline int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) 1015static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1004{ 1016{
1005 /* should not happen */ 1017 /* should not happen */
1006 kfree_skb(skb); 1018 kfree_skb(skb);
1007 return 0; 1019 return 0;
1008} 1020}
1021
1009static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family) 1022static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family)
1010{ 1023{
1011 return -EINVAL; 1024 return -EINVAL;
diff --git a/net/802/tr.c b/net/802/tr.c
index 0ba1946211c9..e56e61a7f545 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -567,7 +567,7 @@ static int rif_seq_show(struct seq_file *seq, void *v)
567} 567}
568 568
569 569
570static struct seq_operations rif_seq_ops = { 570static const struct seq_operations rif_seq_ops = {
571 .start = rif_seq_start, 571 .start = rif_seq_start,
572 .next = rif_seq_next, 572 .next = rif_seq_next,
573 .stop = rif_seq_stop, 573 .stop = rif_seq_stop,
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
index 97feb44dbdce..10ca7f486c3a 100644
--- a/net/8021q/Makefile
+++ b/net/8021q/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_VLAN_8021Q) += 8021q.o 5obj-$(CONFIG_VLAN_8021Q) += 8021q.o
6 6
78021q-objs := vlan.o vlan_dev.o 78021q-objs := vlan.o vlan_dev.o vlan_netlink.o
8 8
9ifeq ($(CONFIG_PROC_FS),y) 9ifeq ($(CONFIG_PROC_FS),y)
108021q-objs += vlanproc.o 108021q-objs += vlanproc.o
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index de78c9dd713b..abb9900edb3f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -97,35 +97,22 @@ static int __init vlan_proto_init(void)
97 97
98 /* Register us to receive netdevice events */ 98 /* Register us to receive netdevice events */
99 err = register_netdevice_notifier(&vlan_notifier_block); 99 err = register_netdevice_notifier(&vlan_notifier_block);
100 if (err < 0) { 100 if (err < 0)
101 dev_remove_pack(&vlan_packet_type); 101 goto err1;
102 vlan_proc_cleanup();
103 return err;
104 }
105 102
106 vlan_ioctl_set(vlan_ioctl_handler); 103 err = vlan_netlink_init();
104 if (err < 0)
105 goto err2;
107 106
107 vlan_ioctl_set(vlan_ioctl_handler);
108 return 0; 108 return 0;
109}
110
111/* Cleanup all vlan devices
112 * Note: devices that have been registered that but not
113 * brought up will exist but have no module ref count.
114 */
115static void __exit vlan_cleanup_devices(void)
116{
117 struct net_device *dev, *nxt;
118
119 rtnl_lock();
120 for_each_netdev_safe(dev, nxt) {
121 if (dev->priv_flags & IFF_802_1Q_VLAN) {
122 unregister_vlan_dev(VLAN_DEV_INFO(dev)->real_dev,
123 VLAN_DEV_INFO(dev)->vlan_id);
124 109
125 unregister_netdevice(dev); 110err2:
126 } 111 unregister_netdevice_notifier(&vlan_notifier_block);
127 } 112err1:
128 rtnl_unlock(); 113 vlan_proc_cleanup();
114 dev_remove_pack(&vlan_packet_type);
115 return err;
129} 116}
130 117
131/* 118/*
@@ -136,13 +123,13 @@ static void __exit vlan_cleanup_module(void)
136{ 123{
137 int i; 124 int i;
138 125
126 vlan_netlink_fini();
139 vlan_ioctl_set(NULL); 127 vlan_ioctl_set(NULL);
140 128
141 /* Un-register us from receiving netdevice events */ 129 /* Un-register us from receiving netdevice events */
142 unregister_netdevice_notifier(&vlan_notifier_block); 130 unregister_netdevice_notifier(&vlan_notifier_block);
143 131
144 dev_remove_pack(&vlan_packet_type); 132 dev_remove_pack(&vlan_packet_type);
145 vlan_cleanup_devices();
146 133
147 /* This table must be empty if there are no module 134 /* This table must be empty if there are no module
148 * references left. 135 * references left.
@@ -197,6 +184,34 @@ static void vlan_group_free(struct vlan_group *grp)
197 kfree(grp); 184 kfree(grp);
198} 185}
199 186
187static struct vlan_group *vlan_group_alloc(int ifindex)
188{
189 struct vlan_group *grp;
190 unsigned int size;
191 unsigned int i;
192
193 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
194 if (!grp)
195 return NULL;
196
197 size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
198
199 for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
200 grp->vlan_devices_arrays[i] = kzalloc(size, GFP_KERNEL);
201 if (!grp->vlan_devices_arrays[i])
202 goto err;
203 }
204
205 grp->real_dev_ifindex = ifindex;
206 hlist_add_head_rcu(&grp->hlist,
207 &vlan_group_hash[vlan_grp_hashfn(ifindex)]);
208 return grp;
209
210err:
211 vlan_group_free(grp);
212 return NULL;
213}
214
200static void vlan_rcu_free(struct rcu_head *rcu) 215static void vlan_rcu_free(struct rcu_head *rcu)
201{ 216{
202 vlan_group_free(container_of(rcu, struct vlan_group, rcu)); 217 vlan_group_free(container_of(rcu, struct vlan_group, rcu));
@@ -278,50 +293,66 @@ static int unregister_vlan_dev(struct net_device *real_dev,
278 return ret; 293 return ret;
279} 294}
280 295
281static int unregister_vlan_device(const char *vlan_IF_name) 296int unregister_vlan_device(struct net_device *dev)
282{ 297{
283 struct net_device *dev = NULL;
284 int ret; 298 int ret;
285 299
300 ret = unregister_vlan_dev(VLAN_DEV_INFO(dev)->real_dev,
301 VLAN_DEV_INFO(dev)->vlan_id);
302 unregister_netdevice(dev);
286 303
287 dev = dev_get_by_name(vlan_IF_name); 304 if (ret == 1)
288 ret = -EINVAL; 305 ret = 0;
289 if (dev) { 306 return ret;
290 if (dev->priv_flags & IFF_802_1Q_VLAN) { 307}
291 rtnl_lock();
292 308
293 ret = unregister_vlan_dev(VLAN_DEV_INFO(dev)->real_dev, 309/*
294 VLAN_DEV_INFO(dev)->vlan_id); 310 * vlan network devices have devices nesting below it, and are a special
311 * "super class" of normal network devices; split their locks off into a
312 * separate class since they always nest.
313 */
314static struct lock_class_key vlan_netdev_xmit_lock_key;
295 315
296 dev_put(dev); 316static int vlan_dev_init(struct net_device *dev)
297 unregister_netdevice(dev); 317{
318 struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
298 319
299 rtnl_unlock(); 320 /* IFF_BROADCAST|IFF_MULTICAST; ??? */
321 dev->flags = real_dev->flags & ~IFF_UP;
322 dev->iflink = real_dev->ifindex;
323 dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
324 (1<<__LINK_STATE_DORMANT))) |
325 (1<<__LINK_STATE_PRESENT);
300 326
301 if (ret == 1) 327 if (is_zero_ether_addr(dev->dev_addr))
302 ret = 0; 328 memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
303 } else { 329 if (is_zero_ether_addr(dev->broadcast))
304 printk(VLAN_ERR 330 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
305 "%s: ERROR: Tried to remove a non-vlan device " 331
306 "with VLAN code, name: %s priv_flags: %hX\n", 332 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
307 __FUNCTION__, dev->name, dev->priv_flags); 333 dev->hard_header = real_dev->hard_header;
308 dev_put(dev); 334 dev->hard_header_len = real_dev->hard_header_len;
309 ret = -EPERM; 335 dev->hard_start_xmit = vlan_dev_hwaccel_hard_start_xmit;
310 } 336 dev->rebuild_header = real_dev->rebuild_header;
311 } else { 337 } else {
312#ifdef VLAN_DEBUG 338 dev->hard_header = vlan_dev_hard_header;
313 printk(VLAN_DBG "%s: WARNING: Could not find dev.\n", __FUNCTION__); 339 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
314#endif 340 dev->hard_start_xmit = vlan_dev_hard_start_xmit;
315 ret = -EINVAL; 341 dev->rebuild_header = vlan_dev_rebuild_header;
316 } 342 }
343 dev->hard_header_parse = real_dev->hard_header_parse;
344 dev->hard_header_cache = NULL;
317 345
318 return ret; 346 lockdep_set_class(&dev->_xmit_lock, &vlan_netdev_xmit_lock_key);
347 return 0;
319} 348}
320 349
321static void vlan_setup(struct net_device *new_dev) 350void vlan_setup(struct net_device *new_dev)
322{ 351{
323 SET_MODULE_OWNER(new_dev); 352 SET_MODULE_OWNER(new_dev);
324 353
354 ether_setup(new_dev);
355
325 /* new_dev->ifindex = 0; it will be set when added to 356 /* new_dev->ifindex = 0; it will be set when added to
326 * the global list. 357 * the global list.
327 * iflink is set as well. 358 * iflink is set as well.
@@ -338,12 +369,14 @@ static void vlan_setup(struct net_device *new_dev)
338 369
339 /* set up method calls */ 370 /* set up method calls */
340 new_dev->change_mtu = vlan_dev_change_mtu; 371 new_dev->change_mtu = vlan_dev_change_mtu;
372 new_dev->init = vlan_dev_init;
341 new_dev->open = vlan_dev_open; 373 new_dev->open = vlan_dev_open;
342 new_dev->stop = vlan_dev_stop; 374 new_dev->stop = vlan_dev_stop;
343 new_dev->set_mac_address = vlan_dev_set_mac_address;
344 new_dev->set_multicast_list = vlan_dev_set_multicast_list; 375 new_dev->set_multicast_list = vlan_dev_set_multicast_list;
345 new_dev->destructor = free_netdev; 376 new_dev->destructor = free_netdev;
346 new_dev->do_ioctl = vlan_dev_ioctl; 377 new_dev->do_ioctl = vlan_dev_ioctl;
378
379 memset(new_dev->broadcast, 0, sizeof(ETH_ALEN));
347} 380}
348 381
349static void vlan_transfer_operstate(const struct net_device *dev, struct net_device *vlandev) 382static void vlan_transfer_operstate(const struct net_device *dev, struct net_device *vlandev)
@@ -366,77 +399,110 @@ static void vlan_transfer_operstate(const struct net_device *dev, struct net_dev
366 } 399 }
367} 400}
368 401
369/* 402int vlan_check_real_dev(struct net_device *real_dev, unsigned short vlan_id)
370 * vlan network devices have devices nesting below it, and are a special
371 * "super class" of normal network devices; split their locks off into a
372 * separate class since they always nest.
373 */
374static struct lock_class_key vlan_netdev_xmit_lock_key;
375
376
377/* Attach a VLAN device to a mac address (ie Ethernet Card).
378 * Returns the device that was created, or NULL if there was
379 * an error of some kind.
380 */
381static struct net_device *register_vlan_device(const char *eth_IF_name,
382 unsigned short VLAN_ID)
383{ 403{
384 struct vlan_group *grp;
385 struct net_device *new_dev;
386 struct net_device *real_dev; /* the ethernet device */
387 char name[IFNAMSIZ];
388 int i;
389
390#ifdef VLAN_DEBUG
391 printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n",
392 __FUNCTION__, eth_IF_name, VLAN_ID);
393#endif
394
395 if (VLAN_ID >= VLAN_VID_MASK)
396 goto out_ret_null;
397
398 /* find the device relating to eth_IF_name. */
399 real_dev = dev_get_by_name(eth_IF_name);
400 if (!real_dev)
401 goto out_ret_null;
402
403 if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { 404 if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
404 printk(VLAN_DBG "%s: VLANs not supported on %s.\n", 405 printk(VLAN_DBG "%s: VLANs not supported on %s.\n",
405 __FUNCTION__, real_dev->name); 406 __FUNCTION__, real_dev->name);
406 goto out_put_dev; 407 return -EOPNOTSUPP;
407 } 408 }
408 409
409 if ((real_dev->features & NETIF_F_HW_VLAN_RX) && 410 if ((real_dev->features & NETIF_F_HW_VLAN_RX) &&
410 !real_dev->vlan_rx_register) { 411 !real_dev->vlan_rx_register) {
411 printk(VLAN_DBG "%s: Device %s has buggy VLAN hw accel.\n", 412 printk(VLAN_DBG "%s: Device %s has buggy VLAN hw accel.\n",
412 __FUNCTION__, real_dev->name); 413 __FUNCTION__, real_dev->name);
413 goto out_put_dev; 414 return -EOPNOTSUPP;
414 } 415 }
415 416
416 if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) && 417 if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
417 (!real_dev->vlan_rx_add_vid || !real_dev->vlan_rx_kill_vid)) { 418 (!real_dev->vlan_rx_add_vid || !real_dev->vlan_rx_kill_vid)) {
418 printk(VLAN_DBG "%s: Device %s has buggy VLAN hw accel.\n", 419 printk(VLAN_DBG "%s: Device %s has buggy VLAN hw accel.\n",
419 __FUNCTION__, real_dev->name); 420 __FUNCTION__, real_dev->name);
420 goto out_put_dev; 421 return -EOPNOTSUPP;
421 } 422 }
422 423
423 /* From this point on, all the data structures must remain
424 * consistent.
425 */
426 rtnl_lock();
427
428 /* The real device must be up and operating in order to 424 /* The real device must be up and operating in order to
429 * assosciate a VLAN device with it. 425 * assosciate a VLAN device with it.
430 */ 426 */
431 if (!(real_dev->flags & IFF_UP)) 427 if (!(real_dev->flags & IFF_UP))
432 goto out_unlock; 428 return -ENETDOWN;
433 429
434 if (__find_vlan_dev(real_dev, VLAN_ID) != NULL) { 430 if (__find_vlan_dev(real_dev, vlan_id) != NULL) {
435 /* was already registered. */ 431 /* was already registered. */
436 printk(VLAN_DBG "%s: ALREADY had VLAN registered\n", __FUNCTION__); 432 printk(VLAN_DBG "%s: ALREADY had VLAN registered\n", __FUNCTION__);
437 goto out_unlock; 433 return -EEXIST;
434 }
435
436 return 0;
437}
438
439int register_vlan_dev(struct net_device *dev)
440{
441 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
442 struct net_device *real_dev = vlan->real_dev;
443 unsigned short vlan_id = vlan->vlan_id;
444 struct vlan_group *grp, *ngrp = NULL;
445 int err;
446
447 grp = __vlan_find_group(real_dev->ifindex);
448 if (!grp) {
449 ngrp = grp = vlan_group_alloc(real_dev->ifindex);
450 if (!grp)
451 return -ENOBUFS;
438 } 452 }
439 453
454 err = register_netdevice(dev);
455 if (err < 0)
456 goto out_free_group;
457
458 /* Account for reference in struct vlan_dev_info */
459 dev_hold(real_dev);
460
461 vlan_transfer_operstate(real_dev, dev);
462 linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
463
464 /* So, got the sucker initialized, now lets place
465 * it into our local structure.
466 */
467 vlan_group_set_device(grp, vlan_id, dev);
468 if (ngrp && real_dev->features & NETIF_F_HW_VLAN_RX)
469 real_dev->vlan_rx_register(real_dev, ngrp);
470 if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
471 real_dev->vlan_rx_add_vid(real_dev, vlan_id);
472
473 if (vlan_proc_add_dev(dev) < 0)
474 printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
475 dev->name);
476 return 0;
477
478out_free_group:
479 if (ngrp)
480 vlan_group_free(ngrp);
481 return err;
482}
483
484/* Attach a VLAN device to a mac address (ie Ethernet Card).
485 * Returns 0 if the device was created or a negative error code otherwise.
486 */
487static int register_vlan_device(struct net_device *real_dev,
488 unsigned short VLAN_ID)
489{
490 struct net_device *new_dev;
491 char name[IFNAMSIZ];
492 int err;
493
494#ifdef VLAN_DEBUG
495 printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n",
496 __FUNCTION__, eth_IF_name, VLAN_ID);
497#endif
498
499 if (VLAN_ID >= VLAN_VID_MASK)
500 return -ERANGE;
501
502 err = vlan_check_real_dev(real_dev, VLAN_ID);
503 if (err < 0)
504 return err;
505
440 /* Gotta set up the fields for the device. */ 506 /* Gotta set up the fields for the device. */
441#ifdef VLAN_DEBUG 507#ifdef VLAN_DEBUG
442 printk(VLAN_DBG "About to allocate name, vlan_name_type: %i\n", 508 printk(VLAN_DBG "About to allocate name, vlan_name_type: %i\n",
@@ -471,138 +537,64 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
471 vlan_setup); 537 vlan_setup);
472 538
473 if (new_dev == NULL) 539 if (new_dev == NULL)
474 goto out_unlock; 540 return -ENOBUFS;
475
476#ifdef VLAN_DEBUG
477 printk(VLAN_DBG "Allocated new name -:%s:-\n", new_dev->name);
478#endif
479 /* IFF_BROADCAST|IFF_MULTICAST; ??? */
480 new_dev->flags = real_dev->flags;
481 new_dev->flags &= ~IFF_UP;
482
483 new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
484 (1<<__LINK_STATE_DORMANT))) |
485 (1<<__LINK_STATE_PRESENT);
486 541
487 /* need 4 bytes for extra VLAN header info, 542 /* need 4 bytes for extra VLAN header info,
488 * hope the underlying device can handle it. 543 * hope the underlying device can handle it.
489 */ 544 */
490 new_dev->mtu = real_dev->mtu; 545 new_dev->mtu = real_dev->mtu;
491 546
492 /* TODO: maybe just assign it to be ETHERNET? */ 547#ifdef VLAN_DEBUG
493 new_dev->type = real_dev->type; 548 printk(VLAN_DBG "Allocated new name -:%s:-\n", new_dev->name);
494
495 new_dev->hard_header_len = real_dev->hard_header_len;
496 if (!(real_dev->features & NETIF_F_HW_VLAN_TX)) {
497 /* Regular ethernet + 4 bytes (18 total). */
498 new_dev->hard_header_len += VLAN_HLEN;
499 }
500
501 VLAN_MEM_DBG("new_dev->priv malloc, addr: %p size: %i\n", 549 VLAN_MEM_DBG("new_dev->priv malloc, addr: %p size: %i\n",
502 new_dev->priv, 550 new_dev->priv,
503 sizeof(struct vlan_dev_info)); 551 sizeof(struct vlan_dev_info));
504 552#endif
505 memcpy(new_dev->broadcast, real_dev->broadcast, real_dev->addr_len);
506 memcpy(new_dev->dev_addr, real_dev->dev_addr, real_dev->addr_len);
507 new_dev->addr_len = real_dev->addr_len;
508
509 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
510 new_dev->hard_header = real_dev->hard_header;
511 new_dev->hard_start_xmit = vlan_dev_hwaccel_hard_start_xmit;
512 new_dev->rebuild_header = real_dev->rebuild_header;
513 } else {
514 new_dev->hard_header = vlan_dev_hard_header;
515 new_dev->hard_start_xmit = vlan_dev_hard_start_xmit;
516 new_dev->rebuild_header = vlan_dev_rebuild_header;
517 }
518 new_dev->hard_header_parse = real_dev->hard_header_parse;
519 553
520 VLAN_DEV_INFO(new_dev)->vlan_id = VLAN_ID; /* 1 through VLAN_VID_MASK */ 554 VLAN_DEV_INFO(new_dev)->vlan_id = VLAN_ID; /* 1 through VLAN_VID_MASK */
521 VLAN_DEV_INFO(new_dev)->real_dev = real_dev; 555 VLAN_DEV_INFO(new_dev)->real_dev = real_dev;
522 VLAN_DEV_INFO(new_dev)->dent = NULL; 556 VLAN_DEV_INFO(new_dev)->dent = NULL;
523 VLAN_DEV_INFO(new_dev)->flags = 1; 557 VLAN_DEV_INFO(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
524 558
525#ifdef VLAN_DEBUG 559 new_dev->rtnl_link_ops = &vlan_link_ops;
526 printk(VLAN_DBG "About to go find the group for idx: %i\n", 560 err = register_vlan_dev(new_dev);
527 real_dev->ifindex); 561 if (err < 0)
528#endif
529
530 if (register_netdevice(new_dev))
531 goto out_free_newdev; 562 goto out_free_newdev;
532 563
533 lockdep_set_class(&new_dev->_xmit_lock, &vlan_netdev_xmit_lock_key); 564 /* Account for reference in struct vlan_dev_info */
534 565 dev_hold(real_dev);
535 new_dev->iflink = real_dev->ifindex;
536 vlan_transfer_operstate(real_dev, new_dev);
537 linkwatch_fire_event(new_dev); /* _MUST_ call rfc2863_policy() */
538
539 /* So, got the sucker initialized, now lets place
540 * it into our local structure.
541 */
542 grp = __vlan_find_group(real_dev->ifindex);
543
544 /* Note, we are running under the RTNL semaphore
545 * so it cannot "appear" on us.
546 */
547 if (!grp) { /* need to add a new group */
548 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
549 if (!grp)
550 goto out_free_unregister;
551
552 for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
553 grp->vlan_devices_arrays[i] = kzalloc(
554 sizeof(struct net_device *)*VLAN_GROUP_ARRAY_PART_LEN,
555 GFP_KERNEL);
556
557 if (!grp->vlan_devices_arrays[i])
558 goto out_free_arrays;
559 }
560
561 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
562 grp->real_dev_ifindex = real_dev->ifindex;
563
564 hlist_add_head_rcu(&grp->hlist,
565 &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]);
566
567 if (real_dev->features & NETIF_F_HW_VLAN_RX)
568 real_dev->vlan_rx_register(real_dev, grp);
569 }
570
571 vlan_group_set_device(grp, VLAN_ID, new_dev);
572
573 if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */
574 printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
575 new_dev->name);
576
577 if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
578 real_dev->vlan_rx_add_vid(real_dev, VLAN_ID);
579
580 rtnl_unlock();
581
582
583#ifdef VLAN_DEBUG 566#ifdef VLAN_DEBUG
584 printk(VLAN_DBG "Allocated new device successfully, returning.\n"); 567 printk(VLAN_DBG "Allocated new device successfully, returning.\n");
585#endif 568#endif
586 return new_dev; 569 return 0;
587
588out_free_arrays:
589 vlan_group_free(grp);
590
591out_free_unregister:
592 unregister_netdev(new_dev);
593 goto out_unlock;
594 570
595out_free_newdev: 571out_free_newdev:
596 free_netdev(new_dev); 572 free_netdev(new_dev);
573 return err;
574}
597 575
598out_unlock: 576static void vlan_sync_address(struct net_device *dev,
599 rtnl_unlock(); 577 struct net_device *vlandev)
578{
579 struct vlan_dev_info *vlan = VLAN_DEV_INFO(vlandev);
600 580
601out_put_dev: 581 /* May be called without an actual change */
602 dev_put(real_dev); 582 if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
583 return;
603 584
604out_ret_null: 585 /* vlan address was different from the old address and is equal to
605 return NULL; 586 * the new address */
587 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
588 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
589 dev_unicast_delete(dev, vlandev->dev_addr, ETH_ALEN);
590
591 /* vlan address was equal to the old address and is different from
592 * the new address */
593 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
594 compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
595 dev_unicast_add(dev, vlandev->dev_addr, ETH_ALEN);
596
597 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
606} 598}
607 599
608static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) 600static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
@@ -631,6 +623,17 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
631 } 623 }
632 break; 624 break;
633 625
626 case NETDEV_CHANGEADDR:
627 /* Adjust unicast filters on underlying device */
628 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
629 vlandev = vlan_group_get_device(grp, i);
630 if (!vlandev)
631 continue;
632
633 vlan_sync_address(dev, vlandev);
634 }
635 break;
636
634 case NETDEV_DOWN: 637 case NETDEV_DOWN:
635 /* Put all VLANs for this dev in the down state too. */ 638 /* Put all VLANs for this dev in the down state too. */
636 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 639 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
@@ -693,9 +696,10 @@ out:
693 */ 696 */
694static int vlan_ioctl_handler(void __user *arg) 697static int vlan_ioctl_handler(void __user *arg)
695{ 698{
696 int err = 0; 699 int err;
697 unsigned short vid = 0; 700 unsigned short vid = 0;
698 struct vlan_ioctl_args args; 701 struct vlan_ioctl_args args;
702 struct net_device *dev = NULL;
699 703
700 if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args))) 704 if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
701 return -EFAULT; 705 return -EFAULT;
@@ -708,35 +712,61 @@ static int vlan_ioctl_handler(void __user *arg)
708 printk(VLAN_DBG "%s: args.cmd: %x\n", __FUNCTION__, args.cmd); 712 printk(VLAN_DBG "%s: args.cmd: %x\n", __FUNCTION__, args.cmd);
709#endif 713#endif
710 714
715 rtnl_lock();
716
717 switch (args.cmd) {
718 case SET_VLAN_INGRESS_PRIORITY_CMD:
719 case SET_VLAN_EGRESS_PRIORITY_CMD:
720 case SET_VLAN_FLAG_CMD:
721 case ADD_VLAN_CMD:
722 case DEL_VLAN_CMD:
723 case GET_VLAN_REALDEV_NAME_CMD:
724 case GET_VLAN_VID_CMD:
725 err = -ENODEV;
726 dev = __dev_get_by_name(args.device1);
727 if (!dev)
728 goto out;
729
730 err = -EINVAL;
731 if (args.cmd != ADD_VLAN_CMD &&
732 !(dev->priv_flags & IFF_802_1Q_VLAN))
733 goto out;
734 }
735
711 switch (args.cmd) { 736 switch (args.cmd) {
712 case SET_VLAN_INGRESS_PRIORITY_CMD: 737 case SET_VLAN_INGRESS_PRIORITY_CMD:
738 err = -EPERM;
713 if (!capable(CAP_NET_ADMIN)) 739 if (!capable(CAP_NET_ADMIN))
714 return -EPERM; 740 break;
715 err = vlan_dev_set_ingress_priority(args.device1, 741 vlan_dev_set_ingress_priority(dev,
716 args.u.skb_priority, 742 args.u.skb_priority,
717 args.vlan_qos); 743 args.vlan_qos);
718 break; 744 break;
719 745
720 case SET_VLAN_EGRESS_PRIORITY_CMD: 746 case SET_VLAN_EGRESS_PRIORITY_CMD:
747 err = -EPERM;
721 if (!capable(CAP_NET_ADMIN)) 748 if (!capable(CAP_NET_ADMIN))
722 return -EPERM; 749 break;
723 err = vlan_dev_set_egress_priority(args.device1, 750 err = vlan_dev_set_egress_priority(dev,
724 args.u.skb_priority, 751 args.u.skb_priority,
725 args.vlan_qos); 752 args.vlan_qos);
726 break; 753 break;
727 754
728 case SET_VLAN_FLAG_CMD: 755 case SET_VLAN_FLAG_CMD:
756 err = -EPERM;
729 if (!capable(CAP_NET_ADMIN)) 757 if (!capable(CAP_NET_ADMIN))
730 return -EPERM; 758 break;
731 err = vlan_dev_set_vlan_flag(args.device1, 759 err = vlan_dev_set_vlan_flag(dev,
732 args.u.flag, 760 args.u.flag,
733 args.vlan_qos); 761 args.vlan_qos);
734 break; 762 break;
735 763
736 case SET_VLAN_NAME_TYPE_CMD: 764 case SET_VLAN_NAME_TYPE_CMD:
765 err = -EPERM;
737 if (!capable(CAP_NET_ADMIN)) 766 if (!capable(CAP_NET_ADMIN))
738 return -EPERM; 767 return -EPERM;
739 if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { 768 if ((args.u.name_type >= 0) &&
769 (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
740 vlan_name_type = args.u.name_type; 770 vlan_name_type = args.u.name_type;
741 err = 0; 771 err = 0;
742 } else { 772 } else {
@@ -745,26 +775,17 @@ static int vlan_ioctl_handler(void __user *arg)
745 break; 775 break;
746 776
747 case ADD_VLAN_CMD: 777 case ADD_VLAN_CMD:
778 err = -EPERM;
748 if (!capable(CAP_NET_ADMIN)) 779 if (!capable(CAP_NET_ADMIN))
749 return -EPERM; 780 break;
750 /* we have been given the name of the Ethernet Device we want to 781 err = register_vlan_device(dev, args.u.VID);
751 * talk to: args.dev1 We also have the
752 * VLAN ID: args.u.VID
753 */
754 if (register_vlan_device(args.device1, args.u.VID)) {
755 err = 0;
756 } else {
757 err = -EINVAL;
758 }
759 break; 782 break;
760 783
761 case DEL_VLAN_CMD: 784 case DEL_VLAN_CMD:
785 err = -EPERM;
762 if (!capable(CAP_NET_ADMIN)) 786 if (!capable(CAP_NET_ADMIN))
763 return -EPERM; 787 break;
764 /* Here, the args.dev1 is the actual VLAN we want 788 err = unregister_vlan_device(dev);
765 * to get rid of.
766 */
767 err = unregister_vlan_device(args.device1);
768 break; 789 break;
769 790
770 case GET_VLAN_INGRESS_PRIORITY_CMD: 791 case GET_VLAN_INGRESS_PRIORITY_CMD:
@@ -788,9 +809,7 @@ static int vlan_ioctl_handler(void __user *arg)
788 err = -EINVAL; 809 err = -EINVAL;
789 break; 810 break;
790 case GET_VLAN_REALDEV_NAME_CMD: 811 case GET_VLAN_REALDEV_NAME_CMD:
791 err = vlan_dev_get_realdev_name(args.device1, args.u.device2); 812 vlan_dev_get_realdev_name(dev, args.u.device2);
792 if (err)
793 goto out;
794 if (copy_to_user(arg, &args, 813 if (copy_to_user(arg, &args,
795 sizeof(struct vlan_ioctl_args))) { 814 sizeof(struct vlan_ioctl_args))) {
796 err = -EFAULT; 815 err = -EFAULT;
@@ -798,9 +817,7 @@ static int vlan_ioctl_handler(void __user *arg)
798 break; 817 break;
799 818
800 case GET_VLAN_VID_CMD: 819 case GET_VLAN_VID_CMD:
801 err = vlan_dev_get_vid(args.device1, &vid); 820 vlan_dev_get_vid(dev, &vid);
802 if (err)
803 goto out;
804 args.u.VID = vid; 821 args.u.VID = vid;
805 if (copy_to_user(arg, &args, 822 if (copy_to_user(arg, &args,
806 sizeof(struct vlan_ioctl_args))) { 823 sizeof(struct vlan_ioctl_args))) {
@@ -812,9 +829,11 @@ static int vlan_ioctl_handler(void __user *arg)
812 /* pass on to underlying device instead?? */ 829 /* pass on to underlying device instead?? */
813 printk(VLAN_DBG "%s: Unknown VLAN CMD: %x \n", 830 printk(VLAN_DBG "%s: Unknown VLAN CMD: %x \n",
814 __FUNCTION__, args.cmd); 831 __FUNCTION__, args.cmd);
815 return -EINVAL; 832 err = -EINVAL;
833 break;
816 } 834 }
817out: 835out:
836 rtnl_unlock();
818 return err; 837 return err;
819} 838}
820 839
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 1976cdba8f72..62ce1c519aab 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -58,15 +58,27 @@ int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
58int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); 58int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
59int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); 59int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
60int vlan_dev_change_mtu(struct net_device *dev, int new_mtu); 60int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
61int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
62int vlan_dev_open(struct net_device* dev); 61int vlan_dev_open(struct net_device* dev);
63int vlan_dev_stop(struct net_device* dev); 62int vlan_dev_stop(struct net_device* dev);
64int vlan_dev_ioctl(struct net_device* dev, struct ifreq *ifr, int cmd); 63int vlan_dev_ioctl(struct net_device* dev, struct ifreq *ifr, int cmd);
65int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio); 64void vlan_dev_set_ingress_priority(const struct net_device *dev,
66int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio); 65 u32 skb_prio, short vlan_prio);
67int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val); 66int vlan_dev_set_egress_priority(const struct net_device *dev,
68int vlan_dev_get_realdev_name(const char* dev_name, char* result); 67 u32 skb_prio, short vlan_prio);
69int vlan_dev_get_vid(const char* dev_name, unsigned short* result); 68int vlan_dev_set_vlan_flag(const struct net_device *dev,
69 u32 flag, short flag_val);
70void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
71void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result);
70void vlan_dev_set_multicast_list(struct net_device *vlan_dev); 72void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
71 73
74int vlan_check_real_dev(struct net_device *real_dev, unsigned short vlan_id);
75void vlan_setup(struct net_device *dev);
76int register_vlan_dev(struct net_device *dev);
77int unregister_vlan_device(struct net_device *dev);
78
79int vlan_netlink_init(void);
80void vlan_netlink_fini(void);
81
82extern struct rtnl_link_ops vlan_link_ops;
83
72#endif /* !(__BEN_VLAN_802_1Q_INC__) */ 84#endif /* !(__BEN_VLAN_802_1Q_INC__) */
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ec46084f44b4..d4a62d1b52b4 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -73,7 +73,7 @@ int vlan_dev_rebuild_header(struct sk_buff *skb)
73 73
74static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) 74static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
75{ 75{
76 if (VLAN_DEV_INFO(skb->dev)->flags & 1) { 76 if (VLAN_DEV_INFO(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
77 if (skb_shared(skb) || skb_cloned(skb)) { 77 if (skb_shared(skb) || skb_cloned(skb)) {
78 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 78 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
79 kfree_skb(skb); 79 kfree_skb(skb);
@@ -350,7 +350,8 @@ int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
350 * header shuffling in the hard_start_xmit. Users can turn off this 350 * header shuffling in the hard_start_xmit. Users can turn off this
351 * REORDER behaviour with the vconfig tool. 351 * REORDER behaviour with the vconfig tool.
352 */ 352 */
353 build_vlan_header = ((VLAN_DEV_INFO(dev)->flags & 1) == 0); 353 if (!(VLAN_DEV_INFO(dev)->flags & VLAN_FLAG_REORDER_HDR))
354 build_vlan_header = 1;
354 355
355 if (build_vlan_header) { 356 if (build_vlan_header) {
356 vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); 357 vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
@@ -534,172 +535,81 @@ int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
534 return 0; 535 return 0;
535} 536}
536 537
537int vlan_dev_set_ingress_priority(char *dev_name, __u32 skb_prio, short vlan_prio) 538void vlan_dev_set_ingress_priority(const struct net_device *dev,
539 u32 skb_prio, short vlan_prio)
538{ 540{
539 struct net_device *dev = dev_get_by_name(dev_name); 541 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
540 542
541 if (dev) { 543 if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
542 if (dev->priv_flags & IFF_802_1Q_VLAN) { 544 vlan->nr_ingress_mappings--;
543 /* see if a priority mapping exists.. */ 545 else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
544 VLAN_DEV_INFO(dev)->ingress_priority_map[vlan_prio & 0x7] = skb_prio; 546 vlan->nr_ingress_mappings++;
545 dev_put(dev);
546 return 0;
547 }
548 547
549 dev_put(dev); 548 vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
550 }
551 return -EINVAL;
552} 549}
553 550
554int vlan_dev_set_egress_priority(char *dev_name, __u32 skb_prio, short vlan_prio) 551int vlan_dev_set_egress_priority(const struct net_device *dev,
552 u32 skb_prio, short vlan_prio)
555{ 553{
556 struct net_device *dev = dev_get_by_name(dev_name); 554 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
557 struct vlan_priority_tci_mapping *mp = NULL; 555 struct vlan_priority_tci_mapping *mp = NULL;
558 struct vlan_priority_tci_mapping *np; 556 struct vlan_priority_tci_mapping *np;
557 u32 vlan_qos = (vlan_prio << 13) & 0xE000;
559 558
560 if (dev) { 559 /* See if a priority mapping exists.. */
561 if (dev->priv_flags & IFF_802_1Q_VLAN) { 560 mp = vlan->egress_priority_map[skb_prio & 0xF];
562 /* See if a priority mapping exists.. */ 561 while (mp) {
563 mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF]; 562 if (mp->priority == skb_prio) {
564 while (mp) { 563 if (mp->vlan_qos && !vlan_qos)
565 if (mp->priority == skb_prio) { 564 vlan->nr_egress_mappings--;
566 mp->vlan_qos = ((vlan_prio << 13) & 0xE000); 565 else if (!mp->vlan_qos && vlan_qos)
567 dev_put(dev); 566 vlan->nr_egress_mappings++;
568 return 0; 567 mp->vlan_qos = vlan_qos;
569 } 568 return 0;
570 mp = mp->next;
571 }
572
573 /* Create a new mapping then. */
574 mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF];
575 np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
576 if (np) {
577 np->next = mp;
578 np->priority = skb_prio;
579 np->vlan_qos = ((vlan_prio << 13) & 0xE000);
580 VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF] = np;
581 dev_put(dev);
582 return 0;
583 } else {
584 dev_put(dev);
585 return -ENOBUFS;
586 }
587 }
588 dev_put(dev);
589 }
590 return -EINVAL;
591}
592
593/* Flags are defined in the vlan_dev_info class in include/linux/if_vlan.h file. */
594int vlan_dev_set_vlan_flag(char *dev_name, __u32 flag, short flag_val)
595{
596 struct net_device *dev = dev_get_by_name(dev_name);
597
598 if (dev) {
599 if (dev->priv_flags & IFF_802_1Q_VLAN) {
600 /* verify flag is supported */
601 if (flag == 1) {
602 if (flag_val) {
603 VLAN_DEV_INFO(dev)->flags |= 1;
604 } else {
605 VLAN_DEV_INFO(dev)->flags &= ~1;
606 }
607 dev_put(dev);
608 return 0;
609 } else {
610 printk(KERN_ERR "%s: flag %i is not valid.\n",
611 __FUNCTION__, (int)(flag));
612 dev_put(dev);
613 return -EINVAL;
614 }
615 } else {
616 printk(KERN_ERR
617 "%s: %s is not a vlan device, priv_flags: %hX.\n",
618 __FUNCTION__, dev->name, dev->priv_flags);
619 dev_put(dev);
620 } 569 }
621 } else { 570 mp = mp->next;
622 printk(KERN_ERR "%s: Could not find device: %s\n",
623 __FUNCTION__, dev_name);
624 } 571 }
625 572
626 return -EINVAL; 573 /* Create a new mapping then. */
574 mp = vlan->egress_priority_map[skb_prio & 0xF];
575 np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
576 if (!np)
577 return -ENOBUFS;
578
579 np->next = mp;
580 np->priority = skb_prio;
581 np->vlan_qos = vlan_qos;
582 vlan->egress_priority_map[skb_prio & 0xF] = np;
583 if (vlan_qos)
584 vlan->nr_egress_mappings++;
585 return 0;
627} 586}
628 587
629 588/* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
630int vlan_dev_get_realdev_name(const char *dev_name, char* result) 589int vlan_dev_set_vlan_flag(const struct net_device *dev,
590 u32 flag, short flag_val)
631{ 591{
632 struct net_device *dev = dev_get_by_name(dev_name); 592 /* verify flag is supported */
633 int rv = 0; 593 if (flag == VLAN_FLAG_REORDER_HDR) {
634 if (dev) { 594 if (flag_val) {
635 if (dev->priv_flags & IFF_802_1Q_VLAN) { 595 VLAN_DEV_INFO(dev)->flags |= VLAN_FLAG_REORDER_HDR;
636 strncpy(result, VLAN_DEV_INFO(dev)->real_dev->name, 23);
637 rv = 0;
638 } else { 596 } else {
639 rv = -EINVAL; 597 VLAN_DEV_INFO(dev)->flags &= ~VLAN_FLAG_REORDER_HDR;
640 } 598 }
641 dev_put(dev); 599 return 0;
642 } else {
643 rv = -ENODEV;
644 } 600 }
645 return rv; 601 printk(KERN_ERR "%s: flag %i is not valid.\n", __FUNCTION__, flag);
602 return -EINVAL;
646} 603}
647 604
648int vlan_dev_get_vid(const char *dev_name, unsigned short* result) 605void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
649{ 606{
650 struct net_device *dev = dev_get_by_name(dev_name); 607 strncpy(result, VLAN_DEV_INFO(dev)->real_dev->name, 23);
651 int rv = 0;
652 if (dev) {
653 if (dev->priv_flags & IFF_802_1Q_VLAN) {
654 *result = VLAN_DEV_INFO(dev)->vlan_id;
655 rv = 0;
656 } else {
657 rv = -EINVAL;
658 }
659 dev_put(dev);
660 } else {
661 rv = -ENODEV;
662 }
663 return rv;
664} 608}
665 609
666 610void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result)
667int vlan_dev_set_mac_address(struct net_device *dev, void *addr_struct_p)
668{ 611{
669 struct sockaddr *addr = (struct sockaddr *)(addr_struct_p); 612 *result = VLAN_DEV_INFO(dev)->vlan_id;
670 int i;
671
672 if (netif_running(dev))
673 return -EBUSY;
674
675 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
676
677 printk("%s: Setting MAC address to ", dev->name);
678 for (i = 0; i < 6; i++)
679 printk(" %2.2x", dev->dev_addr[i]);
680 printk(".\n");
681
682 if (memcmp(VLAN_DEV_INFO(dev)->real_dev->dev_addr,
683 dev->dev_addr,
684 dev->addr_len) != 0) {
685 if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_PROMISC)) {
686 int flgs = VLAN_DEV_INFO(dev)->real_dev->flags;
687
688 /* Increment our in-use promiscuity counter */
689 dev_set_promiscuity(VLAN_DEV_INFO(dev)->real_dev, 1);
690
691 /* Make PROMISC visible to the user. */
692 flgs |= IFF_PROMISC;
693 printk("VLAN (%s): Setting underlying device (%s) to promiscious mode.\n",
694 dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
695 dev_change_flags(VLAN_DEV_INFO(dev)->real_dev, flgs);
696 }
697 } else {
698 printk("VLAN (%s): Underlying device (%s) has same MAC, not checking promiscious mode.\n",
699 dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
700 }
701
702 return 0;
703} 613}
704 614
705static inline int vlan_dmi_equals(struct dev_mc_list *dmi1, 615static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
@@ -788,15 +698,32 @@ static void vlan_flush_mc_list(struct net_device *dev)
788 698
789int vlan_dev_open(struct net_device *dev) 699int vlan_dev_open(struct net_device *dev)
790{ 700{
791 if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_UP)) 701 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
702 struct net_device *real_dev = vlan->real_dev;
703 int err;
704
705 if (!(real_dev->flags & IFF_UP))
792 return -ENETDOWN; 706 return -ENETDOWN;
793 707
708 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
709 err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN);
710 if (err < 0)
711 return err;
712 }
713 memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN);
714
794 return 0; 715 return 0;
795} 716}
796 717
797int vlan_dev_stop(struct net_device *dev) 718int vlan_dev_stop(struct net_device *dev)
798{ 719{
720 struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
721
799 vlan_flush_mc_list(dev); 722 vlan_flush_mc_list(dev);
723
724 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
725 dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
726
800 return 0; 727 return 0;
801} 728}
802 729
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
new file mode 100644
index 000000000000..6cdd1e015e2d
--- /dev/null
+++ b/net/8021q/vlan_netlink.c
@@ -0,0 +1,243 @@
1/*
2 * VLAN netlink control interface
3 *
4 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/netdevice.h>
13#include <linux/if_vlan.h>
14#include <net/netlink.h>
15#include <net/rtnetlink.h>
16#include "vlan.h"
17
18
19static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
20 [IFLA_VLAN_ID] = { .type = NLA_U16 },
21 [IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
22 [IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
23 [IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
24};
25
26static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
27 [IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
28};
29
30
31static inline int vlan_validate_qos_map(struct nlattr *attr)
32{
33 if (!attr)
34 return 0;
35 return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy);
36}
37
38static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
39{
40 struct ifla_vlan_flags *flags;
41 u16 id;
42 int err;
43
44 if (tb[IFLA_ADDRESS]) {
45 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
46 return -EINVAL;
47 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
48 return -EADDRNOTAVAIL;
49 }
50
51 if (!data)
52 return -EINVAL;
53
54 if (data[IFLA_VLAN_ID]) {
55 id = nla_get_u16(data[IFLA_VLAN_ID]);
56 if (id >= VLAN_VID_MASK)
57 return -ERANGE;
58 }
59 if (data[IFLA_VLAN_FLAGS]) {
60 flags = nla_data(data[IFLA_VLAN_FLAGS]);
61 if ((flags->flags & flags->mask) & ~VLAN_FLAG_REORDER_HDR)
62 return -EINVAL;
63 }
64
65 err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
66 if (err < 0)
67 return err;
68 err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
69 if (err < 0)
70 return err;
71 return 0;
72}
73
74static int vlan_changelink(struct net_device *dev,
75 struct nlattr *tb[], struct nlattr *data[])
76{
77 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
78 struct ifla_vlan_flags *flags;
79 struct ifla_vlan_qos_mapping *m;
80 struct nlattr *attr;
81 int rem;
82
83 if (data[IFLA_VLAN_FLAGS]) {
84 flags = nla_data(data[IFLA_VLAN_FLAGS]);
85 vlan->flags = (vlan->flags & ~flags->mask) |
86 (flags->flags & flags->mask);
87 }
88 if (data[IFLA_VLAN_INGRESS_QOS]) {
89 nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
90 m = nla_data(attr);
91 vlan_dev_set_ingress_priority(dev, m->to, m->from);
92 }
93 }
94 if (data[IFLA_VLAN_EGRESS_QOS]) {
95 nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
96 m = nla_data(attr);
97 vlan_dev_set_egress_priority(dev, m->from, m->to);
98 }
99 }
100 return 0;
101}
102
103static int vlan_newlink(struct net_device *dev,
104 struct nlattr *tb[], struct nlattr *data[])
105{
106 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
107 struct net_device *real_dev;
108 int err;
109
110 if (!data[IFLA_VLAN_ID])
111 return -EINVAL;
112
113 if (!tb[IFLA_LINK])
114 return -EINVAL;
115 real_dev = __dev_get_by_index(nla_get_u32(tb[IFLA_LINK]));
116 if (!real_dev)
117 return -ENODEV;
118
119 vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
120 vlan->real_dev = real_dev;
121 vlan->flags = VLAN_FLAG_REORDER_HDR;
122
123 err = vlan_check_real_dev(real_dev, vlan->vlan_id);
124 if (err < 0)
125 return err;
126
127 if (!tb[IFLA_MTU])
128 dev->mtu = real_dev->mtu;
129 else if (dev->mtu > real_dev->mtu)
130 return -EINVAL;
131
132 err = vlan_changelink(dev, tb, data);
133 if (err < 0)
134 return err;
135
136 return register_vlan_dev(dev);
137}
138
139static void vlan_dellink(struct net_device *dev)
140{
141 unregister_vlan_device(dev);
142}
143
144static inline size_t vlan_qos_map_size(unsigned int n)
145{
146 if (n == 0)
147 return 0;
148 /* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
149 return nla_total_size(sizeof(struct nlattr)) +
150 nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
151}
152
153static size_t vlan_get_size(const struct net_device *dev)
154{
155 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
156
157 return nla_total_size(2) + /* IFLA_VLAN_ID */
158 vlan_qos_map_size(vlan->nr_ingress_mappings) +
159 vlan_qos_map_size(vlan->nr_egress_mappings);
160}
161
162static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
163{
164 struct vlan_dev_info *vlan = VLAN_DEV_INFO(dev);
165 struct vlan_priority_tci_mapping *pm;
166 struct ifla_vlan_flags f;
167 struct ifla_vlan_qos_mapping m;
168 struct nlattr *nest;
169 unsigned int i;
170
171 NLA_PUT_U16(skb, IFLA_VLAN_ID, VLAN_DEV_INFO(dev)->vlan_id);
172 if (vlan->flags) {
173 f.flags = vlan->flags;
174 f.mask = ~0;
175 NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f);
176 }
177 if (vlan->nr_ingress_mappings) {
178 nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
179 if (nest == NULL)
180 goto nla_put_failure;
181
182 for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
183 if (!vlan->ingress_priority_map[i])
184 continue;
185
186 m.from = i;
187 m.to = vlan->ingress_priority_map[i];
188 NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
189 sizeof(m), &m);
190 }
191 nla_nest_end(skb, nest);
192 }
193
194 if (vlan->nr_egress_mappings) {
195 nest = nla_nest_start(skb, IFLA_VLAN_EGRESS_QOS);
196 if (nest == NULL)
197 goto nla_put_failure;
198
199 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
200 for (pm = vlan->egress_priority_map[i]; pm;
201 pm = pm->next) {
202 if (!pm->vlan_qos)
203 continue;
204
205 m.from = pm->priority;
206 m.to = (pm->vlan_qos >> 13) & 0x7;
207 NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
208 sizeof(m), &m);
209 }
210 }
211 nla_nest_end(skb, nest);
212 }
213 return 0;
214
215nla_put_failure:
216 return -EMSGSIZE;
217}
218
219struct rtnl_link_ops vlan_link_ops __read_mostly = {
220 .kind = "vlan",
221 .maxtype = IFLA_VLAN_MAX,
222 .policy = vlan_policy,
223 .priv_size = sizeof(struct vlan_dev_info),
224 .setup = vlan_setup,
225 .validate = vlan_validate,
226 .newlink = vlan_newlink,
227 .changelink = vlan_changelink,
228 .dellink = vlan_dellink,
229 .get_size = vlan_get_size,
230 .fill_info = vlan_fill_info,
231};
232
233int __init vlan_netlink_init(void)
234{
235 return rtnl_link_register(&vlan_link_ops);
236}
237
238void __exit vlan_netlink_fini(void)
239{
240 rtnl_link_unregister(&vlan_link_ops);
241}
242
243MODULE_ALIAS_RTNL_LINK("vlan");
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d216a64421cd..c0040c9064a1 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -69,7 +69,7 @@ static const char name_conf[] = "config";
69 * Generic /proc/net/vlan/<file> file and inode operations 69 * Generic /proc/net/vlan/<file> file and inode operations
70 */ 70 */
71 71
72static struct seq_operations vlan_seq_ops = { 72static const struct seq_operations vlan_seq_ops = {
73 .start = vlan_seq_start, 73 .start = vlan_seq_start,
74 .next = vlan_seq_next, 74 .next = vlan_seq_next,
75 .stop = vlan_seq_stop, 75 .stop = vlan_seq_stop,
@@ -342,7 +342,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
342 seq_printf(seq, "Device: %s", dev_info->real_dev->name); 342 seq_printf(seq, "Device: %s", dev_info->real_dev->name);
343 /* now show all PRIORITY mappings relating to this VLAN */ 343 /* now show all PRIORITY mappings relating to this VLAN */
344 seq_printf(seq, 344 seq_printf(seq,
345 "\nINGRESS priority mappings: 0:%lu 1:%lu 2:%lu 3:%lu 4:%lu 5:%lu 6:%lu 7:%lu\n", 345 "\nINGRESS priority mappings: 0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n",
346 dev_info->ingress_priority_map[0], 346 dev_info->ingress_priority_map[0],
347 dev_info->ingress_priority_map[1], 347 dev_info->ingress_priority_map[1],
348 dev_info->ingress_priority_map[2], 348 dev_info->ingress_priority_map[2],
@@ -357,7 +357,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
357 const struct vlan_priority_tci_mapping *mp 357 const struct vlan_priority_tci_mapping *mp
358 = dev_info->egress_priority_map[i]; 358 = dev_info->egress_priority_map[i];
359 while (mp) { 359 while (mp) {
360 seq_printf(seq, "%lu:%hu ", 360 seq_printf(seq, "%u:%hu ",
361 mp->priority, ((mp->vlan_qos >> 13) & 0x7)); 361 mp->priority, ((mp->vlan_qos >> 13) & 0x7));
362 mp = mp->next; 362 mp = mp->next;
363 } 363 }
diff --git a/net/Makefile b/net/Makefile
index 34e5b2d7f877..a87a88963432 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -37,7 +37,6 @@ obj-$(CONFIG_AX25) += ax25/
37obj-$(CONFIG_IRDA) += irda/ 37obj-$(CONFIG_IRDA) += irda/
38obj-$(CONFIG_BT) += bluetooth/ 38obj-$(CONFIG_BT) += bluetooth/
39obj-$(CONFIG_SUNRPC) += sunrpc/ 39obj-$(CONFIG_SUNRPC) += sunrpc/
40obj-$(CONFIG_RXRPC) += rxrpc/
41obj-$(CONFIG_AF_RXRPC) += rxrpc/ 40obj-$(CONFIG_AF_RXRPC) += rxrpc/
42obj-$(CONFIG_ATM) += atm/ 41obj-$(CONFIG_ATM) += atm/
43obj-$(CONFIG_DECNET) += decnet/ 42obj-$(CONFIG_DECNET) += decnet/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 5ef6a238bdbc..3d1655f98388 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -1024,7 +1024,7 @@ static int aarp_seq_show(struct seq_file *seq, void *v)
1024 return 0; 1024 return 0;
1025} 1025}
1026 1026
1027static struct seq_operations aarp_seq_ops = { 1027static const struct seq_operations aarp_seq_ops = {
1028 .start = aarp_seq_start, 1028 .start = aarp_seq_start,
1029 .next = aarp_seq_next, 1029 .next = aarp_seq_next,
1030 .stop = aarp_seq_stop, 1030 .stop = aarp_seq_stop,
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 57ff8122b5c5..87a582cc8111 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -204,21 +204,21 @@ out:
204 return 0; 204 return 0;
205} 205}
206 206
207static struct seq_operations atalk_seq_interface_ops = { 207static const struct seq_operations atalk_seq_interface_ops = {
208 .start = atalk_seq_interface_start, 208 .start = atalk_seq_interface_start,
209 .next = atalk_seq_interface_next, 209 .next = atalk_seq_interface_next,
210 .stop = atalk_seq_interface_stop, 210 .stop = atalk_seq_interface_stop,
211 .show = atalk_seq_interface_show, 211 .show = atalk_seq_interface_show,
212}; 212};
213 213
214static struct seq_operations atalk_seq_route_ops = { 214static const struct seq_operations atalk_seq_route_ops = {
215 .start = atalk_seq_route_start, 215 .start = atalk_seq_route_start,
216 .next = atalk_seq_route_next, 216 .next = atalk_seq_route_next,
217 .stop = atalk_seq_route_stop, 217 .stop = atalk_seq_route_stop,
218 .show = atalk_seq_route_show, 218 .show = atalk_seq_route_show,
219}; 219};
220 220
221static struct seq_operations atalk_seq_socket_ops = { 221static const struct seq_operations atalk_seq_socket_ops = {
222 .start = atalk_seq_socket_start, 222 .start = atalk_seq_socket_start,
223 .next = atalk_seq_socket_next, 223 .next = atalk_seq_socket_next,
224 .stop = atalk_seq_socket_stop, 224 .stop = atalk_seq_socket_stop,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 0e9f00c5c899..faa6aaf67563 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -699,28 +699,13 @@ static struct atm_ioctl br2684_ioctl_ops = {
699#ifdef CONFIG_PROC_FS 699#ifdef CONFIG_PROC_FS
700static void *br2684_seq_start(struct seq_file *seq, loff_t *pos) 700static void *br2684_seq_start(struct seq_file *seq, loff_t *pos)
701{ 701{
702 loff_t offs = 0;
703 struct br2684_dev *brd;
704
705 read_lock(&devs_lock); 702 read_lock(&devs_lock);
706 703 return seq_list_start(&br2684_devs, *pos);
707 list_for_each_entry(brd, &br2684_devs, br2684_devs) {
708 if (offs == *pos)
709 return brd;
710 ++offs;
711 }
712 return NULL;
713} 704}
714 705
715static void *br2684_seq_next(struct seq_file *seq, void *v, loff_t *pos) 706static void *br2684_seq_next(struct seq_file *seq, void *v, loff_t *pos)
716{ 707{
717 struct br2684_dev *brd = v; 708 return seq_list_next(v, &br2684_devs, pos);
718
719 ++*pos;
720
721 brd = list_entry(brd->br2684_devs.next,
722 struct br2684_dev, br2684_devs);
723 return (&brd->br2684_devs != &br2684_devs) ? brd : NULL;
724} 709}
725 710
726static void br2684_seq_stop(struct seq_file *seq, void *v) 711static void br2684_seq_stop(struct seq_file *seq, void *v)
@@ -730,7 +715,8 @@ static void br2684_seq_stop(struct seq_file *seq, void *v)
730 715
731static int br2684_seq_show(struct seq_file *seq, void *v) 716static int br2684_seq_show(struct seq_file *seq, void *v)
732{ 717{
733 const struct br2684_dev *brdev = v; 718 const struct br2684_dev *brdev = list_entry(v, struct br2684_dev,
719 br2684_devs);
734 const struct net_device *net_dev = brdev->net_dev; 720 const struct net_device *net_dev = brdev->net_dev;
735 const struct br2684_vcc *brvcc; 721 const struct br2684_vcc *brvcc;
736 722
@@ -772,7 +758,7 @@ static int br2684_seq_show(struct seq_file *seq, void *v)
772 return 0; 758 return 0;
773} 759}
774 760
775static struct seq_operations br2684_seq_ops = { 761static const struct seq_operations br2684_seq_ops = {
776 .start = br2684_seq_start, 762 .start = br2684_seq_start,
777 .next = br2684_seq_next, 763 .next = br2684_seq_next,
778 .stop = br2684_seq_stop, 764 .stop = br2684_seq_stop,
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 876b77f14745..ecf0f79b94ae 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -928,7 +928,7 @@ static int clip_seq_show(struct seq_file *seq, void *v)
928 return 0; 928 return 0;
929} 929}
930 930
931static struct seq_operations arp_seq_ops = { 931static const struct seq_operations arp_seq_ops = {
932 .start = clip_seq_start, 932 .start = clip_seq_start,
933 .next = neigh_seq_next, 933 .next = neigh_seq_next,
934 .stop = neigh_seq_stop, 934 .stop = neigh_seq_stop,
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 4dc5f2b8c43c..2770fb451ae8 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1174,7 +1174,7 @@ static int lec_seq_show(struct seq_file *seq, void *v)
1174 return 0; 1174 return 0;
1175} 1175}
1176 1176
1177static struct seq_operations lec_seq_ops = { 1177static const struct seq_operations lec_seq_ops = {
1178 .start = lec_seq_start, 1178 .start = lec_seq_start,
1179 .next = lec_seq_next, 1179 .next = lec_seq_next,
1180 .stop = lec_seq_stop, 1180 .stop = lec_seq_stop,
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 4b05cbec7a58..91f3ffc90dbd 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -177,7 +177,7 @@ static int mpc_show(struct seq_file *m, void *v)
177 return 0; 177 return 0;
178} 178}
179 179
180static struct seq_operations mpc_op = { 180static const struct seq_operations mpc_op = {
181 .start = mpc_start, 181 .start = mpc_start,
182 .next = mpc_next, 182 .next = mpc_next,
183 .stop = mpc_stop, 183 .stop = mpc_stop,
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 9e61e512f667..88154da62cd3 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -260,7 +260,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
260 return 0; 260 return 0;
261} 261}
262 262
263static struct seq_operations atm_dev_seq_ops = { 263static const struct seq_operations atm_dev_seq_ops = {
264 .start = atm_dev_seq_start, 264 .start = atm_dev_seq_start,
265 .next = atm_dev_seq_next, 265 .next = atm_dev_seq_next,
266 .stop = atm_dev_seq_stop, 266 .stop = atm_dev_seq_stop,
@@ -295,7 +295,7 @@ static int pvc_seq_show(struct seq_file *seq, void *v)
295 return 0; 295 return 0;
296} 296}
297 297
298static struct seq_operations pvc_seq_ops = { 298static const struct seq_operations pvc_seq_ops = {
299 .start = vcc_seq_start, 299 .start = vcc_seq_start,
300 .next = vcc_seq_next, 300 .next = vcc_seq_next,
301 .stop = vcc_seq_stop, 301 .stop = vcc_seq_stop,
@@ -329,7 +329,7 @@ static int vcc_seq_show(struct seq_file *seq, void *v)
329 return 0; 329 return 0;
330} 330}
331 331
332static struct seq_operations vcc_seq_ops = { 332static const struct seq_operations vcc_seq_ops = {
333 .start = vcc_seq_start, 333 .start = vcc_seq_start,
334 .next = vcc_seq_next, 334 .next = vcc_seq_next,
335 .stop = vcc_seq_stop, 335 .stop = vcc_seq_stop,
@@ -364,7 +364,7 @@ static int svc_seq_show(struct seq_file *seq, void *v)
364 return 0; 364 return 0;
365} 365}
366 366
367static struct seq_operations svc_seq_ops = { 367static const struct seq_operations svc_seq_ops = {
368 .start = vcc_seq_start, 368 .start = vcc_seq_start,
369 .next = vcc_seq_next, 369 .next = vcc_seq_next,
370 .stop = vcc_seq_stop, 370 .stop = vcc_seq_stop,
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 429e13a6c6ad..c83cf8432970 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1924,7 +1924,7 @@ static int ax25_info_show(struct seq_file *seq, void *v)
1924 return 0; 1924 return 0;
1925} 1925}
1926 1926
1927static struct seq_operations ax25_info_seqops = { 1927static const struct seq_operations ax25_info_seqops = {
1928 .start = ax25_info_start, 1928 .start = ax25_info_start,
1929 .next = ax25_info_next, 1929 .next = ax25_info_next,
1930 .stop = ax25_info_stop, 1930 .stop = ax25_info_stop,
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index d65b8e22868d..9ecf6f1df863 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -320,7 +320,7 @@ static int ax25_rt_seq_show(struct seq_file *seq, void *v)
320 return 0; 320 return 0;
321} 321}
322 322
323static struct seq_operations ax25_rt_seqops = { 323static const struct seq_operations ax25_rt_seqops = {
324 .start = ax25_rt_seq_start, 324 .start = ax25_rt_seq_start,
325 .next = ax25_rt_seq_next, 325 .next = ax25_rt_seq_next,
326 .stop = ax25_rt_seq_stop, 326 .stop = ax25_rt_seq_stop,
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 75c76647b2cb..ce0b13d44385 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -185,7 +185,7 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
185 return 0; 185 return 0;
186} 186}
187 187
188static struct seq_operations ax25_uid_seqops = { 188static const struct seq_operations ax25_uid_seqops = {
189 .start = ax25_uid_seq_start, 189 .start = ax25_uid_seq_start,
190 .next = ax25_uid_seq_next, 190 .next = ax25_uid_seq_next,
191 .stop = ax25_uid_seq_stop, 191 .stop = ax25_uid_seq_stop,
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 63980bd6b5f2..5fdfc9a67d39 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -123,8 +123,8 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
123 conn->state = BT_CONNECT; 123 conn->state = BT_CONNECT;
124 conn->out = 1; 124 conn->out = 1;
125 125
126 cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
127 cp.handle = cpu_to_le16(handle); 126 cp.handle = cpu_to_le16(handle);
127 cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
128 128
129 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp); 129 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp);
130} 130}
@@ -220,19 +220,19 @@ int hci_conn_del(struct hci_conn *conn)
220 220
221 del_timer(&conn->disc_timer); 221 del_timer(&conn->disc_timer);
222 222
223 if (conn->type == SCO_LINK) { 223 if (conn->type == ACL_LINK) {
224 struct hci_conn *acl = conn->link;
225 if (acl) {
226 acl->link = NULL;
227 hci_conn_put(acl);
228 }
229 } else {
230 struct hci_conn *sco = conn->link; 224 struct hci_conn *sco = conn->link;
231 if (sco) 225 if (sco)
232 sco->link = NULL; 226 sco->link = NULL;
233 227
234 /* Unacked frames */ 228 /* Unacked frames */
235 hdev->acl_cnt += conn->sent; 229 hdev->acl_cnt += conn->sent;
230 } else {
231 struct hci_conn *acl = conn->link;
232 if (acl) {
233 acl->link = NULL;
234 hci_conn_put(acl);
235 }
236 } 236 }
237 237
238 tasklet_disable(&hdev->tx_task); 238 tasklet_disable(&hdev->tx_task);
@@ -297,9 +297,10 @@ EXPORT_SYMBOL(hci_get_route);
297 297
298/* Create SCO or ACL connection. 298/* Create SCO or ACL connection.
299 * Device _must_ be locked */ 299 * Device _must_ be locked */
300struct hci_conn * hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst) 300struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
301{ 301{
302 struct hci_conn *acl; 302 struct hci_conn *acl;
303 struct hci_conn *sco;
303 304
304 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 305 BT_DBG("%s dst %s", hdev->name, batostr(dst));
305 306
@@ -313,28 +314,26 @@ struct hci_conn * hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
313 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) 314 if (acl->state == BT_OPEN || acl->state == BT_CLOSED)
314 hci_acl_connect(acl); 315 hci_acl_connect(acl);
315 316
316 if (type == SCO_LINK) { 317 if (type == ACL_LINK)
317 struct hci_conn *sco; 318 return acl;
318 319
319 if (!(sco = hci_conn_hash_lookup_ba(hdev, SCO_LINK, dst))) { 320 if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
320 if (!(sco = hci_conn_add(hdev, SCO_LINK, dst))) { 321 if (!(sco = hci_conn_add(hdev, type, dst))) {
321 hci_conn_put(acl); 322 hci_conn_put(acl);
322 return NULL; 323 return NULL;
323 }
324 } 324 }
325 acl->link = sco; 325 }
326 sco->link = acl;
327 326
328 hci_conn_hold(sco); 327 acl->link = sco;
328 sco->link = acl;
329 329
330 if (acl->state == BT_CONNECTED && 330 hci_conn_hold(sco);
331 (sco->state == BT_OPEN || sco->state == BT_CLOSED))
332 hci_add_sco(sco, acl->handle);
333 331
334 return sco; 332 if (acl->state == BT_CONNECTED &&
335 } else { 333 (sco->state == BT_OPEN || sco->state == BT_CLOSED))
336 return acl; 334 hci_add_sco(sco, acl->handle);
337 } 335
336 return sco;
338} 337}
339EXPORT_SYMBOL(hci_connect); 338EXPORT_SYMBOL(hci_connect);
340 339
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index aa4b56a8c3ea..f6d867e0179f 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -826,7 +826,7 @@ EXPORT_SYMBOL(hci_free_dev);
826int hci_register_dev(struct hci_dev *hdev) 826int hci_register_dev(struct hci_dev *hdev)
827{ 827{
828 struct list_head *head = &hci_dev_list, *p; 828 struct list_head *head = &hci_dev_list, *p;
829 int id = 0; 829 int i, id = 0;
830 830
831 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner); 831 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
832 832
@@ -851,6 +851,7 @@ int hci_register_dev(struct hci_dev *hdev)
851 851
852 hdev->flags = 0; 852 hdev->flags = 0;
853 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 853 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
854 hdev->esco_type = (ESCO_HV1);
854 hdev->link_mode = (HCI_LM_ACCEPT); 855 hdev->link_mode = (HCI_LM_ACCEPT);
855 856
856 hdev->idle_timeout = 0; 857 hdev->idle_timeout = 0;
@@ -865,6 +866,9 @@ int hci_register_dev(struct hci_dev *hdev)
865 skb_queue_head_init(&hdev->cmd_q); 866 skb_queue_head_init(&hdev->cmd_q);
866 skb_queue_head_init(&hdev->raw_q); 867 skb_queue_head_init(&hdev->raw_q);
867 868
869 for (i = 0; i < 3; i++)
870 hdev->reassembly[i] = NULL;
871
868 init_waitqueue_head(&hdev->req_wait_q); 872 init_waitqueue_head(&hdev->req_wait_q);
869 init_MUTEX(&hdev->req_lock); 873 init_MUTEX(&hdev->req_lock);
870 874
@@ -889,6 +893,8 @@ EXPORT_SYMBOL(hci_register_dev);
889/* Unregister HCI device */ 893/* Unregister HCI device */
890int hci_unregister_dev(struct hci_dev *hdev) 894int hci_unregister_dev(struct hci_dev *hdev)
891{ 895{
896 int i;
897
892 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 898 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
893 899
894 hci_unregister_sysfs(hdev); 900 hci_unregister_sysfs(hdev);
@@ -899,9 +905,13 @@ int hci_unregister_dev(struct hci_dev *hdev)
899 905
900 hci_dev_do_close(hdev); 906 hci_dev_do_close(hdev);
901 907
908 for (i = 0; i < 3; i++)
909 kfree_skb(hdev->reassembly[i]);
910
902 hci_notify(hdev, HCI_DEV_UNREG); 911 hci_notify(hdev, HCI_DEV_UNREG);
903 912
904 __hci_dev_put(hdev); 913 __hci_dev_put(hdev);
914
905 return 0; 915 return 0;
906} 916}
907EXPORT_SYMBOL(hci_unregister_dev); 917EXPORT_SYMBOL(hci_unregister_dev);
@@ -922,6 +932,90 @@ int hci_resume_dev(struct hci_dev *hdev)
922} 932}
923EXPORT_SYMBOL(hci_resume_dev); 933EXPORT_SYMBOL(hci_resume_dev);
924 934
935/* Receive packet type fragment */
936#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2])
937
938int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
939{
940 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
941 return -EILSEQ;
942
943 while (count) {
944 struct sk_buff *skb = __reassembly(hdev, type);
945 struct { int expect; } *scb;
946 int len = 0;
947
948 if (!skb) {
949 /* Start of the frame */
950
951 switch (type) {
952 case HCI_EVENT_PKT:
953 if (count >= HCI_EVENT_HDR_SIZE) {
954 struct hci_event_hdr *h = data;
955 len = HCI_EVENT_HDR_SIZE + h->plen;
956 } else
957 return -EILSEQ;
958 break;
959
960 case HCI_ACLDATA_PKT:
961 if (count >= HCI_ACL_HDR_SIZE) {
962 struct hci_acl_hdr *h = data;
963 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
964 } else
965 return -EILSEQ;
966 break;
967
968 case HCI_SCODATA_PKT:
969 if (count >= HCI_SCO_HDR_SIZE) {
970 struct hci_sco_hdr *h = data;
971 len = HCI_SCO_HDR_SIZE + h->dlen;
972 } else
973 return -EILSEQ;
974 break;
975 }
976
977 skb = bt_skb_alloc(len, GFP_ATOMIC);
978 if (!skb) {
979 BT_ERR("%s no memory for packet", hdev->name);
980 return -ENOMEM;
981 }
982
983 skb->dev = (void *) hdev;
984 bt_cb(skb)->pkt_type = type;
985
986 __reassembly(hdev, type) = skb;
987
988 scb = (void *) skb->cb;
989 scb->expect = len;
990 } else {
991 /* Continuation */
992
993 scb = (void *) skb->cb;
994 len = scb->expect;
995 }
996
997 len = min(len, count);
998
999 memcpy(skb_put(skb, len), data, len);
1000
1001 scb->expect -= len;
1002
1003 if (scb->expect == 0) {
1004 /* Complete frame */
1005
1006 __reassembly(hdev, type) = NULL;
1007
1008 bt_cb(skb)->pkt_type = type;
1009 hci_recv_frame(skb);
1010 }
1011
1012 count -= len; data += len;
1013 }
1014
1015 return 0;
1016}
1017EXPORT_SYMBOL(hci_recv_fragment);
1018
925/* ---- Interface to upper protocols ---- */ 1019/* ---- Interface to upper protocols ---- */
926 1020
927/* Register/Unregister protocols. 1021/* Register/Unregister protocols.
@@ -1029,7 +1123,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
1029 1123
1030 skb = bt_skb_alloc(len, GFP_ATOMIC); 1124 skb = bt_skb_alloc(len, GFP_ATOMIC);
1031 if (!skb) { 1125 if (!skb) {
1032 BT_ERR("%s Can't allocate memory for HCI command", hdev->name); 1126 BT_ERR("%s no memory for command", hdev->name);
1033 return -ENOMEM; 1127 return -ENOMEM;
1034 } 1128 }
1035 1129
@@ -1161,7 +1255,7 @@ EXPORT_SYMBOL(hci_send_sco);
1161static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) 1255static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1162{ 1256{
1163 struct hci_conn_hash *h = &hdev->conn_hash; 1257 struct hci_conn_hash *h = &hdev->conn_hash;
1164 struct hci_conn *conn = NULL; 1258 struct hci_conn *conn = NULL;
1165 int num = 0, min = ~0; 1259 int num = 0, min = ~0;
1166 struct list_head *p; 1260 struct list_head *p;
1167 1261
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 447ba7131220..4baea1e38652 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -350,11 +350,24 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
350 if (hdev->features[0] & LMP_5SLOT) 350 if (hdev->features[0] & LMP_5SLOT)
351 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 351 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
352 352
353 if (hdev->features[1] & LMP_HV2) 353 if (hdev->features[1] & LMP_HV2) {
354 hdev->pkt_type |= (HCI_HV2); 354 hdev->pkt_type |= (HCI_HV2);
355 hdev->esco_type |= (ESCO_HV2);
356 }
357
358 if (hdev->features[1] & LMP_HV3) {
359 hdev->pkt_type |= (HCI_HV3);
360 hdev->esco_type |= (ESCO_HV3);
361 }
355 362
356 if (hdev->features[1] & LMP_HV3) 363 if (hdev->features[3] & LMP_ESCO)
357 hdev->pkt_type |= (HCI_HV3); 364 hdev->esco_type |= (ESCO_EV3);
365
366 if (hdev->features[4] & LMP_EV4)
367 hdev->esco_type |= (ESCO_EV4);
368
369 if (hdev->features[4] & LMP_EV5)
370 hdev->esco_type |= (ESCO_EV5);
358 371
359 BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, 372 BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name,
360 lf->features[0], lf->features[1], lf->features[2]); 373 lf->features[0], lf->features[1], lf->features[2]);
@@ -881,12 +894,12 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
881 if (conn) { 894 if (conn) {
882 conn->sent -= count; 895 conn->sent -= count;
883 896
884 if (conn->type == SCO_LINK) { 897 if (conn->type == ACL_LINK) {
885 if ((hdev->sco_cnt += count) > hdev->sco_pkts)
886 hdev->sco_cnt = hdev->sco_pkts;
887 } else {
888 if ((hdev->acl_cnt += count) > hdev->acl_pkts) 898 if ((hdev->acl_cnt += count) > hdev->acl_pkts)
889 hdev->acl_cnt = hdev->acl_pkts; 899 hdev->acl_cnt = hdev->acl_pkts;
900 } else {
901 if ((hdev->sco_cnt += count) > hdev->sco_pkts)
902 hdev->sco_cnt = hdev->sco_pkts;
890 } 903 }
891 } 904 }
892 } 905 }
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index b2b1cceb102a..23ba61a13bdd 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -95,6 +95,10 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
95 95
96 BT_DBG("dev %p dlc %p", dev, dlc); 96 BT_DBG("dev %p dlc %p", dev, dlc);
97 97
98 write_lock_bh(&rfcomm_dev_lock);
99 list_del_init(&dev->list);
100 write_unlock_bh(&rfcomm_dev_lock);
101
98 rfcomm_dlc_lock(dlc); 102 rfcomm_dlc_lock(dlc);
99 /* Detach DLC if it's owned by this dev */ 103 /* Detach DLC if it's owned by this dev */
100 if (dlc->owner == dev) 104 if (dlc->owner == dev)
@@ -156,8 +160,13 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
156 read_lock(&rfcomm_dev_lock); 160 read_lock(&rfcomm_dev_lock);
157 161
158 dev = __rfcomm_dev_get(id); 162 dev = __rfcomm_dev_get(id);
159 if (dev) 163
160 rfcomm_dev_hold(dev); 164 if (dev) {
165 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
166 dev = NULL;
167 else
168 rfcomm_dev_hold(dev);
169 }
161 170
162 read_unlock(&rfcomm_dev_lock); 171 read_unlock(&rfcomm_dev_lock);
163 172
@@ -265,6 +274,12 @@ out:
265 274
266 dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL); 275 dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL);
267 276
277 if (IS_ERR(dev->tty_dev)) {
278 list_del(&dev->list);
279 kfree(dev);
280 return PTR_ERR(dev->tty_dev);
281 }
282
268 return dev->id; 283 return dev->id;
269} 284}
270 285
@@ -272,10 +287,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
272{ 287{
273 BT_DBG("dev %p", dev); 288 BT_DBG("dev %p", dev);
274 289
275 write_lock_bh(&rfcomm_dev_lock); 290 set_bit(RFCOMM_TTY_RELEASED, &dev->flags);
276 list_del_init(&dev->list);
277 write_unlock_bh(&rfcomm_dev_lock);
278
279 rfcomm_dev_put(dev); 291 rfcomm_dev_put(dev);
280} 292}
281 293
@@ -329,7 +341,7 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)
329 if (copy_from_user(&req, arg, sizeof(req))) 341 if (copy_from_user(&req, arg, sizeof(req)))
330 return -EFAULT; 342 return -EFAULT;
331 343
332 BT_DBG("sk %p dev_id %id flags 0x%x", sk, req.dev_id, req.flags); 344 BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
333 345
334 if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) 346 if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN))
335 return -EPERM; 347 return -EPERM;
@@ -370,7 +382,7 @@ static int rfcomm_release_dev(void __user *arg)
370 if (copy_from_user(&req, arg, sizeof(req))) 382 if (copy_from_user(&req, arg, sizeof(req)))
371 return -EFAULT; 383 return -EFAULT;
372 384
373 BT_DBG("dev_id %id flags 0x%x", req.dev_id, req.flags); 385 BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags);
374 386
375 if (!(dev = rfcomm_dev_get(req.dev_id))) 387 if (!(dev = rfcomm_dev_get(req.dev_id)))
376 return -ENODEV; 388 return -ENODEV;
@@ -383,6 +395,10 @@ static int rfcomm_release_dev(void __user *arg)
383 if (req.flags & (1 << RFCOMM_HANGUP_NOW)) 395 if (req.flags & (1 << RFCOMM_HANGUP_NOW))
384 rfcomm_dlc_close(dev->dlc, 0); 396 rfcomm_dlc_close(dev->dlc, 0);
385 397
398 /* Shut down TTY synchronously before freeing rfcomm_dev */
399 if (dev->tty)
400 tty_vhangup(dev->tty);
401
386 rfcomm_dev_del(dev); 402 rfcomm_dev_del(dev);
387 rfcomm_dev_put(dev); 403 rfcomm_dev_put(dev);
388 return 0; 404 return 0;
@@ -415,6 +431,8 @@ static int rfcomm_get_dev_list(void __user *arg)
415 431
416 list_for_each(p, &rfcomm_dev_list) { 432 list_for_each(p, &rfcomm_dev_list) {
417 struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list); 433 struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
434 if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
435 continue;
418 (di + n)->id = dev->id; 436 (di + n)->id = dev->id;
419 (di + n)->flags = dev->flags; 437 (di + n)->flags = dev->flags;
420 (di + n)->state = dev->dlc->state; 438 (di + n)->state = dev->dlc->state;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 849deaf14108..7b4ce9113be2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -368,10 +368,18 @@ void br_features_recompute(struct net_bridge *br)
368 list_for_each_entry(p, &br->port_list, list) { 368 list_for_each_entry(p, &br->port_list, list) {
369 unsigned long feature = p->dev->features; 369 unsigned long feature = p->dev->features;
370 370
371 /* if device needs checksumming, downgrade to hw checksumming */
371 if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM)) 372 if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
372 checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM; 373 checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
374
375 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
373 if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM)) 376 if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
374 checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM; 377 checksum ^= NETIF_F_HW_CSUM
378 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
379
380 if (checksum & NETIF_F_IPV6_CSUM && !(feature & NETIF_F_IPV6_CSUM))
381 checksum &= ~NETIF_F_IPV6_CSUM;
382
375 if (!(feature & NETIF_F_IP_CSUM)) 383 if (!(feature & NETIF_F_IP_CSUM))
376 checksum = 0; 384 checksum = 0;
377 385
diff --git a/net/core/dev.c b/net/core/dev.c
index ee051bb398a0..4221dcda88d7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -942,7 +942,7 @@ int dev_open(struct net_device *dev)
942 /* 942 /*
943 * Initialize multicasting status 943 * Initialize multicasting status
944 */ 944 */
945 dev_mc_upload(dev); 945 dev_set_rx_mode(dev);
946 946
947 /* 947 /*
948 * Wakeup transmit queue engine 948 * Wakeup transmit queue engine
@@ -1429,7 +1429,9 @@ gso:
1429 skb->next = nskb; 1429 skb->next = nskb;
1430 return rc; 1430 return rc;
1431 } 1431 }
1432 if (unlikely(netif_queue_stopped(dev) && skb->next)) 1432 if (unlikely((netif_queue_stopped(dev) ||
1433 netif_subqueue_stopped(dev, skb->queue_mapping)) &&
1434 skb->next))
1433 return NETDEV_TX_BUSY; 1435 return NETDEV_TX_BUSY;
1434 } while (skb->next); 1436 } while (skb->next);
1435 1437
@@ -1510,8 +1512,10 @@ int dev_queue_xmit(struct sk_buff *skb)
1510 skb_headroom(skb)); 1512 skb_headroom(skb));
1511 1513
1512 if (!(dev->features & NETIF_F_GEN_CSUM) && 1514 if (!(dev->features & NETIF_F_GEN_CSUM) &&
1513 (!(dev->features & NETIF_F_IP_CSUM) || 1515 !((dev->features & NETIF_F_IP_CSUM) &&
1514 skb->protocol != htons(ETH_P_IP))) 1516 skb->protocol == htons(ETH_P_IP)) &&
1517 !((dev->features & NETIF_F_IPV6_CSUM) &&
1518 skb->protocol == htons(ETH_P_IPV6)))
1515 if (skb_checksum_help(skb)) 1519 if (skb_checksum_help(skb))
1516 goto out_kfree_skb; 1520 goto out_kfree_skb;
1517 } 1521 }
@@ -1545,6 +1549,8 @@ gso:
1545 spin_lock(&dev->queue_lock); 1549 spin_lock(&dev->queue_lock);
1546 q = dev->qdisc; 1550 q = dev->qdisc;
1547 if (q->enqueue) { 1551 if (q->enqueue) {
1552 /* reset queue_mapping to zero */
1553 skb->queue_mapping = 0;
1548 rc = q->enqueue(skb, q); 1554 rc = q->enqueue(skb, q);
1549 qdisc_run(dev); 1555 qdisc_run(dev);
1550 spin_unlock(&dev->queue_lock); 1556 spin_unlock(&dev->queue_lock);
@@ -1574,7 +1580,8 @@ gso:
1574 1580
1575 HARD_TX_LOCK(dev, cpu); 1581 HARD_TX_LOCK(dev, cpu);
1576 1582
1577 if (!netif_queue_stopped(dev)) { 1583 if (!netif_queue_stopped(dev) &&
1584 !netif_subqueue_stopped(dev, skb->queue_mapping)) {
1578 rc = 0; 1585 rc = 0;
1579 if (!dev_hard_start_xmit(skb, dev)) { 1586 if (!dev_hard_start_xmit(skb, dev)) {
1580 HARD_TX_UNLOCK(dev); 1587 HARD_TX_UNLOCK(dev);
@@ -2496,17 +2503,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
2496 return 0; 2503 return 0;
2497} 2504}
2498 2505
2499/** 2506static void __dev_set_promiscuity(struct net_device *dev, int inc)
2500 * dev_set_promiscuity - update promiscuity count on a device
2501 * @dev: device
2502 * @inc: modifier
2503 *
2504 * Add or remove promiscuity from a device. While the count in the device
2505 * remains above zero the interface remains promiscuous. Once it hits zero
2506 * the device reverts back to normal filtering operation. A negative inc
2507 * value is used to drop promiscuity on the device.
2508 */
2509void dev_set_promiscuity(struct net_device *dev, int inc)
2510{ 2507{
2511 unsigned short old_flags = dev->flags; 2508 unsigned short old_flags = dev->flags;
2512 2509
@@ -2515,7 +2512,6 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
2515 else 2512 else
2516 dev->flags |= IFF_PROMISC; 2513 dev->flags |= IFF_PROMISC;
2517 if (dev->flags != old_flags) { 2514 if (dev->flags != old_flags) {
2518 dev_mc_upload(dev);
2519 printk(KERN_INFO "device %s %s promiscuous mode\n", 2515 printk(KERN_INFO "device %s %s promiscuous mode\n",
2520 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : 2516 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2521 "left"); 2517 "left");
@@ -2529,6 +2525,25 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
2529} 2525}
2530 2526
2531/** 2527/**
2528 * dev_set_promiscuity - update promiscuity count on a device
2529 * @dev: device
2530 * @inc: modifier
2531 *
2532 * Add or remove promiscuity from a device. While the count in the device
2533 * remains above zero the interface remains promiscuous. Once it hits zero
2534 * the device reverts back to normal filtering operation. A negative inc
2535 * value is used to drop promiscuity on the device.
2536 */
2537void dev_set_promiscuity(struct net_device *dev, int inc)
2538{
2539 unsigned short old_flags = dev->flags;
2540
2541 __dev_set_promiscuity(dev, inc);
2542 if (dev->flags != old_flags)
2543 dev_set_rx_mode(dev);
2544}
2545
2546/**
2532 * dev_set_allmulti - update allmulti count on a device 2547 * dev_set_allmulti - update allmulti count on a device
2533 * @dev: device 2548 * @dev: device
2534 * @inc: modifier 2549 * @inc: modifier
@@ -2548,7 +2563,176 @@ void dev_set_allmulti(struct net_device *dev, int inc)
2548 if ((dev->allmulti += inc) == 0) 2563 if ((dev->allmulti += inc) == 0)
2549 dev->flags &= ~IFF_ALLMULTI; 2564 dev->flags &= ~IFF_ALLMULTI;
2550 if (dev->flags ^ old_flags) 2565 if (dev->flags ^ old_flags)
2551 dev_mc_upload(dev); 2566 dev_set_rx_mode(dev);
2567}
2568
2569/*
2570 * Upload unicast and multicast address lists to device and
2571 * configure RX filtering. When the device doesn't support unicast
2572 * filtering it is put in promiscous mode while unicast addresses
2573 * are present.
2574 */
2575void __dev_set_rx_mode(struct net_device *dev)
2576{
2577 /* dev_open will call this function so the list will stay sane. */
2578 if (!(dev->flags&IFF_UP))
2579 return;
2580
2581 if (!netif_device_present(dev))
2582 return;
2583
2584 if (dev->set_rx_mode)
2585 dev->set_rx_mode(dev);
2586 else {
2587 /* Unicast addresses changes may only happen under the rtnl,
2588 * therefore calling __dev_set_promiscuity here is safe.
2589 */
2590 if (dev->uc_count > 0 && !dev->uc_promisc) {
2591 __dev_set_promiscuity(dev, 1);
2592 dev->uc_promisc = 1;
2593 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2594 __dev_set_promiscuity(dev, -1);
2595 dev->uc_promisc = 0;
2596 }
2597
2598 if (dev->set_multicast_list)
2599 dev->set_multicast_list(dev);
2600 }
2601}
2602
2603void dev_set_rx_mode(struct net_device *dev)
2604{
2605 netif_tx_lock_bh(dev);
2606 __dev_set_rx_mode(dev);
2607 netif_tx_unlock_bh(dev);
2608}
2609
2610int __dev_addr_delete(struct dev_addr_list **list, int *count,
2611 void *addr, int alen, int glbl)
2612{
2613 struct dev_addr_list *da;
2614
2615 for (; (da = *list) != NULL; list = &da->next) {
2616 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2617 alen == da->da_addrlen) {
2618 if (glbl) {
2619 int old_glbl = da->da_gusers;
2620 da->da_gusers = 0;
2621 if (old_glbl == 0)
2622 break;
2623 }
2624 if (--da->da_users)
2625 return 0;
2626
2627 *list = da->next;
2628 kfree(da);
2629 (*count)--;
2630 return 0;
2631 }
2632 }
2633 return -ENOENT;
2634}
2635
2636int __dev_addr_add(struct dev_addr_list **list, int *count,
2637 void *addr, int alen, int glbl)
2638{
2639 struct dev_addr_list *da;
2640
2641 for (da = *list; da != NULL; da = da->next) {
2642 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2643 da->da_addrlen == alen) {
2644 if (glbl) {
2645 int old_glbl = da->da_gusers;
2646 da->da_gusers = 1;
2647 if (old_glbl)
2648 return 0;
2649 }
2650 da->da_users++;
2651 return 0;
2652 }
2653 }
2654
2655 da = kmalloc(sizeof(*da), GFP_ATOMIC);
2656 if (da == NULL)
2657 return -ENOMEM;
2658 memcpy(da->da_addr, addr, alen);
2659 da->da_addrlen = alen;
2660 da->da_users = 1;
2661 da->da_gusers = glbl ? 1 : 0;
2662 da->next = *list;
2663 *list = da;
2664 (*count)++;
2665 return 0;
2666}
2667
2668void __dev_addr_discard(struct dev_addr_list **list)
2669{
2670 struct dev_addr_list *tmp;
2671
2672 while (*list != NULL) {
2673 tmp = *list;
2674 *list = tmp->next;
2675 if (tmp->da_users > tmp->da_gusers)
2676 printk("__dev_addr_discard: address leakage! "
2677 "da_users=%d\n", tmp->da_users);
2678 kfree(tmp);
2679 }
2680}
2681
2682/**
2683 * dev_unicast_delete - Release secondary unicast address.
2684 * @dev: device
2685 *
2686 * Release reference to a secondary unicast address and remove it
2687 * from the device if the reference count drop to zero.
2688 *
2689 * The caller must hold the rtnl_mutex.
2690 */
2691int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2692{
2693 int err;
2694
2695 ASSERT_RTNL();
2696
2697 netif_tx_lock_bh(dev);
2698 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2699 if (!err)
2700 __dev_set_rx_mode(dev);
2701 netif_tx_unlock_bh(dev);
2702 return err;
2703}
2704EXPORT_SYMBOL(dev_unicast_delete);
2705
2706/**
2707 * dev_unicast_add - add a secondary unicast address
2708 * @dev: device
2709 *
2710 * Add a secondary unicast address to the device or increase
2711 * the reference count if it already exists.
2712 *
2713 * The caller must hold the rtnl_mutex.
2714 */
2715int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2716{
2717 int err;
2718
2719 ASSERT_RTNL();
2720
2721 netif_tx_lock_bh(dev);
2722 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2723 if (!err)
2724 __dev_set_rx_mode(dev);
2725 netif_tx_unlock_bh(dev);
2726 return err;
2727}
2728EXPORT_SYMBOL(dev_unicast_add);
2729
2730static void dev_unicast_discard(struct net_device *dev)
2731{
2732 netif_tx_lock_bh(dev);
2733 __dev_addr_discard(&dev->uc_list);
2734 dev->uc_count = 0;
2735 netif_tx_unlock_bh(dev);
2552} 2736}
2553 2737
2554unsigned dev_get_flags(const struct net_device *dev) 2738unsigned dev_get_flags(const struct net_device *dev)
@@ -2594,7 +2778,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
2594 * Load in the correct multicast list now the flags have changed. 2778 * Load in the correct multicast list now the flags have changed.
2595 */ 2779 */
2596 2780
2597 dev_mc_upload(dev); 2781 dev_set_rx_mode(dev);
2598 2782
2599 /* 2783 /*
2600 * Have we downed the interface. We handle IFF_UP ourselves 2784 * Have we downed the interface. We handle IFF_UP ourselves
@@ -2607,7 +2791,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
2607 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); 2791 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2608 2792
2609 if (!ret) 2793 if (!ret)
2610 dev_mc_upload(dev); 2794 dev_set_rx_mode(dev);
2611 } 2795 }
2612 2796
2613 if (dev->flags & IFF_UP && 2797 if (dev->flags & IFF_UP &&
@@ -3107,6 +3291,22 @@ int register_netdevice(struct net_device *dev)
3107 } 3291 }
3108 } 3292 }
3109 3293
3294 /* Fix illegal checksum combinations */
3295 if ((dev->features & NETIF_F_HW_CSUM) &&
3296 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3297 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3298 dev->name);
3299 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3300 }
3301
3302 if ((dev->features & NETIF_F_NO_CSUM) &&
3303 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3304 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3305 dev->name);
3306 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3307 }
3308
3309
3110 /* Fix illegal SG+CSUM combinations. */ 3310 /* Fix illegal SG+CSUM combinations. */
3111 if ((dev->features & NETIF_F_SG) && 3311 if ((dev->features & NETIF_F_SG) &&
3112 !(dev->features & NETIF_F_ALL_CSUM)) { 3312 !(dev->features & NETIF_F_ALL_CSUM)) {
@@ -3343,16 +3543,18 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
3343} 3543}
3344 3544
3345/** 3545/**
3346 * alloc_netdev - allocate network device 3546 * alloc_netdev_mq - allocate network device
3347 * @sizeof_priv: size of private data to allocate space for 3547 * @sizeof_priv: size of private data to allocate space for
3348 * @name: device name format string 3548 * @name: device name format string
3349 * @setup: callback to initialize device 3549 * @setup: callback to initialize device
3550 * @queue_count: the number of subqueues to allocate
3350 * 3551 *
3351 * Allocates a struct net_device with private data area for driver use 3552 * Allocates a struct net_device with private data area for driver use
3352 * and performs basic initialization. 3553 * and performs basic initialization. Also allocates subquue structs
3554 * for each queue on the device at the end of the netdevice.
3353 */ 3555 */
3354struct net_device *alloc_netdev(int sizeof_priv, const char *name, 3556struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3355 void (*setup)(struct net_device *)) 3557 void (*setup)(struct net_device *), unsigned int queue_count)
3356{ 3558{
3357 void *p; 3559 void *p;
3358 struct net_device *dev; 3560 struct net_device *dev;
@@ -3361,7 +3563,9 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3361 BUG_ON(strlen(name) >= sizeof(dev->name)); 3563 BUG_ON(strlen(name) >= sizeof(dev->name));
3362 3564
3363 /* ensure 32-byte alignment of both the device and private area */ 3565 /* ensure 32-byte alignment of both the device and private area */
3364 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; 3566 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
3567 (sizeof(struct net_device_subqueue) * queue_count)) &
3568 ~NETDEV_ALIGN_CONST;
3365 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; 3569 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3366 3570
3367 p = kzalloc(alloc_size, GFP_KERNEL); 3571 p = kzalloc(alloc_size, GFP_KERNEL);
@@ -3374,15 +3578,22 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3374 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 3578 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3375 dev->padded = (char *)dev - (char *)p; 3579 dev->padded = (char *)dev - (char *)p;
3376 3580
3377 if (sizeof_priv) 3581 if (sizeof_priv) {
3378 dev->priv = netdev_priv(dev); 3582 dev->priv = ((char *)dev +
3583 ((sizeof(struct net_device) +
3584 (sizeof(struct net_device_subqueue) *
3585 queue_count) + NETDEV_ALIGN_CONST)
3586 & ~NETDEV_ALIGN_CONST));
3587 }
3588
3589 dev->egress_subqueue_count = queue_count;
3379 3590
3380 dev->get_stats = internal_stats; 3591 dev->get_stats = internal_stats;
3381 setup(dev); 3592 setup(dev);
3382 strcpy(dev->name, name); 3593 strcpy(dev->name, name);
3383 return dev; 3594 return dev;
3384} 3595}
3385EXPORT_SYMBOL(alloc_netdev); 3596EXPORT_SYMBOL(alloc_netdev_mq);
3386 3597
3387/** 3598/**
3388 * free_netdev - free network device 3599 * free_netdev - free network device
@@ -3471,8 +3682,9 @@ void unregister_netdevice(struct net_device *dev)
3471 raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); 3682 raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3472 3683
3473 /* 3684 /*
3474 * Flush the multicast chain 3685 * Flush the unicast and multicast chains
3475 */ 3686 */
3687 dev_unicast_discard(dev);
3476 dev_mc_discard(dev); 3688 dev_mc_discard(dev);
3477 3689
3478 if (dev->uninit) 3690 if (dev->uninit)
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 5a54053386c8..aa38100601fb 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -64,85 +64,24 @@
64 */ 64 */
65 65
66/* 66/*
67 * Update the multicast list into the physical NIC controller.
68 */
69
70static void __dev_mc_upload(struct net_device *dev)
71{
72 /* Don't do anything till we up the interface
73 * [dev_open will call this function so the list will
74 * stay sane]
75 */
76
77 if (!(dev->flags&IFF_UP))
78 return;
79
80 /*
81 * Devices with no set multicast or which have been
82 * detached don't get set.
83 */
84
85 if (dev->set_multicast_list == NULL ||
86 !netif_device_present(dev))
87 return;
88
89 dev->set_multicast_list(dev);
90}
91
92void dev_mc_upload(struct net_device *dev)
93{
94 netif_tx_lock_bh(dev);
95 __dev_mc_upload(dev);
96 netif_tx_unlock_bh(dev);
97}
98
99/*
100 * Delete a device level multicast 67 * Delete a device level multicast
101 */ 68 */
102 69
103int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) 70int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
104{ 71{
105 int err = 0; 72 int err;
106 struct dev_mc_list *dmi, **dmip;
107 73
108 netif_tx_lock_bh(dev); 74 netif_tx_lock_bh(dev);
109 75 err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
110 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) { 76 addr, alen, glbl);
77 if (!err) {
111 /* 78 /*
112 * Find the entry we want to delete. The device could 79 * We have altered the list, so the card
113 * have variable length entries so check these too. 80 * loaded filter is now wrong. Fix it
114 */ 81 */
115 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
116 alen == dmi->dmi_addrlen) {
117 if (glbl) {
118 int old_glbl = dmi->dmi_gusers;
119 dmi->dmi_gusers = 0;
120 if (old_glbl == 0)
121 break;
122 }
123 if (--dmi->dmi_users)
124 goto done;
125 82
126 /* 83 __dev_set_rx_mode(dev);
127 * Last user. So delete the entry.
128 */
129 *dmip = dmi->next;
130 dev->mc_count--;
131
132 kfree(dmi);
133
134 /*
135 * We have altered the list, so the card
136 * loaded filter is now wrong. Fix it
137 */
138 __dev_mc_upload(dev);
139
140 netif_tx_unlock_bh(dev);
141 return 0;
142 }
143 } 84 }
144 err = -ENOENT;
145done:
146 netif_tx_unlock_bh(dev); 85 netif_tx_unlock_bh(dev);
147 return err; 86 return err;
148} 87}
@@ -153,46 +92,13 @@ done:
153 92
154int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) 93int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
155{ 94{
156 int err = 0; 95 int err;
157 struct dev_mc_list *dmi, *dmi1;
158
159 dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
160 96
161 netif_tx_lock_bh(dev); 97 netif_tx_lock_bh(dev);
162 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) { 98 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
163 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 && 99 if (!err)
164 dmi->dmi_addrlen == alen) { 100 __dev_set_rx_mode(dev);
165 if (glbl) {
166 int old_glbl = dmi->dmi_gusers;
167 dmi->dmi_gusers = 1;
168 if (old_glbl)
169 goto done;
170 }
171 dmi->dmi_users++;
172 goto done;
173 }
174 }
175
176 if ((dmi = dmi1) == NULL) {
177 netif_tx_unlock_bh(dev);
178 return -ENOMEM;
179 }
180 memcpy(dmi->dmi_addr, addr, alen);
181 dmi->dmi_addrlen = alen;
182 dmi->next = dev->mc_list;
183 dmi->dmi_users = 1;
184 dmi->dmi_gusers = glbl ? 1 : 0;
185 dev->mc_list = dmi;
186 dev->mc_count++;
187
188 __dev_mc_upload(dev);
189
190 netif_tx_unlock_bh(dev);
191 return 0;
192
193done:
194 netif_tx_unlock_bh(dev); 101 netif_tx_unlock_bh(dev);
195 kfree(dmi1);
196 return err; 102 return err;
197} 103}
198 104
@@ -203,16 +109,8 @@ done:
203void dev_mc_discard(struct net_device *dev) 109void dev_mc_discard(struct net_device *dev)
204{ 110{
205 netif_tx_lock_bh(dev); 111 netif_tx_lock_bh(dev);
206 112 __dev_addr_discard(&dev->mc_list);
207 while (dev->mc_list != NULL) {
208 struct dev_mc_list *tmp = dev->mc_list;
209 dev->mc_list = tmp->next;
210 if (tmp->dmi_users > tmp->dmi_gusers)
211 printk("dev_mc_discard: multicast leakage! dmi_users=%d\n", tmp->dmi_users);
212 kfree(tmp);
213 }
214 dev->mc_count = 0; 113 dev->mc_count = 0;
215
216 netif_tx_unlock_bh(dev); 114 netif_tx_unlock_bh(dev);
217} 115}
218 116
@@ -244,7 +142,7 @@ static void dev_mc_seq_stop(struct seq_file *seq, void *v)
244 142
245static int dev_mc_seq_show(struct seq_file *seq, void *v) 143static int dev_mc_seq_show(struct seq_file *seq, void *v)
246{ 144{
247 struct dev_mc_list *m; 145 struct dev_addr_list *m;
248 struct net_device *dev = v; 146 struct net_device *dev = v;
249 147
250 netif_tx_lock_bh(dev); 148 netif_tx_lock_bh(dev);
@@ -292,4 +190,3 @@ void __init dev_mcast_init(void)
292 190
293EXPORT_SYMBOL(dev_mc_add); 191EXPORT_SYMBOL(dev_mc_add);
294EXPORT_SYMBOL(dev_mc_delete); 192EXPORT_SYMBOL(dev_mc_delete);
295EXPORT_SYMBOL(dev_mc_upload);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 17daf4c9f793..cc84d8d8a3c7 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -128,7 +128,8 @@ static void est_timer(unsigned long arg)
128 spin_unlock(e->stats_lock); 128 spin_unlock(e->stats_lock);
129 } 129 }
130 130
131 mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4)); 131 if (elist[idx].list != NULL)
132 mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
132 read_unlock(&est_lock); 133 read_unlock(&est_lock);
133} 134}
134 135
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a0efdd7a6b37..d1264e9a50a8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -66,8 +66,9 @@ static void queue_process(struct work_struct *work)
66 66
67 local_irq_save(flags); 67 local_irq_save(flags);
68 netif_tx_lock(dev); 68 netif_tx_lock(dev);
69 if (netif_queue_stopped(dev) || 69 if ((netif_queue_stopped(dev) ||
70 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { 70 netif_subqueue_stopped(dev, skb->queue_mapping)) ||
71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
71 skb_queue_head(&npinfo->txq, skb); 72 skb_queue_head(&npinfo->txq, skb);
72 netif_tx_unlock(dev); 73 netif_tx_unlock(dev);
73 local_irq_restore(flags); 74 local_irq_restore(flags);
@@ -123,6 +124,13 @@ static void poll_napi(struct netpoll *np)
123 if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) && 124 if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
124 npinfo->poll_owner != smp_processor_id() && 125 npinfo->poll_owner != smp_processor_id() &&
125 spin_trylock(&npinfo->poll_lock)) { 126 spin_trylock(&npinfo->poll_lock)) {
127 /* When calling dev->poll from poll_napi, we may end up in
128 * netif_rx_complete. However, only the CPU to which the
129 * device was queued is allowed to remove it from poll_list.
130 * Setting POLL_LIST_FROZEN tells netif_rx_complete
131 * to leave the NAPI state alone.
132 */
133 set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
126 npinfo->rx_flags |= NETPOLL_RX_DROP; 134 npinfo->rx_flags |= NETPOLL_RX_DROP;
127 atomic_inc(&trapped); 135 atomic_inc(&trapped);
128 136
@@ -130,6 +138,7 @@ static void poll_napi(struct netpoll *np)
130 138
131 atomic_dec(&trapped); 139 atomic_dec(&trapped);
132 npinfo->rx_flags &= ~NETPOLL_RX_DROP; 140 npinfo->rx_flags &= ~NETPOLL_RX_DROP;
141 clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
133 spin_unlock(&npinfo->poll_lock); 142 spin_unlock(&npinfo->poll_lock);
134 } 143 }
135} 144}
@@ -254,7 +263,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
254 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 263 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
255 tries > 0; --tries) { 264 tries > 0; --tries) {
256 if (netif_tx_trylock(dev)) { 265 if (netif_tx_trylock(dev)) {
257 if (!netif_queue_stopped(dev)) 266 if (!netif_queue_stopped(dev) &&
267 !netif_subqueue_stopped(dev, skb->queue_mapping))
258 status = dev->hard_start_xmit(skb, dev); 268 status = dev->hard_start_xmit(skb, dev);
259 netif_tx_unlock(dev); 269 netif_tx_unlock(dev);
260 270
@@ -781,7 +791,6 @@ void netpoll_cleanup(struct netpoll *np)
781 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 791 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
782 } 792 }
783 793
784 np->dev->npinfo = NULL;
785 if (atomic_dec_and_test(&npinfo->refcnt)) { 794 if (atomic_dec_and_test(&npinfo->refcnt)) {
786 skb_queue_purge(&npinfo->arp_tx); 795 skb_queue_purge(&npinfo->arp_tx);
787 skb_queue_purge(&npinfo->txq); 796 skb_queue_purge(&npinfo->txq);
@@ -794,6 +803,7 @@ void netpoll_cleanup(struct netpoll *np)
794 kfree_skb(skb); 803 kfree_skb(skb);
795 } 804 }
796 kfree(npinfo); 805 kfree(npinfo);
806 np->dev->npinfo = NULL;
797 } 807 }
798 } 808 }
799 809
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9cd3a1cb60ef..75215331b045 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -152,6 +152,9 @@
152#include <net/checksum.h> 152#include <net/checksum.h>
153#include <net/ipv6.h> 153#include <net/ipv6.h>
154#include <net/addrconf.h> 154#include <net/addrconf.h>
155#ifdef CONFIG_XFRM
156#include <net/xfrm.h>
157#endif
155#include <asm/byteorder.h> 158#include <asm/byteorder.h>
156#include <linux/rcupdate.h> 159#include <linux/rcupdate.h>
157#include <asm/bitops.h> 160#include <asm/bitops.h>
@@ -181,6 +184,8 @@
181#define F_MPLS_RND (1<<8) /* Random MPLS labels */ 184#define F_MPLS_RND (1<<8) /* Random MPLS labels */
182#define F_VID_RND (1<<9) /* Random VLAN ID */ 185#define F_VID_RND (1<<9) /* Random VLAN ID */
183#define F_SVID_RND (1<<10) /* Random SVLAN ID */ 186#define F_SVID_RND (1<<10) /* Random SVLAN ID */
187#define F_FLOW_SEQ (1<<11) /* Sequential flows */
188#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
184 189
185/* Thread control flag bits */ 190/* Thread control flag bits */
186#define T_TERMINATE (1<<0) 191#define T_TERMINATE (1<<0)
@@ -207,8 +212,15 @@ static struct proc_dir_entry *pg_proc_dir = NULL;
207struct flow_state { 212struct flow_state {
208 __be32 cur_daddr; 213 __be32 cur_daddr;
209 int count; 214 int count;
215#ifdef CONFIG_XFRM
216 struct xfrm_state *x;
217#endif
218 __u32 flags;
210}; 219};
211 220
221/* flow flag bits */
222#define F_INIT (1<<0) /* flow has been initialized */
223
212struct pktgen_dev { 224struct pktgen_dev {
213 /* 225 /*
214 * Try to keep frequent/infrequent used vars. separated. 226 * Try to keep frequent/infrequent used vars. separated.
@@ -228,6 +240,7 @@ struct pktgen_dev {
228 240
229 int min_pkt_size; /* = ETH_ZLEN; */ 241 int min_pkt_size; /* = ETH_ZLEN; */
230 int max_pkt_size; /* = ETH_ZLEN; */ 242 int max_pkt_size; /* = ETH_ZLEN; */
243 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
231 int nfrags; 244 int nfrags;
232 __u32 delay_us; /* Default delay */ 245 __u32 delay_us; /* Default delay */
233 __u32 delay_ns; 246 __u32 delay_ns;
@@ -341,7 +354,11 @@ struct pktgen_dev {
341 unsigned cflows; /* Concurrent flows (config) */ 354 unsigned cflows; /* Concurrent flows (config) */
342 unsigned lflow; /* Flow length (config) */ 355 unsigned lflow; /* Flow length (config) */
343 unsigned nflows; /* accumulated flows (stats) */ 356 unsigned nflows; /* accumulated flows (stats) */
344 357 unsigned curfl; /* current sequenced flow (state)*/
358#ifdef CONFIG_XFRM
359 __u8 ipsmode; /* IPSEC mode (config) */
360 __u8 ipsproto; /* IPSEC type (config) */
361#endif
345 char result[512]; 362 char result[512];
346}; 363};
347 364
@@ -690,6 +707,18 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
690 if (pkt_dev->flags & F_MPLS_RND) 707 if (pkt_dev->flags & F_MPLS_RND)
691 seq_printf(seq, "MPLS_RND "); 708 seq_printf(seq, "MPLS_RND ");
692 709
710 if (pkt_dev->cflows) {
711 if (pkt_dev->flags & F_FLOW_SEQ)
712 seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
713 else
714 seq_printf(seq, "FLOW_RND ");
715 }
716
717#ifdef CONFIG_XFRM
718 if (pkt_dev->flags & F_IPSEC_ON)
719 seq_printf(seq, "IPSEC ");
720#endif
721
693 if (pkt_dev->flags & F_MACSRC_RND) 722 if (pkt_dev->flags & F_MACSRC_RND)
694 seq_printf(seq, "MACSRC_RND "); 723 seq_printf(seq, "MACSRC_RND ");
695 724
@@ -1181,6 +1210,14 @@ static ssize_t pktgen_if_write(struct file *file,
1181 else if (strcmp(f, "!SVID_RND") == 0) 1210 else if (strcmp(f, "!SVID_RND") == 0)
1182 pkt_dev->flags &= ~F_SVID_RND; 1211 pkt_dev->flags &= ~F_SVID_RND;
1183 1212
1213 else if (strcmp(f, "FLOW_SEQ") == 0)
1214 pkt_dev->flags |= F_FLOW_SEQ;
1215
1216#ifdef CONFIG_XFRM
1217 else if (strcmp(f, "IPSEC") == 0)
1218 pkt_dev->flags |= F_IPSEC_ON;
1219#endif
1220
1184 else if (strcmp(f, "!IPV6") == 0) 1221 else if (strcmp(f, "!IPV6") == 0)
1185 pkt_dev->flags &= ~F_IPV6; 1222 pkt_dev->flags &= ~F_IPV6;
1186 1223
@@ -1189,7 +1226,7 @@ static ssize_t pktgen_if_write(struct file *file,
1189 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", 1226 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
1190 f, 1227 f,
1191 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " 1228 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
1192 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND\n"); 1229 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n");
1193 return count; 1230 return count;
1194 } 1231 }
1195 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1232 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -2075,6 +2112,70 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
2075 pkt_dev->idle_acc += now - start; 2112 pkt_dev->idle_acc += now - start;
2076} 2113}
2077 2114
2115static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2116{
2117 pkt_dev->pkt_overhead = 0;
2118 pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
2119 pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
2120 pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2121}
2122
2123static inline int f_seen(struct pktgen_dev *pkt_dev, int flow)
2124{
2125
2126 if (pkt_dev->flows[flow].flags & F_INIT)
2127 return 1;
2128 else
2129 return 0;
2130}
2131
2132static inline int f_pick(struct pktgen_dev *pkt_dev)
2133{
2134 int flow = pkt_dev->curfl;
2135
2136 if (pkt_dev->flags & F_FLOW_SEQ) {
2137 if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
2138 /* reset time */
2139 pkt_dev->flows[flow].count = 0;
2140 pkt_dev->curfl += 1;
2141 if (pkt_dev->curfl >= pkt_dev->cflows)
2142 pkt_dev->curfl = 0; /*reset */
2143 }
2144 } else {
2145 flow = random32() % pkt_dev->cflows;
2146
2147 if (pkt_dev->flows[flow].count > pkt_dev->lflow)
2148 pkt_dev->flows[flow].count = 0;
2149 }
2150
2151 return pkt_dev->curfl;
2152}
2153
2154
2155#ifdef CONFIG_XFRM
2156/* If there was already an IPSEC SA, we keep it as is, else
2157 * we go look for it ...
2158*/
2159inline
2160void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2161{
2162 struct xfrm_state *x = pkt_dev->flows[flow].x;
2163 if (!x) {
2164 /*slow path: we dont already have xfrm_state*/
2165 x = xfrm_stateonly_find((xfrm_address_t *)&pkt_dev->cur_daddr,
2166 (xfrm_address_t *)&pkt_dev->cur_saddr,
2167 AF_INET,
2168 pkt_dev->ipsmode,
2169 pkt_dev->ipsproto, 0);
2170 if (x) {
2171 pkt_dev->flows[flow].x = x;
2172 set_pkt_overhead(pkt_dev);
2173 pkt_dev->pkt_overhead+=x->props.header_len;
2174 }
2175
2176 }
2177}
2178#endif
2078/* Increment/randomize headers according to flags and current values 2179/* Increment/randomize headers according to flags and current values
2079 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 2180 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
2080 */ 2181 */
@@ -2084,12 +2185,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2084 __u32 imx; 2185 __u32 imx;
2085 int flow = 0; 2186 int flow = 0;
2086 2187
2087 if (pkt_dev->cflows) { 2188 if (pkt_dev->cflows)
2088 flow = random32() % pkt_dev->cflows; 2189 flow = f_pick(pkt_dev);
2089
2090 if (pkt_dev->flows[flow].count > pkt_dev->lflow)
2091 pkt_dev->flows[flow].count = 0;
2092 }
2093 2190
2094 /* Deal with source MAC */ 2191 /* Deal with source MAC */
2095 if (pkt_dev->src_mac_count > 1) { 2192 if (pkt_dev->src_mac_count > 1) {
@@ -2205,7 +2302,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2205 pkt_dev->cur_saddr = htonl(t); 2302 pkt_dev->cur_saddr = htonl(t);
2206 } 2303 }
2207 2304
2208 if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { 2305 if (pkt_dev->cflows && f_seen(pkt_dev, flow)) {
2209 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 2306 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
2210 } else { 2307 } else {
2211 imn = ntohl(pkt_dev->daddr_min); 2308 imn = ntohl(pkt_dev->daddr_min);
@@ -2235,8 +2332,13 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2235 } 2332 }
2236 } 2333 }
2237 if (pkt_dev->cflows) { 2334 if (pkt_dev->cflows) {
2335 pkt_dev->flows[flow].flags |= F_INIT;
2238 pkt_dev->flows[flow].cur_daddr = 2336 pkt_dev->flows[flow].cur_daddr =
2239 pkt_dev->cur_daddr; 2337 pkt_dev->cur_daddr;
2338#ifdef CONFIG_XFRM
2339 if (pkt_dev->flags & F_IPSEC_ON)
2340 get_ipsec_sa(pkt_dev, flow);
2341#endif
2240 pkt_dev->nflows++; 2342 pkt_dev->nflows++;
2241 } 2343 }
2242 } 2344 }
@@ -2277,6 +2379,91 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2277 pkt_dev->flows[flow].count++; 2379 pkt_dev->flows[flow].count++;
2278} 2380}
2279 2381
2382
2383#ifdef CONFIG_XFRM
2384static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2385{
2386 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2387 int err = 0;
2388 struct iphdr *iph;
2389
2390 if (!x)
2391 return 0;
2392 /* XXX: we dont support tunnel mode for now until
2393 * we resolve the dst issue */
2394 if (x->props.mode != XFRM_MODE_TRANSPORT)
2395 return 0;
2396
2397 spin_lock(&x->lock);
2398 iph = ip_hdr(skb);
2399
2400 err = x->mode->output(x, skb);
2401 if (err)
2402 goto error;
2403 err = x->type->output(x, skb);
2404 if (err)
2405 goto error;
2406
2407 x->curlft.bytes +=skb->len;
2408 x->curlft.packets++;
2409 spin_unlock(&x->lock);
2410
2411error:
2412 spin_unlock(&x->lock);
2413 return err;
2414}
2415
2416static inline void free_SAs(struct pktgen_dev *pkt_dev)
2417{
2418 if (pkt_dev->cflows) {
2419 /* let go of the SAs if we have them */
2420 int i = 0;
2421 for (; i < pkt_dev->nflows; i++){
2422 struct xfrm_state *x = pkt_dev->flows[i].x;
2423 if (x) {
2424 xfrm_state_put(x);
2425 pkt_dev->flows[i].x = NULL;
2426 }
2427 }
2428 }
2429}
2430
2431static inline int process_ipsec(struct pktgen_dev *pkt_dev,
2432 struct sk_buff *skb, __be16 protocol)
2433{
2434 if (pkt_dev->flags & F_IPSEC_ON) {
2435 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2436 int nhead = 0;
2437 if (x) {
2438 int ret;
2439 __u8 *eth;
2440 nhead = x->props.header_len - skb_headroom(skb);
2441 if (nhead >0) {
2442 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2443 if (ret < 0) {
2444 printk("Error expanding ipsec packet %d\n",ret);
2445 return 0;
2446 }
2447 }
2448
2449 /* ipsec is not expecting ll header */
2450 skb_pull(skb, ETH_HLEN);
2451 ret = pktgen_output_ipsec(skb, pkt_dev);
2452 if (ret) {
2453 printk("Error creating ipsec packet %d\n",ret);
2454 kfree_skb(skb);
2455 return 0;
2456 }
2457 /* restore ll */
2458 eth = (__u8 *) skb_push(skb, ETH_HLEN);
2459 memcpy(eth, pkt_dev->hh, 12);
2460 *(u16 *) & eth[12] = protocol;
2461 }
2462 }
2463 return 1;
2464}
2465#endif
2466
2280static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2467static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2281{ 2468{
2282 unsigned i; 2469 unsigned i;
@@ -2323,9 +2510,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2323 2510
2324 datalen = (odev->hard_header_len + 16) & ~0xf; 2511 datalen = (odev->hard_header_len + 16) & ~0xf;
2325 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2512 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen +
2326 pkt_dev->nr_labels*sizeof(u32) + 2513 pkt_dev->pkt_overhead, GFP_ATOMIC);
2327 VLAN_TAG_SIZE(pkt_dev) + SVLAN_TAG_SIZE(pkt_dev),
2328 GFP_ATOMIC);
2329 if (!skb) { 2514 if (!skb) {
2330 sprintf(pkt_dev->result, "No memory"); 2515 sprintf(pkt_dev->result, "No memory");
2331 return NULL; 2516 return NULL;
@@ -2368,7 +2553,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2368 2553
2369 /* Eth + IPh + UDPh + mpls */ 2554 /* Eth + IPh + UDPh + mpls */
2370 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2555 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
2371 pkt_dev->nr_labels*sizeof(u32) - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev); 2556 pkt_dev->pkt_overhead;
2372 if (datalen < sizeof(struct pktgen_hdr)) 2557 if (datalen < sizeof(struct pktgen_hdr))
2373 datalen = sizeof(struct pktgen_hdr); 2558 datalen = sizeof(struct pktgen_hdr);
2374 2559
@@ -2391,8 +2576,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2391 iph->check = ip_fast_csum((void *)iph, iph->ihl); 2576 iph->check = ip_fast_csum((void *)iph, iph->ihl);
2392 skb->protocol = protocol; 2577 skb->protocol = protocol;
2393 skb->mac_header = (skb->network_header - ETH_HLEN - 2578 skb->mac_header = (skb->network_header - ETH_HLEN -
2394 pkt_dev->nr_labels * sizeof(u32) - 2579 pkt_dev->pkt_overhead);
2395 VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev));
2396 skb->dev = odev; 2580 skb->dev = odev;
2397 skb->pkt_type = PACKET_HOST; 2581 skb->pkt_type = PACKET_HOST;
2398 2582
@@ -2463,6 +2647,11 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2463 pgh->tv_usec = htonl(timestamp.tv_usec); 2647 pgh->tv_usec = htonl(timestamp.tv_usec);
2464 } 2648 }
2465 2649
2650#ifdef CONFIG_XFRM
2651 if (!process_ipsec(pkt_dev, skb, protocol))
2652 return NULL;
2653#endif
2654
2466 return skb; 2655 return skb;
2467} 2656}
2468 2657
@@ -2662,9 +2851,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2662 mod_cur_headers(pkt_dev); 2851 mod_cur_headers(pkt_dev);
2663 2852
2664 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2853 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
2665 pkt_dev->nr_labels*sizeof(u32) + 2854 pkt_dev->pkt_overhead, GFP_ATOMIC);
2666 VLAN_TAG_SIZE(pkt_dev) + SVLAN_TAG_SIZE(pkt_dev),
2667 GFP_ATOMIC);
2668 if (!skb) { 2855 if (!skb) {
2669 sprintf(pkt_dev->result, "No memory"); 2856 sprintf(pkt_dev->result, "No memory");
2670 return NULL; 2857 return NULL;
@@ -2708,7 +2895,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2708 /* Eth + IPh + UDPh + mpls */ 2895 /* Eth + IPh + UDPh + mpls */
2709 datalen = pkt_dev->cur_pkt_size - 14 - 2896 datalen = pkt_dev->cur_pkt_size - 14 -
2710 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 2897 sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
2711 pkt_dev->nr_labels*sizeof(u32) - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev); 2898 pkt_dev->pkt_overhead;
2712 2899
2713 if (datalen < sizeof(struct pktgen_hdr)) { 2900 if (datalen < sizeof(struct pktgen_hdr)) {
2714 datalen = sizeof(struct pktgen_hdr); 2901 datalen = sizeof(struct pktgen_hdr);
@@ -2738,8 +2925,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2738 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); 2925 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
2739 2926
2740 skb->mac_header = (skb->network_header - ETH_HLEN - 2927 skb->mac_header = (skb->network_header - ETH_HLEN -
2741 pkt_dev->nr_labels * sizeof(u32) - 2928 pkt_dev->pkt_overhead);
2742 VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev));
2743 skb->protocol = protocol; 2929 skb->protocol = protocol;
2744 skb->dev = odev; 2930 skb->dev = odev;
2745 skb->pkt_type = PACKET_HOST; 2931 skb->pkt_type = PACKET_HOST;
@@ -2857,6 +3043,7 @@ static void pktgen_run(struct pktgen_thread *t)
2857 pkt_dev->started_at = getCurUs(); 3043 pkt_dev->started_at = getCurUs();
2858 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ 3044 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */
2859 pkt_dev->next_tx_ns = 0; 3045 pkt_dev->next_tx_ns = 0;
3046 set_pkt_overhead(pkt_dev);
2860 3047
2861 strcpy(pkt_dev->result, "Starting"); 3048 strcpy(pkt_dev->result, "Starting");
2862 started++; 3049 started++;
@@ -3139,7 +3326,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3139 } 3326 }
3140 } 3327 }
3141 3328
3142 if (netif_queue_stopped(odev) || need_resched()) { 3329 if ((netif_queue_stopped(odev) ||
3330 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
3331 need_resched()) {
3143 idle_start = getCurUs(); 3332 idle_start = getCurUs();
3144 3333
3145 if (!netif_running(odev)) { 3334 if (!netif_running(odev)) {
@@ -3154,7 +3343,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3154 3343
3155 pkt_dev->idle_acc += getCurUs() - idle_start; 3344 pkt_dev->idle_acc += getCurUs() - idle_start;
3156 3345
3157 if (netif_queue_stopped(odev)) { 3346 if (netif_queue_stopped(odev) ||
3347 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
3158 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3348 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3159 pkt_dev->next_tx_ns = 0; 3349 pkt_dev->next_tx_ns = 0;
3160 goto out; /* Try the next interface */ 3350 goto out; /* Try the next interface */
@@ -3181,7 +3371,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3181 } 3371 }
3182 3372
3183 netif_tx_lock_bh(odev); 3373 netif_tx_lock_bh(odev);
3184 if (!netif_queue_stopped(odev)) { 3374 if (!netif_queue_stopped(odev) &&
3375 !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
3185 3376
3186 atomic_inc(&(pkt_dev->skb->users)); 3377 atomic_inc(&(pkt_dev->skb->users));
3187 retry_now: 3378 retry_now:
@@ -3446,11 +3637,18 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3446 } 3637 }
3447 pkt_dev->entry->proc_fops = &pktgen_if_fops; 3638 pkt_dev->entry->proc_fops = &pktgen_if_fops;
3448 pkt_dev->entry->data = pkt_dev; 3639 pkt_dev->entry->data = pkt_dev;
3640#ifdef CONFIG_XFRM
3641 pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
3642 pkt_dev->ipsproto = IPPROTO_ESP;
3643#endif
3449 3644
3450 return add_dev_to_thread(t, pkt_dev); 3645 return add_dev_to_thread(t, pkt_dev);
3451out2: 3646out2:
3452 dev_put(pkt_dev->odev); 3647 dev_put(pkt_dev->odev);
3453out1: 3648out1:
3649#ifdef CONFIG_XFRM
3650 free_SAs(pkt_dev);
3651#endif
3454 if (pkt_dev->flows) 3652 if (pkt_dev->flows)
3455 vfree(pkt_dev->flows); 3653 vfree(pkt_dev->flows);
3456 kfree(pkt_dev); 3654 kfree(pkt_dev);
@@ -3545,6 +3743,9 @@ static int pktgen_remove_device(struct pktgen_thread *t,
3545 if (pkt_dev->entry) 3743 if (pkt_dev->entry)
3546 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 3744 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
3547 3745
3746#ifdef CONFIG_XFRM
3747 free_SAs(pkt_dev);
3748#endif
3548 if (pkt_dev->flows) 3749 if (pkt_dev->flows)
3549 vfree(pkt_dev->flows); 3750 vfree(pkt_dev->flows);
3550 kfree(pkt_dev); 3751 kfree(pkt_dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 02e8bf084277..864cbdf31ed7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -97,6 +97,19 @@ int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
97 return 0; 97 return 0;
98} 98}
99 99
100int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr,
101 struct rtattr *rta, int len)
102{
103 if (RTA_PAYLOAD(rta) < len)
104 return -1;
105 if (RTA_PAYLOAD(rta) >= RTA_ALIGN(len) + sizeof(struct rtattr)) {
106 rta = RTA_DATA(rta) + RTA_ALIGN(len);
107 return rtattr_parse_nested(tb, maxattr, rta);
108 }
109 memset(tb, 0, sizeof(struct rtattr *) * maxattr);
110 return 0;
111}
112
100static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 113static struct rtnl_link *rtnl_msg_handlers[NPROTO];
101 114
102static inline int rtm_msgindex(int msgtype) 115static inline int rtm_msgindex(int msgtype)
@@ -243,6 +256,150 @@ void rtnl_unregister_all(int protocol)
243 256
244EXPORT_SYMBOL_GPL(rtnl_unregister_all); 257EXPORT_SYMBOL_GPL(rtnl_unregister_all);
245 258
259static LIST_HEAD(link_ops);
260
261/**
262 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
263 * @ops: struct rtnl_link_ops * to register
264 *
265 * The caller must hold the rtnl_mutex. This function should be used
266 * by drivers that create devices during module initialization. It
267 * must be called before registering the devices.
268 *
269 * Returns 0 on success or a negative error code.
270 */
271int __rtnl_link_register(struct rtnl_link_ops *ops)
272{
273 if (!ops->dellink)
274 ops->dellink = unregister_netdevice;
275
276 list_add_tail(&ops->list, &link_ops);
277 return 0;
278}
279
280EXPORT_SYMBOL_GPL(__rtnl_link_register);
281
282/**
283 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
284 * @ops: struct rtnl_link_ops * to register
285 *
286 * Returns 0 on success or a negative error code.
287 */
288int rtnl_link_register(struct rtnl_link_ops *ops)
289{
290 int err;
291
292 rtnl_lock();
293 err = __rtnl_link_register(ops);
294 rtnl_unlock();
295 return err;
296}
297
298EXPORT_SYMBOL_GPL(rtnl_link_register);
299
300/**
301 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
302 * @ops: struct rtnl_link_ops * to unregister
303 *
304 * The caller must hold the rtnl_mutex.
305 */
306void __rtnl_link_unregister(struct rtnl_link_ops *ops)
307{
308 struct net_device *dev, *n;
309
310 for_each_netdev_safe(dev, n) {
311 if (dev->rtnl_link_ops == ops)
312 ops->dellink(dev);
313 }
314 list_del(&ops->list);
315}
316
317EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
318
319/**
320 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
321 * @ops: struct rtnl_link_ops * to unregister
322 */
323void rtnl_link_unregister(struct rtnl_link_ops *ops)
324{
325 rtnl_lock();
326 __rtnl_link_unregister(ops);
327 rtnl_unlock();
328}
329
330EXPORT_SYMBOL_GPL(rtnl_link_unregister);
331
332static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
333{
334 const struct rtnl_link_ops *ops;
335
336 list_for_each_entry(ops, &link_ops, list) {
337 if (!strcmp(ops->kind, kind))
338 return ops;
339 }
340 return NULL;
341}
342
343static size_t rtnl_link_get_size(const struct net_device *dev)
344{
345 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
346 size_t size;
347
348 if (!ops)
349 return 0;
350
351 size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
352 nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
353
354 if (ops->get_size)
355 /* IFLA_INFO_DATA + nested data */
356 size += nlmsg_total_size(sizeof(struct nlattr)) +
357 ops->get_size(dev);
358
359 if (ops->get_xstats_size)
360 size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
361
362 return size;
363}
364
365static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
366{
367 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
368 struct nlattr *linkinfo, *data;
369 int err = -EMSGSIZE;
370
371 linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
372 if (linkinfo == NULL)
373 goto out;
374
375 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
376 goto err_cancel_link;
377 if (ops->fill_xstats) {
378 err = ops->fill_xstats(skb, dev);
379 if (err < 0)
380 goto err_cancel_link;
381 }
382 if (ops->fill_info) {
383 data = nla_nest_start(skb, IFLA_INFO_DATA);
384 if (data == NULL)
385 goto err_cancel_link;
386 err = ops->fill_info(skb, dev);
387 if (err < 0)
388 goto err_cancel_data;
389 nla_nest_end(skb, data);
390 }
391
392 nla_nest_end(skb, linkinfo);
393 return 0;
394
395err_cancel_data:
396 nla_nest_cancel(skb, data);
397err_cancel_link:
398 nla_nest_cancel(skb, linkinfo);
399out:
400 return err;
401}
402
246static const int rtm_min[RTM_NR_FAMILIES] = 403static const int rtm_min[RTM_NR_FAMILIES] =
247{ 404{
248 [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)), 405 [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
@@ -437,7 +594,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
437 a->tx_compressed = b->tx_compressed; 594 a->tx_compressed = b->tx_compressed;
438}; 595};
439 596
440static inline size_t if_nlmsg_size(void) 597static inline size_t if_nlmsg_size(const struct net_device *dev)
441{ 598{
442 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 599 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
443 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 600 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -452,7 +609,8 @@ static inline size_t if_nlmsg_size(void)
452 + nla_total_size(4) /* IFLA_LINK */ 609 + nla_total_size(4) /* IFLA_LINK */
453 + nla_total_size(4) /* IFLA_MASTER */ 610 + nla_total_size(4) /* IFLA_MASTER */
454 + nla_total_size(1) /* IFLA_OPERSTATE */ 611 + nla_total_size(1) /* IFLA_OPERSTATE */
455 + nla_total_size(1); /* IFLA_LINKMODE */ 612 + nla_total_size(1) /* IFLA_LINKMODE */
613 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
456} 614}
457 615
458static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, 616static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
@@ -522,6 +680,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
522 } 680 }
523 } 681 }
524 682
683 if (dev->rtnl_link_ops) {
684 if (rtnl_link_fill(skb, dev) < 0)
685 goto nla_put_failure;
686 }
687
525 return nlmsg_end(skb, nlh); 688 return nlmsg_end(skb, nlh);
526 689
527nla_put_failure: 690nla_put_failure:
@@ -553,6 +716,8 @@ cont:
553 716
554static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 717static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
555 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, 718 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
719 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
720 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
556 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, 721 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
557 [IFLA_MTU] = { .type = NLA_U32 }, 722 [IFLA_MTU] = { .type = NLA_U32 },
558 [IFLA_TXQLEN] = { .type = NLA_U32 }, 723 [IFLA_TXQLEN] = { .type = NLA_U32 },
@@ -561,44 +726,16 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
561 [IFLA_LINKMODE] = { .type = NLA_U8 }, 726 [IFLA_LINKMODE] = { .type = NLA_U8 },
562}; 727};
563 728
564static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 729static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
565{ 730 [IFLA_INFO_KIND] = { .type = NLA_STRING },
566 struct ifinfomsg *ifm; 731 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
567 struct net_device *dev; 732};
568 int err, send_addr_notify = 0, modified = 0;
569 struct nlattr *tb[IFLA_MAX+1];
570 char ifname[IFNAMSIZ];
571
572 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
573 if (err < 0)
574 goto errout;
575
576 if (tb[IFLA_IFNAME])
577 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
578 else
579 ifname[0] = '\0';
580
581 err = -EINVAL;
582 ifm = nlmsg_data(nlh);
583 if (ifm->ifi_index > 0)
584 dev = dev_get_by_index(ifm->ifi_index);
585 else if (tb[IFLA_IFNAME])
586 dev = dev_get_by_name(ifname);
587 else
588 goto errout;
589
590 if (dev == NULL) {
591 err = -ENODEV;
592 goto errout;
593 }
594
595 if (tb[IFLA_ADDRESS] &&
596 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
597 goto errout_dev;
598 733
599 if (tb[IFLA_BROADCAST] && 734static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
600 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 735 struct nlattr **tb, char *ifname, int modified)
601 goto errout_dev; 736{
737 int send_addr_notify = 0;
738 int err;
602 739
603 if (tb[IFLA_MAP]) { 740 if (tb[IFLA_MAP]) {
604 struct rtnl_link_ifmap *u_map; 741 struct rtnl_link_ifmap *u_map;
@@ -606,12 +743,12 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
606 743
607 if (!dev->set_config) { 744 if (!dev->set_config) {
608 err = -EOPNOTSUPP; 745 err = -EOPNOTSUPP;
609 goto errout_dev; 746 goto errout;
610 } 747 }
611 748
612 if (!netif_device_present(dev)) { 749 if (!netif_device_present(dev)) {
613 err = -ENODEV; 750 err = -ENODEV;
614 goto errout_dev; 751 goto errout;
615 } 752 }
616 753
617 u_map = nla_data(tb[IFLA_MAP]); 754 u_map = nla_data(tb[IFLA_MAP]);
@@ -624,7 +761,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
624 761
625 err = dev->set_config(dev, &k_map); 762 err = dev->set_config(dev, &k_map);
626 if (err < 0) 763 if (err < 0)
627 goto errout_dev; 764 goto errout;
628 765
629 modified = 1; 766 modified = 1;
630 } 767 }
@@ -635,19 +772,19 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
635 772
636 if (!dev->set_mac_address) { 773 if (!dev->set_mac_address) {
637 err = -EOPNOTSUPP; 774 err = -EOPNOTSUPP;
638 goto errout_dev; 775 goto errout;
639 } 776 }
640 777
641 if (!netif_device_present(dev)) { 778 if (!netif_device_present(dev)) {
642 err = -ENODEV; 779 err = -ENODEV;
643 goto errout_dev; 780 goto errout;
644 } 781 }
645 782
646 len = sizeof(sa_family_t) + dev->addr_len; 783 len = sizeof(sa_family_t) + dev->addr_len;
647 sa = kmalloc(len, GFP_KERNEL); 784 sa = kmalloc(len, GFP_KERNEL);
648 if (!sa) { 785 if (!sa) {
649 err = -ENOMEM; 786 err = -ENOMEM;
650 goto errout_dev; 787 goto errout;
651 } 788 }
652 sa->sa_family = dev->type; 789 sa->sa_family = dev->type;
653 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 790 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
@@ -655,7 +792,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
655 err = dev->set_mac_address(dev, sa); 792 err = dev->set_mac_address(dev, sa);
656 kfree(sa); 793 kfree(sa);
657 if (err) 794 if (err)
658 goto errout_dev; 795 goto errout;
659 send_addr_notify = 1; 796 send_addr_notify = 1;
660 modified = 1; 797 modified = 1;
661 } 798 }
@@ -663,7 +800,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
663 if (tb[IFLA_MTU]) { 800 if (tb[IFLA_MTU]) {
664 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); 801 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
665 if (err < 0) 802 if (err < 0)
666 goto errout_dev; 803 goto errout;
667 modified = 1; 804 modified = 1;
668 } 805 }
669 806
@@ -675,7 +812,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
675 if (ifm->ifi_index > 0 && ifname[0]) { 812 if (ifm->ifi_index > 0 && ifname[0]) {
676 err = dev_change_name(dev, ifname); 813 err = dev_change_name(dev, ifname);
677 if (err < 0) 814 if (err < 0)
678 goto errout_dev; 815 goto errout;
679 modified = 1; 816 modified = 1;
680 } 817 }
681 818
@@ -684,7 +821,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
684 send_addr_notify = 1; 821 send_addr_notify = 1;
685 } 822 }
686 823
687
688 if (ifm->ifi_flags || ifm->ifi_change) { 824 if (ifm->ifi_flags || ifm->ifi_change) {
689 unsigned int flags = ifm->ifi_flags; 825 unsigned int flags = ifm->ifi_flags;
690 826
@@ -712,7 +848,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
712 848
713 err = 0; 849 err = 0;
714 850
715errout_dev: 851errout:
716 if (err < 0 && modified && net_ratelimit()) 852 if (err < 0 && modified && net_ratelimit())
717 printk(KERN_WARNING "A link change request failed with " 853 printk(KERN_WARNING "A link change request failed with "
718 "some changes comitted already. Interface %s may " 854 "some changes comitted already. Interface %s may "
@@ -721,12 +857,239 @@ errout_dev:
721 857
722 if (send_addr_notify) 858 if (send_addr_notify)
723 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 859 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
860 return err;
861}
862
863static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
864{
865 struct ifinfomsg *ifm;
866 struct net_device *dev;
867 int err;
868 struct nlattr *tb[IFLA_MAX+1];
869 char ifname[IFNAMSIZ];
870
871 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
872 if (err < 0)
873 goto errout;
874
875 if (tb[IFLA_IFNAME])
876 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
877 else
878 ifname[0] = '\0';
879
880 err = -EINVAL;
881 ifm = nlmsg_data(nlh);
882 if (ifm->ifi_index > 0)
883 dev = dev_get_by_index(ifm->ifi_index);
884 else if (tb[IFLA_IFNAME])
885 dev = dev_get_by_name(ifname);
886 else
887 goto errout;
888
889 if (dev == NULL) {
890 err = -ENODEV;
891 goto errout;
892 }
724 893
894 if (tb[IFLA_ADDRESS] &&
895 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
896 goto errout_dev;
897
898 if (tb[IFLA_BROADCAST] &&
899 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
900 goto errout_dev;
901
902 err = do_setlink(dev, ifm, tb, ifname, 0);
903errout_dev:
725 dev_put(dev); 904 dev_put(dev);
726errout: 905errout:
727 return err; 906 return err;
728} 907}
729 908
909static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
910{
911 const struct rtnl_link_ops *ops;
912 struct net_device *dev;
913 struct ifinfomsg *ifm;
914 char ifname[IFNAMSIZ];
915 struct nlattr *tb[IFLA_MAX+1];
916 int err;
917
918 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
919 if (err < 0)
920 return err;
921
922 if (tb[IFLA_IFNAME])
923 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
924
925 ifm = nlmsg_data(nlh);
926 if (ifm->ifi_index > 0)
927 dev = __dev_get_by_index(ifm->ifi_index);
928 else if (tb[IFLA_IFNAME])
929 dev = __dev_get_by_name(ifname);
930 else
931 return -EINVAL;
932
933 if (!dev)
934 return -ENODEV;
935
936 ops = dev->rtnl_link_ops;
937 if (!ops)
938 return -EOPNOTSUPP;
939
940 ops->dellink(dev);
941 return 0;
942}
943
944static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
945{
946 const struct rtnl_link_ops *ops;
947 struct net_device *dev;
948 struct ifinfomsg *ifm;
949 char kind[MODULE_NAME_LEN];
950 char ifname[IFNAMSIZ];
951 struct nlattr *tb[IFLA_MAX+1];
952 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
953 int err;
954
955replay:
956 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
957 if (err < 0)
958 return err;
959
960 if (tb[IFLA_IFNAME])
961 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
962 else
963 ifname[0] = '\0';
964
965 ifm = nlmsg_data(nlh);
966 if (ifm->ifi_index > 0)
967 dev = __dev_get_by_index(ifm->ifi_index);
968 else if (ifname[0])
969 dev = __dev_get_by_name(ifname);
970 else
971 dev = NULL;
972
973 if (tb[IFLA_LINKINFO]) {
974 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
975 tb[IFLA_LINKINFO], ifla_info_policy);
976 if (err < 0)
977 return err;
978 } else
979 memset(linkinfo, 0, sizeof(linkinfo));
980
981 if (linkinfo[IFLA_INFO_KIND]) {
982 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
983 ops = rtnl_link_ops_get(kind);
984 } else {
985 kind[0] = '\0';
986 ops = NULL;
987 }
988
989 if (1) {
990 struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
991
992 if (ops) {
993 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
994 err = nla_parse_nested(attr, ops->maxtype,
995 linkinfo[IFLA_INFO_DATA],
996 ops->policy);
997 if (err < 0)
998 return err;
999 data = attr;
1000 }
1001 if (ops->validate) {
1002 err = ops->validate(tb, data);
1003 if (err < 0)
1004 return err;
1005 }
1006 }
1007
1008 if (dev) {
1009 int modified = 0;
1010
1011 if (nlh->nlmsg_flags & NLM_F_EXCL)
1012 return -EEXIST;
1013 if (nlh->nlmsg_flags & NLM_F_REPLACE)
1014 return -EOPNOTSUPP;
1015
1016 if (linkinfo[IFLA_INFO_DATA]) {
1017 if (!ops || ops != dev->rtnl_link_ops ||
1018 !ops->changelink)
1019 return -EOPNOTSUPP;
1020
1021 err = ops->changelink(dev, tb, data);
1022 if (err < 0)
1023 return err;
1024 modified = 1;
1025 }
1026
1027 return do_setlink(dev, ifm, tb, ifname, modified);
1028 }
1029
1030 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
1031 return -ENODEV;
1032
1033 if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change)
1034 return -EOPNOTSUPP;
1035 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
1036 return -EOPNOTSUPP;
1037
1038 if (!ops) {
1039#ifdef CONFIG_KMOD
1040 if (kind[0]) {
1041 __rtnl_unlock();
1042 request_module("rtnl-link-%s", kind);
1043 rtnl_lock();
1044 ops = rtnl_link_ops_get(kind);
1045 if (ops)
1046 goto replay;
1047 }
1048#endif
1049 return -EOPNOTSUPP;
1050 }
1051
1052 if (!ifname[0])
1053 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
1054 dev = alloc_netdev(ops->priv_size, ifname, ops->setup);
1055 if (!dev)
1056 return -ENOMEM;
1057
1058 if (strchr(dev->name, '%')) {
1059 err = dev_alloc_name(dev, dev->name);
1060 if (err < 0)
1061 goto err_free;
1062 }
1063 dev->rtnl_link_ops = ops;
1064
1065 if (tb[IFLA_MTU])
1066 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1067 if (tb[IFLA_ADDRESS])
1068 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
1069 nla_len(tb[IFLA_ADDRESS]));
1070 if (tb[IFLA_BROADCAST])
1071 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
1072 nla_len(tb[IFLA_BROADCAST]));
1073 if (tb[IFLA_TXQLEN])
1074 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
1075 if (tb[IFLA_WEIGHT])
1076 dev->weight = nla_get_u32(tb[IFLA_WEIGHT]);
1077 if (tb[IFLA_OPERSTATE])
1078 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
1079 if (tb[IFLA_LINKMODE])
1080 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
1081
1082 if (ops->newlink)
1083 err = ops->newlink(dev, tb, data);
1084 else
1085 err = register_netdevice(dev);
1086err_free:
1087 if (err < 0)
1088 free_netdev(dev);
1089 return err;
1090 }
1091}
1092
730static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 1093static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
731{ 1094{
732 struct ifinfomsg *ifm; 1095 struct ifinfomsg *ifm;
@@ -747,7 +1110,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
747 } else 1110 } else
748 return -EINVAL; 1111 return -EINVAL;
749 1112
750 nskb = nlmsg_new(if_nlmsg_size(), GFP_KERNEL); 1113 nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
751 if (nskb == NULL) { 1114 if (nskb == NULL) {
752 err = -ENOBUFS; 1115 err = -ENOBUFS;
753 goto errout; 1116 goto errout;
@@ -797,7 +1160,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
797 struct sk_buff *skb; 1160 struct sk_buff *skb;
798 int err = -ENOBUFS; 1161 int err = -ENOBUFS;
799 1162
800 skb = nlmsg_new(if_nlmsg_size(), GFP_KERNEL); 1163 skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
801 if (skb == NULL) 1164 if (skb == NULL)
802 goto errout; 1165 goto errout;
803 1166
@@ -952,6 +1315,8 @@ void __init rtnetlink_init(void)
952 1315
953 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo); 1316 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
954 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL); 1317 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
1318 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL);
1319 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL);
955 1320
956 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all); 1321 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
957 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); 1322 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
@@ -960,6 +1325,7 @@ void __init rtnetlink_init(void)
960EXPORT_SYMBOL(__rta_fill); 1325EXPORT_SYMBOL(__rta_fill);
961EXPORT_SYMBOL(rtattr_strlcpy); 1326EXPORT_SYMBOL(rtattr_strlcpy);
962EXPORT_SYMBOL(rtattr_parse); 1327EXPORT_SYMBOL(rtattr_parse);
1328EXPORT_SYMBOL(__rtattr_parse_nested_compat);
963EXPORT_SYMBOL(rtnetlink_put_metrics); 1329EXPORT_SYMBOL(rtnetlink_put_metrics);
964EXPORT_SYMBOL(rtnl_lock); 1330EXPORT_SYMBOL(rtnl_lock);
965EXPORT_SYMBOL(rtnl_trylock); 1331EXPORT_SYMBOL(rtnl_trylock);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3943c3ad9145..0583e8498f13 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -415,9 +415,11 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
415 C(csum); 415 C(csum);
416 C(local_df); 416 C(local_df);
417 n->cloned = 1; 417 n->cloned = 1;
418 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
418 n->nohdr = 0; 419 n->nohdr = 0;
419 C(pkt_type); 420 C(pkt_type);
420 C(ip_summed); 421 C(ip_summed);
422 skb_copy_queue_mapping(n, skb);
421 C(priority); 423 C(priority);
422#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 424#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
423 C(ipvs_property); 425 C(ipvs_property);
@@ -426,6 +428,10 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
426 n->destructor = NULL; 428 n->destructor = NULL;
427 C(mark); 429 C(mark);
428 __nf_copy(n, skb); 430 __nf_copy(n, skb);
431#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
432 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
433 C(nf_trace);
434#endif
429#ifdef CONFIG_NET_SCHED 435#ifdef CONFIG_NET_SCHED
430 C(tc_index); 436 C(tc_index);
431#ifdef CONFIG_NET_CLS_ACT 437#ifdef CONFIG_NET_CLS_ACT
@@ -459,6 +465,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
459#endif 465#endif
460 new->sk = NULL; 466 new->sk = NULL;
461 new->dev = old->dev; 467 new->dev = old->dev;
468 skb_copy_queue_mapping(new, old);
462 new->priority = old->priority; 469 new->priority = old->priority;
463 new->protocol = old->protocol; 470 new->protocol = old->protocol;
464 new->dst = dst_clone(old->dst); 471 new->dst = dst_clone(old->dst);
@@ -482,6 +489,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
482 new->destructor = NULL; 489 new->destructor = NULL;
483 new->mark = old->mark; 490 new->mark = old->mark;
484 __nf_copy(new, old); 491 __nf_copy(new, old);
492#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
493 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
494 new->nf_trace = old->nf_trace;
495#endif
485#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 496#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
486 new->ipvs_property = old->ipvs_property; 497 new->ipvs_property = old->ipvs_property;
487#endif 498#endif
@@ -676,6 +687,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
676 skb->network_header += off; 687 skb->network_header += off;
677 skb->mac_header += off; 688 skb->mac_header += off;
678 skb->cloned = 0; 689 skb->cloned = 0;
690 skb->hdr_len = 0;
679 skb->nohdr = 0; 691 skb->nohdr = 0;
680 atomic_set(&skb_shinfo(skb)->dataref, 1); 692 atomic_set(&skb_shinfo(skb)->dataref, 1);
681 return 0; 693 return 0;
@@ -1930,6 +1942,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
1930 tail = nskb; 1942 tail = nskb;
1931 1943
1932 nskb->dev = skb->dev; 1944 nskb->dev = skb->dev;
1945 skb_copy_queue_mapping(nskb, skb);
1933 nskb->priority = skb->priority; 1946 nskb->priority = skb->priority;
1934 nskb->protocol = skb->protocol; 1947 nskb->protocol = skb->protocol;
1935 nskb->dst = dst_clone(skb->dst); 1948 nskb->dst = dst_clone(skb->dst);
diff --git a/net/core/sock.c b/net/core/sock.c
index c14ce0198d25..091032a250c7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -210,7 +210,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
210 return -EDOM; 210 return -EDOM;
211 211
212 if (tv.tv_sec < 0) { 212 if (tv.tv_sec < 0) {
213 static int warned = 0; 213 static int warned __read_mostly;
214
214 *timeo_p = 0; 215 *timeo_p = 0;
215 if (warned < 10 && net_ratelimit()) 216 if (warned < 10 && net_ratelimit())
216 warned++; 217 warned++;
@@ -1851,46 +1852,15 @@ void proto_unregister(struct proto *prot)
1851EXPORT_SYMBOL(proto_unregister); 1852EXPORT_SYMBOL(proto_unregister);
1852 1853
1853#ifdef CONFIG_PROC_FS 1854#ifdef CONFIG_PROC_FS
1854static inline struct proto *__proto_head(void)
1855{
1856 return list_entry(proto_list.next, struct proto, node);
1857}
1858
1859static inline struct proto *proto_head(void)
1860{
1861 return list_empty(&proto_list) ? NULL : __proto_head();
1862}
1863
1864static inline struct proto *proto_next(struct proto *proto)
1865{
1866 return proto->node.next == &proto_list ? NULL :
1867 list_entry(proto->node.next, struct proto, node);
1868}
1869
1870static inline struct proto *proto_get_idx(loff_t pos)
1871{
1872 struct proto *proto;
1873 loff_t i = 0;
1874
1875 list_for_each_entry(proto, &proto_list, node)
1876 if (i++ == pos)
1877 goto out;
1878
1879 proto = NULL;
1880out:
1881 return proto;
1882}
1883
1884static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 1855static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
1885{ 1856{
1886 read_lock(&proto_list_lock); 1857 read_lock(&proto_list_lock);
1887 return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN; 1858 return seq_list_start_head(&proto_list, *pos);
1888} 1859}
1889 1860
1890static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1861static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1891{ 1862{
1892 ++*pos; 1863 return seq_list_next(v, &proto_list, pos);
1893 return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
1894} 1864}
1895 1865
1896static void proto_seq_stop(struct seq_file *seq, void *v) 1866static void proto_seq_stop(struct seq_file *seq, void *v)
@@ -1938,7 +1908,7 @@ static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
1938 1908
1939static int proto_seq_show(struct seq_file *seq, void *v) 1909static int proto_seq_show(struct seq_file *seq, void *v)
1940{ 1910{
1941 if (v == SEQ_START_TOKEN) 1911 if (v == &proto_list)
1942 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 1912 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
1943 "protocol", 1913 "protocol",
1944 "size", 1914 "size",
@@ -1950,7 +1920,7 @@ static int proto_seq_show(struct seq_file *seq, void *v)
1950 "module", 1920 "module",
1951 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 1921 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
1952 else 1922 else
1953 proto_seq_printf(seq, v); 1923 proto_seq_printf(seq, list_entry(v, struct proto, node));
1954 return 0; 1924 return 0;
1955} 1925}
1956 1926
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index ec7fa4d67f08..e91c2b9dc27b 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/dccp/ccids/ccid3.c 2 * net/dccp/ccids/ccid3.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz> 5 * Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * 6 *
7 * An implementation of the DCCP protocol 7 * An implementation of the DCCP protocol
8 * 8 *
@@ -49,7 +49,6 @@ static int ccid3_debug;
49 49
50static struct dccp_tx_hist *ccid3_tx_hist; 50static struct dccp_tx_hist *ccid3_tx_hist;
51static struct dccp_rx_hist *ccid3_rx_hist; 51static struct dccp_rx_hist *ccid3_rx_hist;
52static struct dccp_li_hist *ccid3_li_hist;
53 52
54/* 53/*
55 * Transmitter Half-Connection Routines 54 * Transmitter Half-Connection Routines
@@ -194,25 +193,20 @@ static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
194 * The algorithm is not applicable if RTT < 4 microseconds. 193 * The algorithm is not applicable if RTT < 4 microseconds.
195 */ 194 */
196static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx, 195static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx,
197 struct timeval *now) 196 ktime_t now)
198{ 197{
199 suseconds_t delta;
200 u32 quarter_rtts; 198 u32 quarter_rtts;
201 199
202 if (unlikely(hctx->ccid3hctx_rtt < 4)) /* avoid divide-by-zero */ 200 if (unlikely(hctx->ccid3hctx_rtt < 4)) /* avoid divide-by-zero */
203 return; 201 return;
204 202
205 delta = timeval_delta(now, &hctx->ccid3hctx_t_last_win_count); 203 quarter_rtts = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count);
206 DCCP_BUG_ON(delta < 0); 204 quarter_rtts /= hctx->ccid3hctx_rtt / 4;
207
208 quarter_rtts = (u32)delta / (hctx->ccid3hctx_rtt / 4);
209 205
210 if (quarter_rtts > 0) { 206 if (quarter_rtts > 0) {
211 hctx->ccid3hctx_t_last_win_count = *now; 207 hctx->ccid3hctx_t_last_win_count = now;
212 hctx->ccid3hctx_last_win_count += min_t(u32, quarter_rtts, 5); 208 hctx->ccid3hctx_last_win_count += min_t(u32, quarter_rtts, 5);
213 hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */ 209 hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */
214
215 ccid3_pr_debug("now at %#X\n", hctx->ccid3hctx_last_win_count);
216 } 210 }
217} 211}
218 212
@@ -312,8 +306,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
312{ 306{
313 struct dccp_sock *dp = dccp_sk(sk); 307 struct dccp_sock *dp = dccp_sk(sk);
314 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 308 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
315 struct timeval now; 309 ktime_t now = ktime_get_real();
316 suseconds_t delay; 310 s64 delay;
317 311
318 BUG_ON(hctx == NULL); 312 BUG_ON(hctx == NULL);
319 313
@@ -325,8 +319,6 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
325 if (unlikely(skb->len == 0)) 319 if (unlikely(skb->len == 0))
326 return -EBADMSG; 320 return -EBADMSG;
327 321
328 dccp_timestamp(sk, &now);
329
330 switch (hctx->ccid3hctx_state) { 322 switch (hctx->ccid3hctx_state) {
331 case TFRC_SSTATE_NO_SENT: 323 case TFRC_SSTATE_NO_SENT:
332 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 324 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
@@ -349,7 +341,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
349 ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); 341 ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt);
350 hctx->ccid3hctx_rtt = dp->dccps_syn_rtt; 342 hctx->ccid3hctx_rtt = dp->dccps_syn_rtt;
351 hctx->ccid3hctx_x = rfc3390_initial_rate(sk); 343 hctx->ccid3hctx_x = rfc3390_initial_rate(sk);
352 hctx->ccid3hctx_t_ld = now; 344 hctx->ccid3hctx_t_ld = ktime_to_timeval(now);
353 } else { 345 } else {
354 /* Sender does not have RTT sample: X = MSS/second */ 346 /* Sender does not have RTT sample: X = MSS/second */
355 hctx->ccid3hctx_x = dp->dccps_mss_cache; 347 hctx->ccid3hctx_x = dp->dccps_mss_cache;
@@ -361,7 +353,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
361 break; 353 break;
362 case TFRC_SSTATE_NO_FBACK: 354 case TFRC_SSTATE_NO_FBACK:
363 case TFRC_SSTATE_FBACK: 355 case TFRC_SSTATE_FBACK:
364 delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now); 356 delay = ktime_us_delta(hctx->ccid3hctx_t_nom, now);
365 ccid3_pr_debug("delay=%ld\n", (long)delay); 357 ccid3_pr_debug("delay=%ld\n", (long)delay);
366 /* 358 /*
367 * Scheduling of packet transmissions [RFC 3448, 4.6] 359 * Scheduling of packet transmissions [RFC 3448, 4.6]
@@ -371,10 +363,10 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
371 * else 363 * else
372 * // send the packet in (t_nom - t_now) milliseconds. 364 * // send the packet in (t_nom - t_now) milliseconds.
373 */ 365 */
374 if (delay - (suseconds_t)hctx->ccid3hctx_delta >= 0) 366 if (delay - (s64)hctx->ccid3hctx_delta >= 1000)
375 return delay / 1000L; 367 return (u32)delay / 1000L;
376 368
377 ccid3_hc_tx_update_win_count(hctx, &now); 369 ccid3_hc_tx_update_win_count(hctx, now);
378 break; 370 break;
379 case TFRC_SSTATE_TERM: 371 case TFRC_SSTATE_TERM:
380 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk); 372 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
@@ -387,8 +379,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
387 hctx->ccid3hctx_idle = 0; 379 hctx->ccid3hctx_idle = 0;
388 380
389 /* set the nominal send time for the next following packet */ 381 /* set the nominal send time for the next following packet */
390 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); 382 hctx->ccid3hctx_t_nom = ktime_add_us(hctx->ccid3hctx_t_nom,
391 383 hctx->ccid3hctx_t_ipi);
392 return 0; 384 return 0;
393} 385}
394 386
@@ -819,154 +811,6 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
819 return 0; 811 return 0;
820} 812}
821 813
822/* calculate first loss interval
823 *
824 * returns estimated loss interval in usecs */
825
826static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
827{
828 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
829 struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
830 u32 x_recv, p;
831 suseconds_t rtt, delta;
832 struct timeval tstamp = { 0, };
833 int interval = 0;
834 int win_count = 0;
835 int step = 0;
836 u64 fval;
837
838 list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
839 dccphrx_node) {
840 if (dccp_rx_hist_entry_data_packet(entry)) {
841 tail = entry;
842
843 switch (step) {
844 case 0:
845 tstamp = entry->dccphrx_tstamp;
846 win_count = entry->dccphrx_ccval;
847 step = 1;
848 break;
849 case 1:
850 interval = win_count - entry->dccphrx_ccval;
851 if (interval < 0)
852 interval += TFRC_WIN_COUNT_LIMIT;
853 if (interval > 4)
854 goto found;
855 break;
856 }
857 }
858 }
859
860 if (unlikely(step == 0)) {
861 DCCP_WARN("%s(%p), packet history has no data packets!\n",
862 dccp_role(sk), sk);
863 return ~0;
864 }
865
866 if (unlikely(interval == 0)) {
867 DCCP_WARN("%s(%p), Could not find a win_count interval > 0."
868 "Defaulting to 1\n", dccp_role(sk), sk);
869 interval = 1;
870 }
871found:
872 if (!tail) {
873 DCCP_CRIT("tail is null\n");
874 return ~0;
875 }
876
877 delta = timeval_delta(&tstamp, &tail->dccphrx_tstamp);
878 DCCP_BUG_ON(delta < 0);
879
880 rtt = delta * 4 / interval;
881 ccid3_pr_debug("%s(%p), approximated RTT to %dus\n",
882 dccp_role(sk), sk, (int)rtt);
883
884 /*
885 * Determine the length of the first loss interval via inverse lookup.
886 * Assume that X_recv can be computed by the throughput equation
887 * s
888 * X_recv = --------
889 * R * fval
890 * Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1].
891 */
892 if (rtt == 0) { /* would result in divide-by-zero */
893 DCCP_WARN("RTT==0\n");
894 return ~0;
895 }
896
897 dccp_timestamp(sk, &tstamp);
898 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
899 DCCP_BUG_ON(delta <= 0);
900
901 x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
902 if (x_recv == 0) { /* would also trigger divide-by-zero */
903 DCCP_WARN("X_recv==0\n");
904 if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) {
905 DCCP_BUG("stored value of X_recv is zero");
906 return ~0;
907 }
908 }
909
910 fval = scaled_div(hcrx->ccid3hcrx_s, rtt);
911 fval = scaled_div32(fval, x_recv);
912 p = tfrc_calc_x_reverse_lookup(fval);
913
914 ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
915 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
916
917 if (p == 0)
918 return ~0;
919 else
920 return 1000000 / p;
921}
922
923static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
924{
925 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
926 struct dccp_li_hist_entry *head;
927 u64 seq_temp;
928
929 if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
930 if (!dccp_li_hist_interval_new(ccid3_li_hist,
931 &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
932 return;
933
934 head = list_entry(hcrx->ccid3hcrx_li_hist.next,
935 struct dccp_li_hist_entry, dccplih_node);
936 head->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
937 } else {
938 struct dccp_li_hist_entry *entry;
939 struct list_head *tail;
940
941 head = list_entry(hcrx->ccid3hcrx_li_hist.next,
942 struct dccp_li_hist_entry, dccplih_node);
943 /* FIXME win count check removed as was wrong */
944 /* should make this check with receive history */
945 /* and compare there as per section 10.2 of RFC4342 */
946
947 /* new loss event detected */
948 /* calculate last interval length */
949 seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
950 entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
951
952 if (entry == NULL) {
953 DCCP_BUG("out of memory - can not allocate entry");
954 return;
955 }
956
957 list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);
958
959 tail = hcrx->ccid3hcrx_li_hist.prev;
960 list_del(tail);
961 kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);
962
963 /* Create the newest interval */
964 entry->dccplih_seqno = seq_loss;
965 entry->dccplih_interval = seq_temp;
966 entry->dccplih_win_count = win_loss;
967 }
968}
969
970static int ccid3_hc_rx_detect_loss(struct sock *sk, 814static int ccid3_hc_rx_detect_loss(struct sock *sk,
971 struct dccp_rx_hist_entry *packet) 815 struct dccp_rx_hist_entry *packet)
972{ 816{
@@ -992,8 +836,15 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
992 while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno) 836 while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
993 > TFRC_RECV_NUM_LATE_LOSS) { 837 > TFRC_RECV_NUM_LATE_LOSS) {
994 loss = 1; 838 loss = 1;
995 ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss, 839 dccp_li_update_li(sk,
996 hcrx->ccid3hcrx_ccval_nonloss); 840 &hcrx->ccid3hcrx_li_hist,
841 &hcrx->ccid3hcrx_hist,
842 &hcrx->ccid3hcrx_tstamp_last_feedback,
843 hcrx->ccid3hcrx_s,
844 hcrx->ccid3hcrx_bytes_recv,
845 hcrx->ccid3hcrx_x_recv,
846 hcrx->ccid3hcrx_seqno_nonloss,
847 hcrx->ccid3hcrx_ccval_nonloss);
997 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss; 848 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
998 dccp_inc_seqno(&tmp_seqno); 849 dccp_inc_seqno(&tmp_seqno);
999 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; 850 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
@@ -1152,7 +1003,7 @@ static void ccid3_hc_rx_exit(struct sock *sk)
1152 dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist); 1003 dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist);
1153 1004
1154 /* Empty loss interval history */ 1005 /* Empty loss interval history */
1155 dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist); 1006 dccp_li_hist_purge(&hcrx->ccid3hcrx_li_hist);
1156} 1007}
1157 1008
1158static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) 1009static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
@@ -1236,19 +1087,12 @@ static __init int ccid3_module_init(void)
1236 if (ccid3_tx_hist == NULL) 1087 if (ccid3_tx_hist == NULL)
1237 goto out_free_rx; 1088 goto out_free_rx;
1238 1089
1239 ccid3_li_hist = dccp_li_hist_new("ccid3");
1240 if (ccid3_li_hist == NULL)
1241 goto out_free_tx;
1242
1243 rc = ccid_register(&ccid3); 1090 rc = ccid_register(&ccid3);
1244 if (rc != 0) 1091 if (rc != 0)
1245 goto out_free_loss_interval_history; 1092 goto out_free_tx;
1246out: 1093out:
1247 return rc; 1094 return rc;
1248 1095
1249out_free_loss_interval_history:
1250 dccp_li_hist_delete(ccid3_li_hist);
1251 ccid3_li_hist = NULL;
1252out_free_tx: 1096out_free_tx:
1253 dccp_tx_hist_delete(ccid3_tx_hist); 1097 dccp_tx_hist_delete(ccid3_tx_hist);
1254 ccid3_tx_hist = NULL; 1098 ccid3_tx_hist = NULL;
@@ -1271,10 +1115,6 @@ static __exit void ccid3_module_exit(void)
1271 dccp_rx_hist_delete(ccid3_rx_hist); 1115 dccp_rx_hist_delete(ccid3_rx_hist);
1272 ccid3_rx_hist = NULL; 1116 ccid3_rx_hist = NULL;
1273 } 1117 }
1274 if (ccid3_li_hist != NULL) {
1275 dccp_li_hist_delete(ccid3_li_hist);
1276 ccid3_li_hist = NULL;
1277 }
1278} 1118}
1279module_exit(ccid3_module_exit); 1119module_exit(ccid3_module_exit);
1280 1120
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 8d31b389c19c..51d4b804e334 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -36,6 +36,7 @@
36#ifndef _DCCP_CCID3_H_ 36#ifndef _DCCP_CCID3_H_
37#define _DCCP_CCID3_H_ 37#define _DCCP_CCID3_H_
38 38
39#include <linux/ktime.h>
39#include <linux/list.h> 40#include <linux/list.h>
40#include <linux/time.h> 41#include <linux/time.h>
41#include <linux/types.h> 42#include <linux/types.h>
@@ -108,10 +109,10 @@ struct ccid3_hc_tx_sock {
108 enum ccid3_hc_tx_states ccid3hctx_state:8; 109 enum ccid3_hc_tx_states ccid3hctx_state:8;
109 u8 ccid3hctx_last_win_count; 110 u8 ccid3hctx_last_win_count;
110 u8 ccid3hctx_idle; 111 u8 ccid3hctx_idle;
111 struct timeval ccid3hctx_t_last_win_count; 112 ktime_t ccid3hctx_t_last_win_count;
112 struct timer_list ccid3hctx_no_feedback_timer; 113 struct timer_list ccid3hctx_no_feedback_timer;
113 struct timeval ccid3hctx_t_ld; 114 struct timeval ccid3hctx_t_ld;
114 struct timeval ccid3hctx_t_nom; 115 ktime_t ccid3hctx_t_nom;
115 u32 ccid3hctx_delta; 116 u32 ccid3hctx_delta;
116 struct list_head ccid3hctx_hist; 117 struct list_head ccid3hctx_hist;
117 struct ccid3_options_received ccid3hctx_options_received; 118 struct ccid3_options_received ccid3hctx_options_received;
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 372d7e75cdd8..515225f3a464 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/dccp/ccids/lib/loss_interval.c 2 * net/dccp/ccids/lib/loss_interval.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz> 5 * Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
@@ -15,58 +15,38 @@
15#include <net/sock.h> 15#include <net/sock.h>
16#include "../../dccp.h" 16#include "../../dccp.h"
17#include "loss_interval.h" 17#include "loss_interval.h"
18#include "packet_history.h"
19#include "tfrc.h"
18 20
19struct dccp_li_hist *dccp_li_hist_new(const char *name) 21#define DCCP_LI_HIST_IVAL_F_LENGTH 8
20{
21 struct dccp_li_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
22 static const char dccp_li_hist_mask[] = "li_hist_%s";
23 char *slab_name;
24
25 if (hist == NULL)
26 goto out;
27
28 slab_name = kmalloc(strlen(name) + sizeof(dccp_li_hist_mask) - 1,
29 GFP_ATOMIC);
30 if (slab_name == NULL)
31 goto out_free_hist;
32
33 sprintf(slab_name, dccp_li_hist_mask, name);
34 hist->dccplih_slab = kmem_cache_create(slab_name,
35 sizeof(struct dccp_li_hist_entry),
36 0, SLAB_HWCACHE_ALIGN,
37 NULL, NULL);
38 if (hist->dccplih_slab == NULL)
39 goto out_free_slab_name;
40out:
41 return hist;
42out_free_slab_name:
43 kfree(slab_name);
44out_free_hist:
45 kfree(hist);
46 hist = NULL;
47 goto out;
48}
49 22
50EXPORT_SYMBOL_GPL(dccp_li_hist_new); 23struct dccp_li_hist_entry {
24 struct list_head dccplih_node;
25 u64 dccplih_seqno:48,
26 dccplih_win_count:4;
27 u32 dccplih_interval;
28};
51 29
52void dccp_li_hist_delete(struct dccp_li_hist *hist) 30static struct kmem_cache *dccp_li_cachep __read_mostly;
53{
54 const char* name = kmem_cache_name(hist->dccplih_slab);
55 31
56 kmem_cache_destroy(hist->dccplih_slab); 32static inline struct dccp_li_hist_entry *dccp_li_hist_entry_new(const gfp_t prio)
57 kfree(name); 33{
58 kfree(hist); 34 return kmem_cache_alloc(dccp_li_cachep, prio);
59} 35}
60 36
61EXPORT_SYMBOL_GPL(dccp_li_hist_delete); 37static inline void dccp_li_hist_entry_delete(struct dccp_li_hist_entry *entry)
38{
39 if (entry != NULL)
40 kmem_cache_free(dccp_li_cachep, entry);
41}
62 42
63void dccp_li_hist_purge(struct dccp_li_hist *hist, struct list_head *list) 43void dccp_li_hist_purge(struct list_head *list)
64{ 44{
65 struct dccp_li_hist_entry *entry, *next; 45 struct dccp_li_hist_entry *entry, *next;
66 46
67 list_for_each_entry_safe(entry, next, list, dccplih_node) { 47 list_for_each_entry_safe(entry, next, list, dccplih_node) {
68 list_del_init(&entry->dccplih_node); 48 list_del_init(&entry->dccplih_node);
69 kmem_cache_free(hist->dccplih_slab, entry); 49 kmem_cache_free(dccp_li_cachep, entry);
70 } 50 }
71} 51}
72 52
@@ -118,16 +98,16 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
118 98
119EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean); 99EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean);
120 100
121int dccp_li_hist_interval_new(struct dccp_li_hist *hist, 101static int dccp_li_hist_interval_new(struct list_head *list,
122 struct list_head *list, const u64 seq_loss, const u8 win_loss) 102 const u64 seq_loss, const u8 win_loss)
123{ 103{
124 struct dccp_li_hist_entry *entry; 104 struct dccp_li_hist_entry *entry;
125 int i; 105 int i;
126 106
127 for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) { 107 for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
128 entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC); 108 entry = dccp_li_hist_entry_new(GFP_ATOMIC);
129 if (entry == NULL) { 109 if (entry == NULL) {
130 dccp_li_hist_purge(hist, list); 110 dccp_li_hist_purge(list);
131 DCCP_BUG("loss interval list entry is NULL"); 111 DCCP_BUG("loss interval list entry is NULL");
132 return 0; 112 return 0;
133 } 113 }
@@ -140,4 +120,176 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
140 return 1; 120 return 1;
141} 121}
142 122
143EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new); 123/* calculate first loss interval
124 *
125 * returns estimated loss interval in usecs */
126static u32 dccp_li_calc_first_li(struct sock *sk,
127 struct list_head *hist_list,
128 struct timeval *last_feedback,
129 u16 s, u32 bytes_recv,
130 u32 previous_x_recv)
131{
132 struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
133 u32 x_recv, p;
134 suseconds_t rtt, delta;
135 struct timeval tstamp = { 0, 0 };
136 int interval = 0;
137 int win_count = 0;
138 int step = 0;
139 u64 fval;
140
141 list_for_each_entry_safe(entry, next, hist_list, dccphrx_node) {
142 if (dccp_rx_hist_entry_data_packet(entry)) {
143 tail = entry;
144
145 switch (step) {
146 case 0:
147 tstamp = entry->dccphrx_tstamp;
148 win_count = entry->dccphrx_ccval;
149 step = 1;
150 break;
151 case 1:
152 interval = win_count - entry->dccphrx_ccval;
153 if (interval < 0)
154 interval += TFRC_WIN_COUNT_LIMIT;
155 if (interval > 4)
156 goto found;
157 break;
158 }
159 }
160 }
161
162 if (unlikely(step == 0)) {
163 DCCP_WARN("%s(%p), packet history has no data packets!\n",
164 dccp_role(sk), sk);
165 return ~0;
166 }
167
168 if (unlikely(interval == 0)) {
169 DCCP_WARN("%s(%p), Could not find a win_count interval > 0."
170 "Defaulting to 1\n", dccp_role(sk), sk);
171 interval = 1;
172 }
173found:
174 if (!tail) {
175 DCCP_CRIT("tail is null\n");
176 return ~0;
177 }
178
179 delta = timeval_delta(&tstamp, &tail->dccphrx_tstamp);
180 DCCP_BUG_ON(delta < 0);
181
182 rtt = delta * 4 / interval;
183 dccp_pr_debug("%s(%p), approximated RTT to %dus\n",
184 dccp_role(sk), sk, (int)rtt);
185
186 /*
187 * Determine the length of the first loss interval via inverse lookup.
188 * Assume that X_recv can be computed by the throughput equation
189 * s
190 * X_recv = --------
191 * R * fval
192 * Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1].
193 */
194 if (rtt == 0) { /* would result in divide-by-zero */
195 DCCP_WARN("RTT==0\n");
196 return ~0;
197 }
198
199 dccp_timestamp(sk, &tstamp);
200 delta = timeval_delta(&tstamp, last_feedback);
201 DCCP_BUG_ON(delta <= 0);
202
203 x_recv = scaled_div32(bytes_recv, delta);
204 if (x_recv == 0) { /* would also trigger divide-by-zero */
205 DCCP_WARN("X_recv==0\n");
206 if (previous_x_recv == 0) {
207 DCCP_BUG("stored value of X_recv is zero");
208 return ~0;
209 }
210 x_recv = previous_x_recv;
211 }
212
213 fval = scaled_div(s, rtt);
214 fval = scaled_div32(fval, x_recv);
215 p = tfrc_calc_x_reverse_lookup(fval);
216
217 dccp_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
218 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
219
220 if (p == 0)
221 return ~0;
222 else
223 return 1000000 / p;
224}
225
226void dccp_li_update_li(struct sock *sk,
227 struct list_head *li_hist_list,
228 struct list_head *hist_list,
229 struct timeval *last_feedback, u16 s, u32 bytes_recv,
230 u32 previous_x_recv, u64 seq_loss, u8 win_loss)
231{
232 struct dccp_li_hist_entry *head;
233 u64 seq_temp;
234
235 if (list_empty(li_hist_list)) {
236 if (!dccp_li_hist_interval_new(li_hist_list, seq_loss,
237 win_loss))
238 return;
239
240 head = list_entry(li_hist_list->next, struct dccp_li_hist_entry,
241 dccplih_node);
242 head->dccplih_interval = dccp_li_calc_first_li(sk, hist_list,
243 last_feedback,
244 s, bytes_recv,
245 previous_x_recv);
246 } else {
247 struct dccp_li_hist_entry *entry;
248 struct list_head *tail;
249
250 head = list_entry(li_hist_list->next, struct dccp_li_hist_entry,
251 dccplih_node);
252 /* FIXME win count check removed as was wrong */
253 /* should make this check with receive history */
254 /* and compare there as per section 10.2 of RFC4342 */
255
256 /* new loss event detected */
257 /* calculate last interval length */
258 seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
259 entry = dccp_li_hist_entry_new(GFP_ATOMIC);
260
261 if (entry == NULL) {
262 DCCP_BUG("out of memory - can not allocate entry");
263 return;
264 }
265
266 list_add(&entry->dccplih_node, li_hist_list);
267
268 tail = li_hist_list->prev;
269 list_del(tail);
270 kmem_cache_free(dccp_li_cachep, tail);
271
272 /* Create the newest interval */
273 entry->dccplih_seqno = seq_loss;
274 entry->dccplih_interval = seq_temp;
275 entry->dccplih_win_count = win_loss;
276 }
277}
278
279EXPORT_SYMBOL_GPL(dccp_li_update_li);
280
281static __init int dccp_li_init(void)
282{
283 dccp_li_cachep = kmem_cache_create("dccp_li_hist",
284 sizeof(struct dccp_li_hist_entry),
285 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
286 return dccp_li_cachep == NULL ? -ENOBUFS : 0;
287}
288
289static __exit void dccp_li_exit(void)
290{
291 kmem_cache_destroy(dccp_li_cachep);
292}
293
294module_init(dccp_li_init);
295module_exit(dccp_li_exit);
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index eb257014dd74..906c806d6d9d 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -3,8 +3,8 @@
3/* 3/*
4 * net/dccp/ccids/lib/loss_interval.h 4 * net/dccp/ccids/lib/loss_interval.h
5 * 5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 6 * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz> 7 * Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
@@ -14,44 +14,16 @@
14 */ 14 */
15 15
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/slab.h>
18#include <linux/time.h> 17#include <linux/time.h>
19 18
20#define DCCP_LI_HIST_IVAL_F_LENGTH 8 19extern void dccp_li_hist_purge(struct list_head *list);
21
22struct dccp_li_hist {
23 struct kmem_cache *dccplih_slab;
24};
25
26extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
27extern void dccp_li_hist_delete(struct dccp_li_hist *hist);
28
29struct dccp_li_hist_entry {
30 struct list_head dccplih_node;
31 u64 dccplih_seqno:48,
32 dccplih_win_count:4;
33 u32 dccplih_interval;
34};
35
36static inline struct dccp_li_hist_entry *
37 dccp_li_hist_entry_new(struct dccp_li_hist *hist,
38 const gfp_t prio)
39{
40 return kmem_cache_alloc(hist->dccplih_slab, prio);
41}
42
43static inline void dccp_li_hist_entry_delete(struct dccp_li_hist *hist,
44 struct dccp_li_hist_entry *entry)
45{
46 if (entry != NULL)
47 kmem_cache_free(hist->dccplih_slab, entry);
48}
49
50extern void dccp_li_hist_purge(struct dccp_li_hist *hist,
51 struct list_head *list);
52 20
53extern u32 dccp_li_hist_calc_i_mean(struct list_head *list); 21extern u32 dccp_li_hist_calc_i_mean(struct list_head *list);
54 22
55extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist, 23extern void dccp_li_update_li(struct sock *sk,
56 struct list_head *list, const u64 seq_loss, const u8 win_loss); 24 struct list_head *li_hist_list,
25 struct list_head *hist_list,
26 struct timeval *last_feedback, u16 s,
27 u32 bytes_recv, u32 previous_x_recv,
28 u64 seq_loss, u8 win_loss);
57#endif /* _DCCP_LI_HIST_ */ 29#endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index d8ad27bfe01a..e2d74cd7eeeb 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -184,7 +184,7 @@ DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
184/* 184/*
185 * Checksumming routines 185 * Checksumming routines
186 */ 186 */
187static inline int dccp_csum_coverage(const struct sk_buff *skb) 187static inline unsigned int dccp_csum_coverage(const struct sk_buff *skb)
188{ 188{
189 const struct dccp_hdr* dh = dccp_hdr(skb); 189 const struct dccp_hdr* dh = dccp_hdr(skb);
190 190
@@ -195,7 +195,7 @@ static inline int dccp_csum_coverage(const struct sk_buff *skb)
195 195
196static inline void dccp_csum_outgoing(struct sk_buff *skb) 196static inline void dccp_csum_outgoing(struct sk_buff *skb)
197{ 197{
198 int cov = dccp_csum_coverage(skb); 198 unsigned int cov = dccp_csum_coverage(skb);
199 199
200 if (cov >= skb->len) 200 if (cov >= skb->len)
201 dccp_hdr(skb)->dccph_cscov = 0; 201 dccp_hdr(skb)->dccph_cscov = 0;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 31737cdf156a..b158c661867b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -253,17 +253,6 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
253 253
254 if (dst == NULL) { 254 if (dst == NULL) {
255 opt = np->opt; 255 opt = np->opt;
256 if (opt == NULL &&
257 np->rxopt.bits.osrcrt == 2 &&
258 ireq6->pktopts) {
259 struct sk_buff *pktopts = ireq6->pktopts;
260 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
261
262 if (rxopt->srcrt)
263 opt = ipv6_invert_rthdr(sk,
264 (struct ipv6_rt_hdr *)(skb_network_header(pktopts) +
265 rxopt->srcrt));
266 }
267 256
268 if (opt != NULL && opt->srcrt != NULL) { 257 if (opt != NULL && opt->srcrt != NULL) {
269 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; 258 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
@@ -570,15 +559,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
570 if (sk_acceptq_is_full(sk)) 559 if (sk_acceptq_is_full(sk))
571 goto out_overflow; 560 goto out_overflow;
572 561
573 if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
574 const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
575
576 if (rxopt->srcrt)
577 opt = ipv6_invert_rthdr(sk,
578 (struct ipv6_rt_hdr *)(skb_network_header(ireq6->pktopts) +
579 rxopt->srcrt));
580 }
581
582 if (dst == NULL) { 562 if (dst == NULL) {
583 struct in6_addr *final_p = NULL, final; 563 struct in6_addr *final_p = NULL, final;
584 struct flowi fl; 564 struct flowi fl;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index bfa910b6ad25..ed76d4aab4a9 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2304,7 +2304,7 @@ static int dn_socket_seq_show(struct seq_file *seq, void *v)
2304 return 0; 2304 return 0;
2305} 2305}
2306 2306
2307static struct seq_operations dn_socket_seq_ops = { 2307static const struct seq_operations dn_socket_seq_ops = {
2308 .start = dn_socket_seq_start, 2308 .start = dn_socket_seq_start,
2309 .next = dn_socket_seq_next, 2309 .next = dn_socket_seq_next,
2310 .stop = dn_socket_seq_stop, 2310 .stop = dn_socket_seq_stop,
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ab41c1879fd4..fa6604fcf0e7 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -461,7 +461,6 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
461 if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { 461 if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
462 dn_dn2eth(mac_addr, ifa->ifa_local); 462 dn_dn2eth(mac_addr, ifa->ifa_local);
463 dev_mc_add(dev, mac_addr, ETH_ALEN, 0); 463 dev_mc_add(dev, mac_addr, ETH_ALEN, 0);
464 dev_mc_upload(dev);
465 } 464 }
466 } 465 }
467 466
@@ -1064,8 +1063,6 @@ static int dn_eth_up(struct net_device *dev)
1064 else 1063 else
1065 dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); 1064 dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
1066 1065
1067 dev_mc_upload(dev);
1068
1069 dn_db->use_long = 1; 1066 dn_db->use_long = 1;
1070 1067
1071 return 0; 1068 return 0;
@@ -1419,7 +1416,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v)
1419 return 0; 1416 return 0;
1420} 1417}
1421 1418
1422static struct seq_operations dn_dev_seq_ops = { 1419static const struct seq_operations dn_dev_seq_ops = {
1423 .start = dn_dev_seq_start, 1420 .start = dn_dev_seq_start,
1424 .next = dn_dev_seq_next, 1421 .next = dn_dev_seq_next,
1425 .stop = dn_dev_seq_stop, 1422 .stop = dn_dev_seq_stop,
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 4bf066c416e2..174d8a7a6dac 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -569,7 +569,7 @@ static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos)
569 NEIGH_SEQ_NEIGH_ONLY); 569 NEIGH_SEQ_NEIGH_ONLY);
570} 570}
571 571
572static struct seq_operations dn_neigh_seq_ops = { 572static const struct seq_operations dn_neigh_seq_ops = {
573 .start = dn_neigh_seq_start, 573 .start = dn_neigh_seq_start,
574 .next = neigh_seq_next, 574 .next = neigh_seq_next,
575 .stop = neigh_seq_stop, 575 .stop = neigh_seq_stop,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a8bf106b7a61..82622fb6f68f 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1726,7 +1726,7 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1726 return 0; 1726 return 0;
1727} 1727}
1728 1728
1729static struct seq_operations dn_rt_cache_seq_ops = { 1729static const struct seq_operations dn_rt_cache_seq_ops = {
1730 .start = dn_rt_cache_seq_start, 1730 .start = dn_rt_cache_seq_start,
1731 .next = dn_rt_cache_seq_next, 1731 .next = dn_rt_cache_seq_next,
1732 .stop = dn_rt_cache_seq_stop, 1732 .stop = dn_rt_cache_seq_stop,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 0ac2524f3b68..12c765715acf 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -266,8 +266,11 @@ void eth_header_cache_update(struct hh_cache *hh, struct net_device *dev,
266static int eth_mac_addr(struct net_device *dev, void *p) 266static int eth_mac_addr(struct net_device *dev, void *p)
267{ 267{
268 struct sockaddr *addr = p; 268 struct sockaddr *addr = p;
269
269 if (netif_running(dev)) 270 if (netif_running(dev))
270 return -EBUSY; 271 return -EBUSY;
272 if (!is_valid_ether_addr(addr->sa_data))
273 return -EADDRNOTAVAIL;
271 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 274 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
272 return 0; 275 return 0;
273} 276}
@@ -316,9 +319,10 @@ void ether_setup(struct net_device *dev)
316EXPORT_SYMBOL(ether_setup); 319EXPORT_SYMBOL(ether_setup);
317 320
318/** 321/**
319 * alloc_etherdev - Allocates and sets up an Ethernet device 322 * alloc_etherdev_mq - Allocates and sets up an Ethernet device
320 * @sizeof_priv: Size of additional driver-private structure to be allocated 323 * @sizeof_priv: Size of additional driver-private structure to be allocated
321 * for this Ethernet device 324 * for this Ethernet device
325 * @queue_count: The number of queues this device has.
322 * 326 *
323 * Fill in the fields of the device structure with Ethernet-generic 327 * Fill in the fields of the device structure with Ethernet-generic
324 * values. Basically does everything except registering the device. 328 * values. Basically does everything except registering the device.
@@ -328,8 +332,8 @@ EXPORT_SYMBOL(ether_setup);
328 * this private data area. 332 * this private data area.
329 */ 333 */
330 334
331struct net_device *alloc_etherdev(int sizeof_priv) 335struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
332{ 336{
333 return alloc_netdev(sizeof_priv, "eth%d", ether_setup); 337 return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
334} 338}
335EXPORT_SYMBOL(alloc_etherdev); 339EXPORT_SYMBOL(alloc_etherdev_mq);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 010fbb2d45e9..fb7909774254 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -116,48 +116,6 @@ config IP_ROUTE_MULTIPATH
116 equal "cost" and chooses one of them in a non-deterministic fashion 116 equal "cost" and chooses one of them in a non-deterministic fashion
117 if a matching packet arrives. 117 if a matching packet arrives.
118 118
119config IP_ROUTE_MULTIPATH_CACHED
120 bool "IP: equal cost multipath with caching support (EXPERIMENTAL)"
121 depends on IP_ROUTE_MULTIPATH
122 help
123 Normally, equal cost multipath routing is not supported by the
124 routing cache. If you say Y here, alternative routes are cached
125 and on cache lookup a route is chosen in a configurable fashion.
126
127 If unsure, say N.
128
129config IP_ROUTE_MULTIPATH_RR
130 tristate "MULTIPATH: round robin algorithm"
131 depends on IP_ROUTE_MULTIPATH_CACHED
132 help
133 Multipath routes are chosen according to Round Robin
134
135config IP_ROUTE_MULTIPATH_RANDOM
136 tristate "MULTIPATH: random algorithm"
137 depends on IP_ROUTE_MULTIPATH_CACHED
138 help
139 Multipath routes are chosen in a random fashion. Actually,
140 there is no weight for a route. The advantage of this policy
141 is that it is implemented stateless and therefore introduces only
142 a very small delay.
143
144config IP_ROUTE_MULTIPATH_WRANDOM
145 tristate "MULTIPATH: weighted random algorithm"
146 depends on IP_ROUTE_MULTIPATH_CACHED
147 help
148 Multipath routes are chosen in a weighted random fashion.
149 The per route weights are the weights visible via ip route 2. As the
150 corresponding state management introduces some overhead routing delay
151 is increased.
152
153config IP_ROUTE_MULTIPATH_DRR
154 tristate "MULTIPATH: interface round robin algorithm"
155 depends on IP_ROUTE_MULTIPATH_CACHED
156 help
157 Connections are distributed in a round robin fashion over the
158 available interfaces. This policy makes sense if the connections
159 should be primarily distributed on interfaces and not on routes.
160
161config IP_ROUTE_VERBOSE 119config IP_ROUTE_VERBOSE
162 bool "IP: verbose route monitoring" 120 bool "IP: verbose route monitoring"
163 depends on IP_ADVANCED_ROUTER 121 depends on IP_ADVANCED_ROUTER
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 4ff6c151d7f3..fbf1674e0c2a 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -29,14 +29,9 @@ obj-$(CONFIG_INET_TUNNEL) += tunnel4.o
29obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o 29obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o
30obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o 30obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o
31obj-$(CONFIG_IP_PNP) += ipconfig.o 31obj-$(CONFIG_IP_PNP) += ipconfig.o
32obj-$(CONFIG_IP_ROUTE_MULTIPATH_RR) += multipath_rr.o
33obj-$(CONFIG_IP_ROUTE_MULTIPATH_RANDOM) += multipath_random.o
34obj-$(CONFIG_IP_ROUTE_MULTIPATH_WRANDOM) += multipath_wrandom.o
35obj-$(CONFIG_IP_ROUTE_MULTIPATH_DRR) += multipath_drr.o
36obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/ 32obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/
37obj-$(CONFIG_IP_VS) += ipvs/ 33obj-$(CONFIG_IP_VS) += ipvs/
38obj-$(CONFIG_INET_DIAG) += inet_diag.o 34obj-$(CONFIG_INET_DIAG) += inet_diag.o
39obj-$(CONFIG_IP_ROUTE_MULTIPATH_CACHED) += multipath.o
40obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o 35obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
41obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o 36obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
42obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o 37obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 041fba3fa0aa..06c08e5740fb 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1170,6 +1170,9 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
1170 int ihl; 1170 int ihl;
1171 int id; 1171 int id;
1172 1172
1173 if (!(features & NETIF_F_V4_CSUM))
1174 features &= ~NETIF_F_SG;
1175
1173 if (unlikely(skb_shinfo(skb)->gso_type & 1176 if (unlikely(skb_shinfo(skb)->gso_type &
1174 ~(SKB_GSO_TCPV4 | 1177 ~(SKB_GSO_TCPV4 |
1175 SKB_GSO_UDP | 1178 SKB_GSO_UDP |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 6da8ff597ad3..7a23e59c374a 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -339,3 +339,4 @@ static void __exit ah4_fini(void)
339module_init(ah4_init); 339module_init(ah4_init);
340module_exit(ah4_fini); 340module_exit(ah4_fini);
341MODULE_LICENSE("GPL"); 341MODULE_LICENSE("GPL");
342MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 47c95e8ef045..98767a4f1185 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -481,3 +481,4 @@ static void __exit esp4_fini(void)
481module_init(esp4_init); 481module_init(esp4_init);
482module_exit(esp4_fini); 482module_exit(esp4_fini);
483MODULE_LICENSE("GPL"); 483MODULE_LICENSE("GPL");
484MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 311d633f7f39..2eb909be8041 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -453,7 +453,6 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = {
453 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 453 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
454 [RTA_PROTOINFO] = { .type = NLA_U32 }, 454 [RTA_PROTOINFO] = { .type = NLA_U32 },
455 [RTA_FLOW] = { .type = NLA_U32 }, 455 [RTA_FLOW] = { .type = NLA_U32 },
456 [RTA_MP_ALGO] = { .type = NLA_U32 },
457}; 456};
458 457
459static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh, 458static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -515,9 +514,6 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
515 case RTA_FLOW: 514 case RTA_FLOW:
516 cfg->fc_flow = nla_get_u32(attr); 515 cfg->fc_flow = nla_get_u32(attr);
517 break; 516 break;
518 case RTA_MP_ALGO:
519 cfg->fc_mp_alg = nla_get_u32(attr);
520 break;
521 case RTA_TABLE: 517 case RTA_TABLE:
522 cfg->fc_table = nla_get_u32(attr); 518 cfg->fc_table = nla_get_u32(attr);
523 break; 519 break;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index bb94550d95c3..c434119deb52 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -42,7 +42,6 @@
42#include <net/tcp.h> 42#include <net/tcp.h>
43#include <net/sock.h> 43#include <net/sock.h>
44#include <net/ip_fib.h> 44#include <net/ip_fib.h>
45#include <net/ip_mp_alg.h>
46#include <net/netlink.h> 45#include <net/netlink.h>
47#include <net/nexthop.h> 46#include <net/nexthop.h>
48 47
@@ -697,13 +696,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
697 goto err_inval; 696 goto err_inval;
698 } 697 }
699#endif 698#endif
700#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
701 if (cfg->fc_mp_alg) {
702 if (cfg->fc_mp_alg < IP_MP_ALG_NONE ||
703 cfg->fc_mp_alg > IP_MP_ALG_MAX)
704 goto err_inval;
705 }
706#endif
707 699
708 err = -ENOBUFS; 700 err = -ENOBUFS;
709 if (fib_info_cnt >= fib_hash_size) { 701 if (fib_info_cnt >= fib_hash_size) {
@@ -791,10 +783,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
791#endif 783#endif
792 } 784 }
793 785
794#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
795 fi->fib_mp_alg = cfg->fc_mp_alg;
796#endif
797
798 if (fib_props[cfg->fc_type].error) { 786 if (fib_props[cfg->fc_type].error) {
799 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) 787 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
800 goto err_inval; 788 goto err_inval;
@@ -940,10 +928,6 @@ out_fill_res:
940 res->type = fa->fa_type; 928 res->type = fa->fa_type;
941 res->scope = fa->fa_scope; 929 res->scope = fa->fa_scope;
942 res->fi = fa->fa_info; 930 res->fi = fa->fa_info;
943#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
944 res->netmask = mask;
945 res->network = zone & inet_make_mask(prefixlen);
946#endif
947 atomic_inc(&res->fi->fib_clntref); 931 atomic_inc(&res->fi->fib_clntref);
948 return 0; 932 return 0;
949} 933}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 63282934725e..5c14ed63e56c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -809,7 +809,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
809 809
810 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; 810 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
811 811
812 if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) { 812 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
813 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
813 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 814 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
814 if (!new_skb) { 815 if (!new_skb) {
815 ip_rt_put(rt); 816 ip_rt_put(rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 34ea4547ebbe..c9e2b5e6305e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -399,6 +399,10 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
399 to->tc_index = from->tc_index; 399 to->tc_index = from->tc_index;
400#endif 400#endif
401 nf_copy(to, from); 401 nf_copy(to, from);
402#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
403 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
404 to->nf_trace = from->nf_trace;
405#endif
402#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 406#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
403 to->ipvs_property = from->ipvs_property; 407 to->ipvs_property = from->ipvs_property;
404#endif 408#endif
@@ -837,7 +841,7 @@ int ip_append_data(struct sock *sk,
837 */ 841 */
838 if (transhdrlen && 842 if (transhdrlen &&
839 length + fragheaderlen <= mtu && 843 length + fragheaderlen <= mtu &&
840 rt->u.dst.dev->features & NETIF_F_ALL_CSUM && 844 rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
841 !exthdrlen) 845 !exthdrlen)
842 csummode = CHECKSUM_PARTIAL; 846 csummode = CHECKSUM_PARTIAL;
843 847
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index ab86137c71d2..e787044a8514 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -485,3 +485,4 @@ MODULE_LICENSE("GPL");
485MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173"); 485MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
486MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); 486MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
487 487
488MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ebd2f2d532f6..396437242a1b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -595,7 +595,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
595 */ 595 */
596 max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr)); 596 max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr));
597 597
598 if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) { 598 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
599 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
599 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 600 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
600 if (!new_skb) { 601 if (!new_skb) {
601 ip_rt_put(rt); 602 ip_rt_put(rt);
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 15ad5dd2d984..8d6901d4e94f 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -549,7 +549,7 @@ static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
549 return 0; 549 return 0;
550} 550}
551 551
552static struct seq_operations ip_vs_app_seq_ops = { 552static const struct seq_operations ip_vs_app_seq_ops = {
553 .start = ip_vs_app_seq_start, 553 .start = ip_vs_app_seq_start,
554 .next = ip_vs_app_seq_next, 554 .next = ip_vs_app_seq_next,
555 .stop = ip_vs_app_seq_stop, 555 .stop = ip_vs_app_seq_stop,
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 7018f97c75dc..3b446b1a6b9c 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -745,7 +745,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
745 return 0; 745 return 0;
746} 746}
747 747
748static struct seq_operations ip_vs_conn_seq_ops = { 748static const struct seq_operations ip_vs_conn_seq_ops = {
749 .start = ip_vs_conn_seq_start, 749 .start = ip_vs_conn_seq_start,
750 .next = ip_vs_conn_seq_next, 750 .next = ip_vs_conn_seq_next,
751 .stop = ip_vs_conn_seq_stop, 751 .stop = ip_vs_conn_seq_stop,
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 68fe1d4d0210..e1052bcf4ed1 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -1783,7 +1783,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1783 return 0; 1783 return 0;
1784} 1784}
1785 1785
1786static struct seq_operations ip_vs_info_seq_ops = { 1786static const struct seq_operations ip_vs_info_seq_ops = {
1787 .start = ip_vs_info_seq_start, 1787 .start = ip_vs_info_seq_start,
1788 .next = ip_vs_info_seq_next, 1788 .next = ip_vs_info_seq_next,
1789 .stop = ip_vs_info_seq_stop, 1789 .stop = ip_vs_info_seq_stop,
diff --git a/net/ipv4/multipath.c b/net/ipv4/multipath.c
deleted file mode 100644
index 4e9ca7c76407..000000000000
--- a/net/ipv4/multipath.c
+++ /dev/null
@@ -1,55 +0,0 @@
1/* multipath.c: IPV4 multipath algorithm support.
2 *
3 * Copyright (C) 2004, 2005 Einar Lueck <elueck@de.ibm.com>
4 * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
5 */
6
7#include <linux/module.h>
8#include <linux/errno.h>
9#include <linux/netdevice.h>
10#include <linux/spinlock.h>
11
12#include <net/ip_mp_alg.h>
13
14static DEFINE_SPINLOCK(alg_table_lock);
15struct ip_mp_alg_ops *ip_mp_alg_table[IP_MP_ALG_MAX + 1];
16
17int multipath_alg_register(struct ip_mp_alg_ops *ops, enum ip_mp_alg n)
18{
19 struct ip_mp_alg_ops **slot;
20 int err;
21
22 if (n < IP_MP_ALG_NONE || n > IP_MP_ALG_MAX ||
23 !ops->mp_alg_select_route)
24 return -EINVAL;
25
26 spin_lock(&alg_table_lock);
27 slot = &ip_mp_alg_table[n];
28 if (*slot != NULL) {
29 err = -EBUSY;
30 } else {
31 *slot = ops;
32 err = 0;
33 }
34 spin_unlock(&alg_table_lock);
35
36 return err;
37}
38EXPORT_SYMBOL(multipath_alg_register);
39
40void multipath_alg_unregister(struct ip_mp_alg_ops *ops, enum ip_mp_alg n)
41{
42 struct ip_mp_alg_ops **slot;
43
44 if (n < IP_MP_ALG_NONE || n > IP_MP_ALG_MAX)
45 return;
46
47 spin_lock(&alg_table_lock);
48 slot = &ip_mp_alg_table[n];
49 if (*slot == ops)
50 *slot = NULL;
51 spin_unlock(&alg_table_lock);
52
53 synchronize_net();
54}
55EXPORT_SYMBOL(multipath_alg_unregister);
diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c
deleted file mode 100644
index b03c5ca2c823..000000000000
--- a/net/ipv4/multipath_drr.c
+++ /dev/null
@@ -1,249 +0,0 @@
1/*
2 * Device round robin policy for multipath.
3 *
4 *
5 * Version: $Id: multipath_drr.c,v 1.1.2.1 2004/09/16 07:42:34 elueck Exp $
6 *
7 * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <asm/system.h>
16#include <asm/uaccess.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/timer.h>
20#include <linux/mm.h>
21#include <linux/kernel.h>
22#include <linux/fcntl.h>
23#include <linux/stat.h>
24#include <linux/socket.h>
25#include <linux/in.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/inetdevice.h>
29#include <linux/igmp.h>
30#include <linux/proc_fs.h>
31#include <linux/seq_file.h>
32#include <linux/module.h>
33#include <linux/mroute.h>
34#include <linux/init.h>
35#include <net/ip.h>
36#include <net/protocol.h>
37#include <linux/skbuff.h>
38#include <net/sock.h>
39#include <net/icmp.h>
40#include <net/udp.h>
41#include <net/raw.h>
42#include <linux/notifier.h>
43#include <linux/if_arp.h>
44#include <linux/netfilter_ipv4.h>
45#include <net/ipip.h>
46#include <net/checksum.h>
47#include <net/ip_mp_alg.h>
48
49struct multipath_device {
50 int ifi; /* interface index of device */
51 atomic_t usecount;
52 int allocated;
53};
54
55#define MULTIPATH_MAX_DEVICECANDIDATES 10
56
57static struct multipath_device state[MULTIPATH_MAX_DEVICECANDIDATES];
58static DEFINE_SPINLOCK(state_lock);
59
60static int inline __multipath_findslot(void)
61{
62 int i;
63
64 for (i = 0; i < MULTIPATH_MAX_DEVICECANDIDATES; i++) {
65 if (state[i].allocated == 0)
66 return i;
67 }
68 return -1;
69}
70
71static int inline __multipath_finddev(int ifindex)
72{
73 int i;
74
75 for (i = 0; i < MULTIPATH_MAX_DEVICECANDIDATES; i++) {
76 if (state[i].allocated != 0 &&
77 state[i].ifi == ifindex)
78 return i;
79 }
80 return -1;
81}
82
83static int drr_dev_event(struct notifier_block *this,
84 unsigned long event, void *ptr)
85{
86 struct net_device *dev = ptr;
87 int devidx;
88
89 switch (event) {
90 case NETDEV_UNREGISTER:
91 case NETDEV_DOWN:
92 spin_lock_bh(&state_lock);
93
94 devidx = __multipath_finddev(dev->ifindex);
95 if (devidx != -1) {
96 state[devidx].allocated = 0;
97 state[devidx].ifi = 0;
98 atomic_set(&state[devidx].usecount, 0);
99 }
100
101 spin_unlock_bh(&state_lock);
102 break;
103 }
104
105 return NOTIFY_DONE;
106}
107
108static struct notifier_block drr_dev_notifier = {
109 .notifier_call = drr_dev_event,
110};
111
112
113static void drr_safe_inc(atomic_t *usecount)
114{
115 int n;
116
117 atomic_inc(usecount);
118
119 n = atomic_read(usecount);
120 if (n <= 0) {
121 int i;
122
123 spin_lock_bh(&state_lock);
124
125 for (i = 0; i < MULTIPATH_MAX_DEVICECANDIDATES; i++)
126 atomic_set(&state[i].usecount, 0);
127
128 spin_unlock_bh(&state_lock);
129 }
130}
131
132static void drr_select_route(const struct flowi *flp,
133 struct rtable *first, struct rtable **rp)
134{
135 struct rtable *nh, *result, *cur_min;
136 int min_usecount = -1;
137 int devidx = -1;
138 int cur_min_devidx = -1;
139
140 /* 1. make sure all alt. nexthops have the same GC related data */
141 /* 2. determine the new candidate to be returned */
142 result = NULL;
143 cur_min = NULL;
144 for (nh = rcu_dereference(first); nh;
145 nh = rcu_dereference(nh->u.dst.rt_next)) {
146 if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
147 multipath_comparekeys(&nh->fl, flp)) {
148 int nh_ifidx = nh->u.dst.dev->ifindex;
149
150 nh->u.dst.lastuse = jiffies;
151 nh->u.dst.__use++;
152 if (result != NULL)
153 continue;
154
155 /* search for the output interface */
156
157 /* this is not SMP safe, only add/remove are
158 * SMP safe as wrong usecount updates have no big
159 * impact
160 */
161 devidx = __multipath_finddev(nh_ifidx);
162 if (devidx == -1) {
163 /* add the interface to the array
164 * SMP safe
165 */
166 spin_lock_bh(&state_lock);
167
168 /* due to SMP: search again */
169 devidx = __multipath_finddev(nh_ifidx);
170 if (devidx == -1) {
171 /* add entry for device */
172 devidx = __multipath_findslot();
173 if (devidx == -1) {
174 /* unlikely but possible */
175 continue;
176 }
177
178 state[devidx].allocated = 1;
179 state[devidx].ifi = nh_ifidx;
180 atomic_set(&state[devidx].usecount, 0);
181 min_usecount = 0;
182 }
183
184 spin_unlock_bh(&state_lock);
185 }
186
187 if (min_usecount == 0) {
188 /* if the device has not been used it is
189 * the primary target
190 */
191 drr_safe_inc(&state[devidx].usecount);
192 result = nh;
193 } else {
194 int count =
195 atomic_read(&state[devidx].usecount);
196
197 if (min_usecount == -1 ||
198 count < min_usecount) {
199 cur_min = nh;
200 cur_min_devidx = devidx;
201 min_usecount = count;
202 }
203 }
204 }
205 }
206
207 if (!result) {
208 if (cur_min) {
209 drr_safe_inc(&state[cur_min_devidx].usecount);
210 result = cur_min;
211 } else {
212 result = first;
213 }
214 }
215
216 *rp = result;
217}
218
219static struct ip_mp_alg_ops drr_ops = {
220 .mp_alg_select_route = drr_select_route,
221};
222
223static int __init drr_init(void)
224{
225 int err = register_netdevice_notifier(&drr_dev_notifier);
226
227 if (err)
228 return err;
229
230 err = multipath_alg_register(&drr_ops, IP_MP_ALG_DRR);
231 if (err)
232 goto fail;
233
234 return 0;
235
236fail:
237 unregister_netdevice_notifier(&drr_dev_notifier);
238 return err;
239}
240
241static void __exit drr_exit(void)
242{
243 unregister_netdevice_notifier(&drr_dev_notifier);
244 multipath_alg_unregister(&drr_ops, IP_MP_ALG_DRR);
245}
246
247module_init(drr_init);
248module_exit(drr_exit);
249MODULE_LICENSE("GPL");
diff --git a/net/ipv4/multipath_random.c b/net/ipv4/multipath_random.c
deleted file mode 100644
index c312785d14d0..000000000000
--- a/net/ipv4/multipath_random.c
+++ /dev/null
@@ -1,114 +0,0 @@
1/*
2 * Random policy for multipath.
3 *
4 *
5 * Version: $Id: multipath_random.c,v 1.1.2.3 2004/09/21 08:42:11 elueck Exp $
6 *
7 * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <asm/system.h>
16#include <asm/uaccess.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/timer.h>
20#include <linux/mm.h>
21#include <linux/kernel.h>
22#include <linux/fcntl.h>
23#include <linux/stat.h>
24#include <linux/socket.h>
25#include <linux/in.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/inetdevice.h>
29#include <linux/igmp.h>
30#include <linux/proc_fs.h>
31#include <linux/seq_file.h>
32#include <linux/module.h>
33#include <linux/mroute.h>
34#include <linux/init.h>
35#include <linux/random.h>
36#include <net/ip.h>
37#include <net/protocol.h>
38#include <linux/skbuff.h>
39#include <net/sock.h>
40#include <net/icmp.h>
41#include <net/udp.h>
42#include <net/raw.h>
43#include <linux/notifier.h>
44#include <linux/if_arp.h>
45#include <linux/netfilter_ipv4.h>
46#include <net/ipip.h>
47#include <net/checksum.h>
48#include <net/ip_mp_alg.h>
49
50#define MULTIPATH_MAX_CANDIDATES 40
51
52static void random_select_route(const struct flowi *flp,
53 struct rtable *first,
54 struct rtable **rp)
55{
56 struct rtable *rt;
57 struct rtable *decision;
58 unsigned char candidate_count = 0;
59
60 /* count all candidate */
61 for (rt = rcu_dereference(first); rt;
62 rt = rcu_dereference(rt->u.dst.rt_next)) {
63 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
64 multipath_comparekeys(&rt->fl, flp))
65 ++candidate_count;
66 }
67
68 /* choose a random candidate */
69 decision = first;
70 if (candidate_count > 1) {
71 unsigned char i = 0;
72 unsigned char candidate_no = (unsigned char)
73 (random32() % candidate_count);
74
75 /* find chosen candidate and adjust GC data for all candidates
76 * to ensure they stay in cache
77 */
78 for (rt = first; rt; rt = rt->u.dst.rt_next) {
79 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
80 multipath_comparekeys(&rt->fl, flp)) {
81 rt->u.dst.lastuse = jiffies;
82
83 if (i == candidate_no)
84 decision = rt;
85
86 if (i >= candidate_count)
87 break;
88
89 i++;
90 }
91 }
92 }
93
94 decision->u.dst.__use++;
95 *rp = decision;
96}
97
98static struct ip_mp_alg_ops random_ops = {
99 .mp_alg_select_route = random_select_route,
100};
101
102static int __init random_init(void)
103{
104 return multipath_alg_register(&random_ops, IP_MP_ALG_RANDOM);
105}
106
107static void __exit random_exit(void)
108{
109 multipath_alg_unregister(&random_ops, IP_MP_ALG_RANDOM);
110}
111
112module_init(random_init);
113module_exit(random_exit);
114MODULE_LICENSE("GPL");
diff --git a/net/ipv4/multipath_rr.c b/net/ipv4/multipath_rr.c
deleted file mode 100644
index 0ad22524f450..000000000000
--- a/net/ipv4/multipath_rr.c
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * Round robin policy for multipath.
3 *
4 *
5 * Version: $Id: multipath_rr.c,v 1.1.2.2 2004/09/16 07:42:34 elueck Exp $
6 *
7 * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <asm/system.h>
16#include <asm/uaccess.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/timer.h>
20#include <linux/mm.h>
21#include <linux/kernel.h>
22#include <linux/fcntl.h>
23#include <linux/stat.h>
24#include <linux/socket.h>
25#include <linux/in.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/inetdevice.h>
29#include <linux/igmp.h>
30#include <linux/proc_fs.h>
31#include <linux/seq_file.h>
32#include <linux/module.h>
33#include <linux/mroute.h>
34#include <linux/init.h>
35#include <net/ip.h>
36#include <net/protocol.h>
37#include <linux/skbuff.h>
38#include <net/sock.h>
39#include <net/icmp.h>
40#include <net/udp.h>
41#include <net/raw.h>
42#include <linux/notifier.h>
43#include <linux/if_arp.h>
44#include <linux/netfilter_ipv4.h>
45#include <net/ipip.h>
46#include <net/checksum.h>
47#include <net/ip_mp_alg.h>
48
49static void rr_select_route(const struct flowi *flp,
50 struct rtable *first, struct rtable **rp)
51{
52 struct rtable *nh, *result, *min_use_cand = NULL;
53 int min_use = -1;
54
55 /* 1. make sure all alt. nexthops have the same GC related data
56 * 2. determine the new candidate to be returned
57 */
58 result = NULL;
59 for (nh = rcu_dereference(first); nh;
60 nh = rcu_dereference(nh->u.dst.rt_next)) {
61 if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
62 multipath_comparekeys(&nh->fl, flp)) {
63 nh->u.dst.lastuse = jiffies;
64
65 if (min_use == -1 || nh->u.dst.__use < min_use) {
66 min_use = nh->u.dst.__use;
67 min_use_cand = nh;
68 }
69 }
70 }
71 result = min_use_cand;
72 if (!result)
73 result = first;
74
75 result->u.dst.__use++;
76 *rp = result;
77}
78
79static struct ip_mp_alg_ops rr_ops = {
80 .mp_alg_select_route = rr_select_route,
81};
82
83static int __init rr_init(void)
84{
85 return multipath_alg_register(&rr_ops, IP_MP_ALG_RR);
86}
87
88static void __exit rr_exit(void)
89{
90 multipath_alg_unregister(&rr_ops, IP_MP_ALG_RR);
91}
92
93module_init(rr_init);
94module_exit(rr_exit);
95MODULE_LICENSE("GPL");
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
deleted file mode 100644
index 57c503694539..000000000000
--- a/net/ipv4/multipath_wrandom.c
+++ /dev/null
@@ -1,329 +0,0 @@
1/*
2 * Weighted random policy for multipath.
3 *
4 *
5 * Version: $Id: multipath_wrandom.c,v 1.1.2.3 2004/09/22 07:51:40 elueck Exp $
6 *
7 * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <asm/system.h>
16#include <asm/uaccess.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/timer.h>
20#include <linux/mm.h>
21#include <linux/kernel.h>
22#include <linux/fcntl.h>
23#include <linux/stat.h>
24#include <linux/socket.h>
25#include <linux/in.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/inetdevice.h>
29#include <linux/igmp.h>
30#include <linux/proc_fs.h>
31#include <linux/seq_file.h>
32#include <linux/module.h>
33#include <linux/mroute.h>
34#include <linux/init.h>
35#include <linux/random.h>
36#include <net/ip.h>
37#include <net/protocol.h>
38#include <linux/skbuff.h>
39#include <net/sock.h>
40#include <net/icmp.h>
41#include <net/udp.h>
42#include <net/raw.h>
43#include <linux/notifier.h>
44#include <linux/if_arp.h>
45#include <linux/netfilter_ipv4.h>
46#include <net/ipip.h>
47#include <net/checksum.h>
48#include <net/ip_fib.h>
49#include <net/ip_mp_alg.h>
50
51#define MULTIPATH_STATE_SIZE 15
52
53struct multipath_candidate {
54 struct multipath_candidate *next;
55 int power;
56 struct rtable *rt;
57};
58
59struct multipath_dest {
60 struct list_head list;
61
62 const struct fib_nh *nh_info;
63 __be32 netmask;
64 __be32 network;
65 unsigned char prefixlen;
66
67 struct rcu_head rcu;
68};
69
70struct multipath_bucket {
71 struct list_head head;
72 spinlock_t lock;
73};
74
75struct multipath_route {
76 struct list_head list;
77
78 int oif;
79 __be32 gw;
80 struct list_head dests;
81
82 struct rcu_head rcu;
83};
84
85/* state: primarily weight per route information */
86static struct multipath_bucket state[MULTIPATH_STATE_SIZE];
87
88static unsigned char __multipath_lookup_weight(const struct flowi *fl,
89 const struct rtable *rt)
90{
91 const int state_idx = rt->idev->dev->ifindex % MULTIPATH_STATE_SIZE;
92 struct multipath_route *r;
93 struct multipath_route *target_route = NULL;
94 struct multipath_dest *d;
95 int weight = 1;
96
97 /* lookup the weight information for a certain route */
98 rcu_read_lock();
99
100 /* find state entry for gateway or add one if necessary */
101 list_for_each_entry_rcu(r, &state[state_idx].head, list) {
102 if (r->gw == rt->rt_gateway &&
103 r->oif == rt->idev->dev->ifindex) {
104 target_route = r;
105 break;
106 }
107 }
108
109 if (!target_route) {
110 /* this should not happen... but we are prepared */
111 printk( KERN_CRIT"%s: missing state for gateway: %u and " \
112 "device %d\n", __FUNCTION__, rt->rt_gateway,
113 rt->idev->dev->ifindex);
114 goto out;
115 }
116
117 /* find state entry for destination */
118 list_for_each_entry_rcu(d, &target_route->dests, list) {
119 __be32 targetnetwork = fl->fl4_dst &
120 inet_make_mask(d->prefixlen);
121
122 if ((targetnetwork & d->netmask) == d->network) {
123 weight = d->nh_info->nh_weight;
124 goto out;
125 }
126 }
127
128out:
129 rcu_read_unlock();
130 return weight;
131}
132
133static void wrandom_init_state(void)
134{
135 int i;
136
137 for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) {
138 INIT_LIST_HEAD(&state[i].head);
139 spin_lock_init(&state[i].lock);
140 }
141}
142
143static void wrandom_select_route(const struct flowi *flp,
144 struct rtable *first,
145 struct rtable **rp)
146{
147 struct rtable *rt;
148 struct rtable *decision;
149 struct multipath_candidate *first_mpc = NULL;
150 struct multipath_candidate *mpc, *last_mpc = NULL;
151 int power = 0;
152 int last_power;
153 int selector;
154 const size_t size_mpc = sizeof(struct multipath_candidate);
155
156 /* collect all candidates and identify their weights */
157 for (rt = rcu_dereference(first); rt;
158 rt = rcu_dereference(rt->u.dst.rt_next)) {
159 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
160 multipath_comparekeys(&rt->fl, flp)) {
161 struct multipath_candidate* mpc =
162 (struct multipath_candidate*)
163 kmalloc(size_mpc, GFP_ATOMIC);
164
165 if (!mpc)
166 return;
167
168 power += __multipath_lookup_weight(flp, rt) * 10000;
169
170 mpc->power = power;
171 mpc->rt = rt;
172 mpc->next = NULL;
173
174 if (!first_mpc)
175 first_mpc = mpc;
176 else
177 last_mpc->next = mpc;
178
179 last_mpc = mpc;
180 }
181 }
182
183 /* choose a weighted random candidate */
184 decision = first;
185 selector = random32() % power;
186 last_power = 0;
187
188 /* select candidate, adjust GC data and cleanup local state */
189 decision = first;
190 last_mpc = NULL;
191 for (mpc = first_mpc; mpc; mpc = mpc->next) {
192 mpc->rt->u.dst.lastuse = jiffies;
193 if (last_power <= selector && selector < mpc->power)
194 decision = mpc->rt;
195
196 last_power = mpc->power;
197 kfree(last_mpc);
198 last_mpc = mpc;
199 }
200
201 /* concurrent __multipath_flush may lead to !last_mpc */
202 kfree(last_mpc);
203
204 decision->u.dst.__use++;
205 *rp = decision;
206}
207
208static void wrandom_set_nhinfo(__be32 network,
209 __be32 netmask,
210 unsigned char prefixlen,
211 const struct fib_nh *nh)
212{
213 const int state_idx = nh->nh_oif % MULTIPATH_STATE_SIZE;
214 struct multipath_route *r, *target_route = NULL;
215 struct multipath_dest *d, *target_dest = NULL;
216
217 /* store the weight information for a certain route */
218 spin_lock_bh(&state[state_idx].lock);
219
220 /* find state entry for gateway or add one if necessary */
221 list_for_each_entry_rcu(r, &state[state_idx].head, list) {
222 if (r->gw == nh->nh_gw && r->oif == nh->nh_oif) {
223 target_route = r;
224 break;
225 }
226 }
227
228 if (!target_route) {
229 const size_t size_rt = sizeof(struct multipath_route);
230 target_route = (struct multipath_route *)
231 kmalloc(size_rt, GFP_ATOMIC);
232
233 target_route->gw = nh->nh_gw;
234 target_route->oif = nh->nh_oif;
235 memset(&target_route->rcu, 0, sizeof(struct rcu_head));
236 INIT_LIST_HEAD(&target_route->dests);
237
238 list_add_rcu(&target_route->list, &state[state_idx].head);
239 }
240
241 /* find state entry for destination or add one if necessary */
242 list_for_each_entry_rcu(d, &target_route->dests, list) {
243 if (d->nh_info == nh) {
244 target_dest = d;
245 break;
246 }
247 }
248
249 if (!target_dest) {
250 const size_t size_dst = sizeof(struct multipath_dest);
251 target_dest = (struct multipath_dest*)
252 kmalloc(size_dst, GFP_ATOMIC);
253
254 target_dest->nh_info = nh;
255 target_dest->network = network;
256 target_dest->netmask = netmask;
257 target_dest->prefixlen = prefixlen;
258 memset(&target_dest->rcu, 0, sizeof(struct rcu_head));
259
260 list_add_rcu(&target_dest->list, &target_route->dests);
261 }
262 /* else: we already stored this info for another destination =>
263 * we are finished
264 */
265
266 spin_unlock_bh(&state[state_idx].lock);
267}
268
269static void __multipath_free(struct rcu_head *head)
270{
271 struct multipath_route *rt = container_of(head, struct multipath_route,
272 rcu);
273 kfree(rt);
274}
275
276static void __multipath_free_dst(struct rcu_head *head)
277{
278 struct multipath_dest *dst = container_of(head,
279 struct multipath_dest,
280 rcu);
281 kfree(dst);
282}
283
284static void wrandom_flush(void)
285{
286 int i;
287
288 /* defere delete to all entries */
289 for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) {
290 struct multipath_route *r;
291
292 spin_lock_bh(&state[i].lock);
293 list_for_each_entry_rcu(r, &state[i].head, list) {
294 struct multipath_dest *d;
295 list_for_each_entry_rcu(d, &r->dests, list) {
296 list_del_rcu(&d->list);
297 call_rcu(&d->rcu,
298 __multipath_free_dst);
299 }
300 list_del_rcu(&r->list);
301 call_rcu(&r->rcu,
302 __multipath_free);
303 }
304
305 spin_unlock_bh(&state[i].lock);
306 }
307}
308
309static struct ip_mp_alg_ops wrandom_ops = {
310 .mp_alg_select_route = wrandom_select_route,
311 .mp_alg_flush = wrandom_flush,
312 .mp_alg_set_nhinfo = wrandom_set_nhinfo,
313};
314
315static int __init wrandom_init(void)
316{
317 wrandom_init_state();
318
319 return multipath_alg_register(&wrandom_ops, IP_MP_ALG_WRANDOM);
320}
321
322static void __exit wrandom_exit(void)
323{
324 multipath_alg_unregister(&wrandom_ops, IP_MP_ALG_WRANDOM);
325}
326
327module_init(wrandom_init);
328module_exit(wrandom_exit);
329MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 46509fae9fd8..fa97947c6ae1 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -230,7 +230,7 @@ config IP_NF_TARGET_NETMAP
230 To compile it as a module, choose M here. If unsure, say N. 230 To compile it as a module, choose M here. If unsure, say N.
231 231
232config IP_NF_TARGET_SAME 232config IP_NF_TARGET_SAME
233 tristate "SAME target support" 233 tristate "SAME target support (OBSOLETE)"
234 depends on NF_NAT 234 depends on NF_NAT
235 help 235 help
236 This option adds a `SAME' target, which works like the standard SNAT 236 This option adds a `SAME' target, which works like the standard SNAT
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index cae41215e3c7..e981232942a1 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -224,7 +224,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
224 static const char nulldevname[IFNAMSIZ]; 224 static const char nulldevname[IFNAMSIZ];
225 unsigned int verdict = NF_DROP; 225 unsigned int verdict = NF_DROP;
226 struct arphdr *arp; 226 struct arphdr *arp;
227 int hotdrop = 0; 227 bool hotdrop = false;
228 struct arpt_entry *e, *back; 228 struct arpt_entry *e, *back;
229 const char *indev, *outdev; 229 const char *indev, *outdev;
230 void *table_base; 230 void *table_base;
@@ -1140,13 +1140,13 @@ void arpt_unregister_table(struct arpt_table *table)
1140} 1140}
1141 1141
1142/* The built-in targets: standard (NULL) and error. */ 1142/* The built-in targets: standard (NULL) and error. */
1143static struct arpt_target arpt_standard_target = { 1143static struct arpt_target arpt_standard_target __read_mostly = {
1144 .name = ARPT_STANDARD_TARGET, 1144 .name = ARPT_STANDARD_TARGET,
1145 .targetsize = sizeof(int), 1145 .targetsize = sizeof(int),
1146 .family = NF_ARP, 1146 .family = NF_ARP,
1147}; 1147};
1148 1148
1149static struct arpt_target arpt_error_target = { 1149static struct arpt_target arpt_error_target __read_mostly = {
1150 .name = ARPT_ERROR_TARGET, 1150 .name = ARPT_ERROR_TARGET,
1151 .target = arpt_error, 1151 .target = arpt_error,
1152 .targetsize = ARPT_FUNCTION_MAXNAMELEN, 1152 .targetsize = ARPT_FUNCTION_MAXNAMELEN,
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index 6298d404e7c7..c4bdab47597f 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -65,7 +65,7 @@ target(struct sk_buff **pskb,
65 return mangle->target; 65 return mangle->target;
66} 66}
67 67
68static int 68static bool
69checkentry(const char *tablename, const void *e, const struct xt_target *target, 69checkentry(const char *tablename, const void *e, const struct xt_target *target,
70 void *targinfo, unsigned int hook_mask) 70 void *targinfo, unsigned int hook_mask)
71{ 71{
@@ -73,15 +73,15 @@ checkentry(const char *tablename, const void *e, const struct xt_target *target,
73 73
74 if (mangle->flags & ~ARPT_MANGLE_MASK || 74 if (mangle->flags & ~ARPT_MANGLE_MASK ||
75 !(mangle->flags & ARPT_MANGLE_MASK)) 75 !(mangle->flags & ARPT_MANGLE_MASK))
76 return 0; 76 return false;
77 77
78 if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT && 78 if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
79 mangle->target != ARPT_CONTINUE) 79 mangle->target != ARPT_CONTINUE)
80 return 0; 80 return false;
81 return 1; 81 return true;
82} 82}
83 83
84static struct arpt_target arpt_mangle_reg = { 84static struct arpt_target arpt_mangle_reg __read_mostly = {
85 .name = "mangle", 85 .name = "mangle",
86 .target = target, 86 .target = target,
87 .targetsize = sizeof(struct arpt_mangle), 87 .targetsize = sizeof(struct arpt_mangle),
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 9bacf1a03630..e1b402c6b855 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -152,20 +152,20 @@ ip_packet_match(const struct iphdr *ip,
152 return 1; 152 return 1;
153} 153}
154 154
155static inline int 155static inline bool
156ip_checkentry(const struct ipt_ip *ip) 156ip_checkentry(const struct ipt_ip *ip)
157{ 157{
158 if (ip->flags & ~IPT_F_MASK) { 158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n", 159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK); 160 ip->flags & ~IPT_F_MASK);
161 return 0; 161 return false;
162 } 162 }
163 if (ip->invflags & ~IPT_INV_MASK) { 163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n", 164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK); 165 ip->invflags & ~IPT_INV_MASK);
166 return 0; 166 return false;
167 } 167 }
168 return 1; 168 return true;
169} 169}
170 170
171static unsigned int 171static unsigned int
@@ -183,19 +183,19 @@ ipt_error(struct sk_buff **pskb,
183} 183}
184 184
185static inline 185static inline
186int do_match(struct ipt_entry_match *m, 186bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb, 187 const struct sk_buff *skb,
188 const struct net_device *in, 188 const struct net_device *in,
189 const struct net_device *out, 189 const struct net_device *out,
190 int offset, 190 int offset,
191 int *hotdrop) 191 bool *hotdrop)
192{ 192{
193 /* Stop iteration if it doesn't match */ 193 /* Stop iteration if it doesn't match */
194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data, 194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
195 offset, ip_hdrlen(skb), hotdrop)) 195 offset, ip_hdrlen(skb), hotdrop))
196 return 1; 196 return true;
197 else 197 else
198 return 0; 198 return false;
199} 199}
200 200
201static inline struct ipt_entry * 201static inline struct ipt_entry *
@@ -204,6 +204,112 @@ get_entry(void *base, unsigned int offset)
204 return (struct ipt_entry *)(base + offset); 204 return (struct ipt_entry *)(base + offset);
205} 205}
206 206
207/* All zeroes == unconditional rule. */
208static inline int
209unconditional(const struct ipt_ip *ip)
210{
211 unsigned int i;
212
213 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
214 if (((__u32 *)ip)[i])
215 return 0;
216
217 return 1;
218}
219
220#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
221 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
222static const char *hooknames[] = {
223 [NF_IP_PRE_ROUTING] = "PREROUTING",
224 [NF_IP_LOCAL_IN] = "INPUT",
225 [NF_IP_FORWARD] = "FORWARD",
226 [NF_IP_LOCAL_OUT] = "OUTPUT",
227 [NF_IP_POST_ROUTING] = "POSTROUTING",
228};
229
230enum nf_ip_trace_comments {
231 NF_IP_TRACE_COMMENT_RULE,
232 NF_IP_TRACE_COMMENT_RETURN,
233 NF_IP_TRACE_COMMENT_POLICY,
234};
235
236static const char *comments[] = {
237 [NF_IP_TRACE_COMMENT_RULE] = "rule",
238 [NF_IP_TRACE_COMMENT_RETURN] = "return",
239 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
240};
241
242static struct nf_loginfo trace_loginfo = {
243 .type = NF_LOG_TYPE_LOG,
244 .u = {
245 .log = {
246 .level = 4,
247 .logflags = NF_LOG_MASK,
248 },
249 },
250};
251
252static inline int
253get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
254 char *hookname, char **chainname,
255 char **comment, unsigned int *rulenum)
256{
257 struct ipt_standard_target *t = (void *)ipt_get_target(s);
258
259 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
260 /* Head of user chain: ERROR target with chainname */
261 *chainname = t->target.data;
262 (*rulenum) = 0;
263 } else if (s == e) {
264 (*rulenum)++;
265
266 if (s->target_offset == sizeof(struct ipt_entry)
267 && strcmp(t->target.u.kernel.target->name,
268 IPT_STANDARD_TARGET) == 0
269 && t->verdict < 0
270 && unconditional(&s->ip)) {
271 /* Tail of chains: STANDARD target (return/policy) */
272 *comment = *chainname == hookname
273 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
274 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
275 }
276 return 1;
277 } else
278 (*rulenum)++;
279
280 return 0;
281}
282
283static void trace_packet(struct sk_buff *skb,
284 unsigned int hook,
285 const struct net_device *in,
286 const struct net_device *out,
287 char *tablename,
288 struct xt_table_info *private,
289 struct ipt_entry *e)
290{
291 void *table_base;
292 struct ipt_entry *root;
293 char *hookname, *chainname, *comment;
294 unsigned int rulenum = 0;
295
296 table_base = (void *)private->entries[smp_processor_id()];
297 root = get_entry(table_base, private->hook_entry[hook]);
298
299 hookname = chainname = (char *)hooknames[hook];
300 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
301
302 IPT_ENTRY_ITERATE(root,
303 private->size - private->hook_entry[hook],
304 get_chainname_rulenum,
305 e, hookname, &chainname, &comment, &rulenum);
306
307 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
308 "TRACE: %s:%s:%s:%u ",
309 tablename, chainname, comment, rulenum);
310}
311#endif
312
207/* Returns one of the generic firewall policies, like NF_ACCEPT. */ 313/* Returns one of the generic firewall policies, like NF_ACCEPT. */
208unsigned int 314unsigned int
209ipt_do_table(struct sk_buff **pskb, 315ipt_do_table(struct sk_buff **pskb,
@@ -216,7 +322,7 @@ ipt_do_table(struct sk_buff **pskb,
216 u_int16_t offset; 322 u_int16_t offset;
217 struct iphdr *ip; 323 struct iphdr *ip;
218 u_int16_t datalen; 324 u_int16_t datalen;
219 int hotdrop = 0; 325 bool hotdrop = false;
220 /* Initializing verdict to NF_DROP keeps gcc happy. */ 326 /* Initializing verdict to NF_DROP keeps gcc happy. */
221 unsigned int verdict = NF_DROP; 327 unsigned int verdict = NF_DROP;
222 const char *indev, *outdev; 328 const char *indev, *outdev;
@@ -261,6 +367,14 @@ ipt_do_table(struct sk_buff **pskb,
261 367
262 t = ipt_get_target(e); 368 t = ipt_get_target(e);
263 IP_NF_ASSERT(t->u.kernel.target); 369 IP_NF_ASSERT(t->u.kernel.target);
370
371#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
372 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
373 /* The packet is traced: log it */
374 if (unlikely((*pskb)->nf_trace))
375 trace_packet(*pskb, hook, in, out,
376 table->name, private, e);
377#endif
264 /* Standard target? */ 378 /* Standard target? */
265 if (!t->u.kernel.target->target) { 379 if (!t->u.kernel.target->target) {
266 int v; 380 int v;
@@ -341,19 +455,6 @@ ipt_do_table(struct sk_buff **pskb,
341#endif 455#endif
342} 456}
343 457
344/* All zeroes == unconditional rule. */
345static inline int
346unconditional(const struct ipt_ip *ip)
347{
348 unsigned int i;
349
350 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
351 if (((__u32 *)ip)[i])
352 return 0;
353
354 return 1;
355}
356
357/* Figures out from what hook each rule can be called: returns 0 if 458/* Figures out from what hook each rule can be called: returns 0 if
358 there are loops. Puts hook bitmask in comefrom. */ 459 there are loops. Puts hook bitmask in comefrom. */
359static int 460static int
@@ -2105,16 +2206,16 @@ void ipt_unregister_table(struct xt_table *table)
2105} 2206}
2106 2207
2107/* Returns 1 if the type and code is matched by the range, 0 otherwise */ 2208/* Returns 1 if the type and code is matched by the range, 0 otherwise */
2108static inline int 2209static inline bool
2109icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, 2210icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2110 u_int8_t type, u_int8_t code, 2211 u_int8_t type, u_int8_t code,
2111 int invert) 2212 bool invert)
2112{ 2213{
2113 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code)) 2214 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
2114 ^ invert; 2215 ^ invert;
2115} 2216}
2116 2217
2117static int 2218static bool
2118icmp_match(const struct sk_buff *skb, 2219icmp_match(const struct sk_buff *skb,
2119 const struct net_device *in, 2220 const struct net_device *in,
2120 const struct net_device *out, 2221 const struct net_device *out,
@@ -2122,14 +2223,14 @@ icmp_match(const struct sk_buff *skb,
2122 const void *matchinfo, 2223 const void *matchinfo,
2123 int offset, 2224 int offset,
2124 unsigned int protoff, 2225 unsigned int protoff,
2125 int *hotdrop) 2226 bool *hotdrop)
2126{ 2227{
2127 struct icmphdr _icmph, *ic; 2228 struct icmphdr _icmph, *ic;
2128 const struct ipt_icmp *icmpinfo = matchinfo; 2229 const struct ipt_icmp *icmpinfo = matchinfo;
2129 2230
2130 /* Must not be a fragment. */ 2231 /* Must not be a fragment. */
2131 if (offset) 2232 if (offset)
2132 return 0; 2233 return false;
2133 2234
2134 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph); 2235 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2135 if (ic == NULL) { 2236 if (ic == NULL) {
@@ -2137,8 +2238,8 @@ icmp_match(const struct sk_buff *skb,
2137 * can't. Hence, no choice but to drop. 2238 * can't. Hence, no choice but to drop.
2138 */ 2239 */
2139 duprintf("Dropping evil ICMP tinygram.\n"); 2240 duprintf("Dropping evil ICMP tinygram.\n");
2140 *hotdrop = 1; 2241 *hotdrop = true;
2141 return 0; 2242 return false;
2142 } 2243 }
2143 2244
2144 return icmp_type_code_match(icmpinfo->type, 2245 return icmp_type_code_match(icmpinfo->type,
@@ -2149,7 +2250,7 @@ icmp_match(const struct sk_buff *skb,
2149} 2250}
2150 2251
2151/* Called when user tries to insert an entry of this type. */ 2252/* Called when user tries to insert an entry of this type. */
2152static int 2253static bool
2153icmp_checkentry(const char *tablename, 2254icmp_checkentry(const char *tablename,
2154 const void *info, 2255 const void *info,
2155 const struct xt_match *match, 2256 const struct xt_match *match,
@@ -2163,7 +2264,7 @@ icmp_checkentry(const char *tablename,
2163} 2264}
2164 2265
2165/* The built-in targets: standard (NULL) and error. */ 2266/* The built-in targets: standard (NULL) and error. */
2166static struct xt_target ipt_standard_target = { 2267static struct xt_target ipt_standard_target __read_mostly = {
2167 .name = IPT_STANDARD_TARGET, 2268 .name = IPT_STANDARD_TARGET,
2168 .targetsize = sizeof(int), 2269 .targetsize = sizeof(int),
2169 .family = AF_INET, 2270 .family = AF_INET,
@@ -2174,7 +2275,7 @@ static struct xt_target ipt_standard_target = {
2174#endif 2275#endif
2175}; 2276};
2176 2277
2177static struct xt_target ipt_error_target = { 2278static struct xt_target ipt_error_target __read_mostly = {
2178 .name = IPT_ERROR_TARGET, 2279 .name = IPT_ERROR_TARGET,
2179 .target = ipt_error, 2280 .target = ipt_error,
2180 .targetsize = IPT_FUNCTION_MAXNAMELEN, 2281 .targetsize = IPT_FUNCTION_MAXNAMELEN,
@@ -2197,7 +2298,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
2197#endif 2298#endif
2198}; 2299};
2199 2300
2200static struct xt_match icmp_matchstruct = { 2301static struct xt_match icmp_matchstruct __read_mostly = {
2201 .name = "icmp", 2302 .name = "icmp",
2202 .match = icmp_match, 2303 .match = icmp_match,
2203 .matchsize = sizeof(struct ipt_icmp), 2304 .matchsize = sizeof(struct ipt_icmp),
@@ -2230,7 +2331,7 @@ static int __init ip_tables_init(void)
2230 if (ret < 0) 2331 if (ret < 0)
2231 goto err5; 2332 goto err5;
2232 2333
2233 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n"); 2334 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2234 return 0; 2335 return 0;
2235 2336
2236err5: 2337err5:
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 40e273421398..dcc12b183474 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -30,14 +30,6 @@
30 30
31#define CLUSTERIP_VERSION "0.8" 31#define CLUSTERIP_VERSION "0.8"
32 32
33#define DEBUG_CLUSTERIP
34
35#ifdef DEBUG_CLUSTERIP
36#define DEBUGP printk
37#else
38#define DEBUGP
39#endif
40
41MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
42MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 34MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
43MODULE_DESCRIPTION("iptables target for CLUSTERIP"); 35MODULE_DESCRIPTION("iptables target for CLUSTERIP");
@@ -122,9 +114,8 @@ __clusterip_config_find(__be32 clusterip)
122 list_for_each(pos, &clusterip_configs) { 114 list_for_each(pos, &clusterip_configs) {
123 struct clusterip_config *c = list_entry(pos, 115 struct clusterip_config *c = list_entry(pos,
124 struct clusterip_config, list); 116 struct clusterip_config, list);
125 if (c->clusterip == clusterip) { 117 if (c->clusterip == clusterip)
126 return c; 118 return c;
127 }
128 } 119 }
129 120
130 return NULL; 121 return NULL;
@@ -155,9 +146,8 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
155{ 146{
156 int n; 147 int n;
157 148
158 for (n = 0; n < i->num_local_nodes; n++) { 149 for (n = 0; n < i->num_local_nodes; n++)
159 set_bit(i->local_nodes[n] - 1, &c->local_nodes); 150 set_bit(i->local_nodes[n] - 1, &c->local_nodes);
160 }
161} 151}
162 152
163static struct clusterip_config * 153static struct clusterip_config *
@@ -220,27 +210,28 @@ clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum)
220 return 0; 210 return 0;
221} 211}
222 212
223static int 213static bool
224clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) 214clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum)
225{ 215{
226 if (nodenum == 0 || 216 if (nodenum == 0 ||
227 nodenum > c->num_total_nodes) 217 nodenum > c->num_total_nodes)
228 return 1; 218 return true;
229 219
230 if (test_and_clear_bit(nodenum - 1, &c->local_nodes)) 220 if (test_and_clear_bit(nodenum - 1, &c->local_nodes))
231 return 0; 221 return false;
232 222
233 return 1; 223 return true;
234} 224}
235#endif 225#endif
236 226
237static inline u_int32_t 227static inline u_int32_t
238clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) 228clusterip_hashfn(const struct sk_buff *skb,
229 const struct clusterip_config *config)
239{ 230{
240 struct iphdr *iph = ip_hdr(skb); 231 const struct iphdr *iph = ip_hdr(skb);
241 unsigned long hashval; 232 unsigned long hashval;
242 u_int16_t sport, dport; 233 u_int16_t sport, dport;
243 u_int16_t *ports; 234 const u_int16_t *ports;
244 235
245 switch (iph->protocol) { 236 switch (iph->protocol) {
246 case IPPROTO_TCP: 237 case IPPROTO_TCP:
@@ -249,15 +240,14 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
249 case IPPROTO_SCTP: 240 case IPPROTO_SCTP:
250 case IPPROTO_DCCP: 241 case IPPROTO_DCCP:
251 case IPPROTO_ICMP: 242 case IPPROTO_ICMP:
252 ports = (void *)iph+iph->ihl*4; 243 ports = (const void *)iph+iph->ihl*4;
253 sport = ports[0]; 244 sport = ports[0];
254 dport = ports[1]; 245 dport = ports[1];
255 break; 246 break;
256 default: 247 default:
257 if (net_ratelimit()) { 248 if (net_ratelimit())
258 printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n", 249 printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n",
259 iph->protocol); 250 iph->protocol);
260 }
261 sport = dport = 0; 251 sport = dport = 0;
262 } 252 }
263 253
@@ -285,11 +275,11 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
285 } 275 }
286 276
287 /* node numbers are 1..n, not 0..n */ 277 /* node numbers are 1..n, not 0..n */
288 return ((hashval % config->num_total_nodes)+1); 278 return (hashval % config->num_total_nodes) + 1;
289} 279}
290 280
291static inline int 281static inline int
292clusterip_responsible(struct clusterip_config *config, u_int32_t hash) 282clusterip_responsible(const struct clusterip_config *config, u_int32_t hash)
293{ 283{
294 return test_bit(hash - 1, &config->local_nodes); 284 return test_bit(hash - 1, &config->local_nodes);
295} 285}
@@ -353,15 +343,15 @@ target(struct sk_buff **pskb,
353 break; 343 break;
354 } 344 }
355 345
356#ifdef DEBUG_CLUSTERP 346#ifdef DEBUG
357 DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 347 DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
358#endif 348#endif
359 DEBUGP("hash=%u ct_hash=%u ", hash, ct->mark); 349 pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
360 if (!clusterip_responsible(cipinfo->config, hash)) { 350 if (!clusterip_responsible(cipinfo->config, hash)) {
361 DEBUGP("not responsible\n"); 351 pr_debug("not responsible\n");
362 return NF_DROP; 352 return NF_DROP;
363 } 353 }
364 DEBUGP("responsible\n"); 354 pr_debug("responsible\n");
365 355
366 /* despite being received via linklayer multicast, this is 356 /* despite being received via linklayer multicast, this is
367 * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */ 357 * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */
@@ -370,7 +360,7 @@ target(struct sk_buff **pskb,
370 return XT_CONTINUE; 360 return XT_CONTINUE;
371} 361}
372 362
373static int 363static bool
374checkentry(const char *tablename, 364checkentry(const char *tablename,
375 const void *e_void, 365 const void *e_void,
376 const struct xt_target *target, 366 const struct xt_target *target,
@@ -387,50 +377,34 @@ checkentry(const char *tablename,
387 cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) { 377 cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
388 printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n", 378 printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n",
389 cipinfo->hash_mode); 379 cipinfo->hash_mode);
390 return 0; 380 return false;
391 381
392 } 382 }
393 if (e->ip.dmsk.s_addr != htonl(0xffffffff) 383 if (e->ip.dmsk.s_addr != htonl(0xffffffff)
394 || e->ip.dst.s_addr == 0) { 384 || e->ip.dst.s_addr == 0) {
395 printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n"); 385 printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n");
396 return 0; 386 return false;
397 } 387 }
398 388
399 /* FIXME: further sanity checks */ 389 /* FIXME: further sanity checks */
400 390
401 config = clusterip_config_find_get(e->ip.dst.s_addr, 1); 391 config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
402 if (config) { 392 if (!config) {
403 if (cipinfo->config != NULL) {
404 /* Case A: This is an entry that gets reloaded, since
405 * it still has a cipinfo->config pointer. Simply
406 * increase the entry refcount and return */
407 if (cipinfo->config != config) {
408 printk(KERN_ERR "CLUSTERIP: Reloaded entry "
409 "has invalid config pointer!\n");
410 return 0;
411 }
412 } else {
413 /* Case B: This is a new rule referring to an existing
414 * clusterip config. */
415 cipinfo->config = config;
416 }
417 } else {
418 /* Case C: This is a completely new clusterip config */
419 if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { 393 if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
420 printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr)); 394 printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr));
421 return 0; 395 return false;
422 } else { 396 } else {
423 struct net_device *dev; 397 struct net_device *dev;
424 398
425 if (e->ip.iniface[0] == '\0') { 399 if (e->ip.iniface[0] == '\0') {
426 printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n"); 400 printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n");
427 return 0; 401 return false;
428 } 402 }
429 403
430 dev = dev_get_by_name(e->ip.iniface); 404 dev = dev_get_by_name(e->ip.iniface);
431 if (!dev) { 405 if (!dev) {
432 printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface); 406 printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface);
433 return 0; 407 return false;
434 } 408 }
435 409
436 config = clusterip_config_init(cipinfo, 410 config = clusterip_config_init(cipinfo,
@@ -438,20 +412,20 @@ checkentry(const char *tablename,
438 if (!config) { 412 if (!config) {
439 printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n"); 413 printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n");
440 dev_put(dev); 414 dev_put(dev);
441 return 0; 415 return false;
442 } 416 }
443 dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); 417 dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0);
444 } 418 }
445 cipinfo->config = config;
446 } 419 }
420 cipinfo->config = config;
447 421
448 if (nf_ct_l3proto_try_module_get(target->family) < 0) { 422 if (nf_ct_l3proto_try_module_get(target->family) < 0) {
449 printk(KERN_WARNING "can't load conntrack support for " 423 printk(KERN_WARNING "can't load conntrack support for "
450 "proto=%d\n", target->family); 424 "proto=%d\n", target->family);
451 return 0; 425 return false;
452 } 426 }
453 427
454 return 1; 428 return true;
455} 429}
456 430
457/* drop reference count of cluster config when rule is deleted */ 431/* drop reference count of cluster config when rule is deleted */
@@ -468,13 +442,30 @@ static void destroy(const struct xt_target *target, void *targinfo)
468 nf_ct_l3proto_module_put(target->family); 442 nf_ct_l3proto_module_put(target->family);
469} 443}
470 444
471static struct xt_target clusterip_tgt = { 445#ifdef CONFIG_COMPAT
446struct compat_ipt_clusterip_tgt_info
447{
448 u_int32_t flags;
449 u_int8_t clustermac[6];
450 u_int16_t num_total_nodes;
451 u_int16_t num_local_nodes;
452 u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
453 u_int32_t hash_mode;
454 u_int32_t hash_initval;
455 compat_uptr_t config;
456};
457#endif /* CONFIG_COMPAT */
458
459static struct xt_target clusterip_tgt __read_mostly = {
472 .name = "CLUSTERIP", 460 .name = "CLUSTERIP",
473 .family = AF_INET, 461 .family = AF_INET,
474 .target = target, 462 .target = target,
475 .targetsize = sizeof(struct ipt_clusterip_tgt_info),
476 .checkentry = checkentry, 463 .checkentry = checkentry,
477 .destroy = destroy, 464 .destroy = destroy,
465 .targetsize = sizeof(struct ipt_clusterip_tgt_info),
466#ifdef CONFIG_COMPAT
467 .compatsize = sizeof(struct compat_ipt_clusterip_tgt_info),
468#endif /* CONFIG_COMPAT */
478 .me = THIS_MODULE 469 .me = THIS_MODULE
479}; 470};
480 471
@@ -491,7 +482,7 @@ struct arp_payload {
491 __be32 dst_ip; 482 __be32 dst_ip;
492} __attribute__ ((packed)); 483} __attribute__ ((packed));
493 484
494#ifdef CLUSTERIP_DEBUG 485#ifdef DEBUG
495static void arp_print(struct arp_payload *payload) 486static void arp_print(struct arp_payload *payload)
496{ 487{
497#define HBUFFERLEN 30 488#define HBUFFERLEN 30
@@ -547,8 +538,9 @@ arp_mangle(unsigned int hook,
547 * this wouldn't work, since we didn't subscribe the mcast group on 538 * this wouldn't work, since we didn't subscribe the mcast group on
548 * other interfaces */ 539 * other interfaces */
549 if (c->dev != out) { 540 if (c->dev != out) {
550 DEBUGP("CLUSTERIP: not mangling arp reply on different " 541 pr_debug("CLUSTERIP: not mangling arp reply on different "
551 "interface: cip'%s'-skb'%s'\n", c->dev->name, out->name); 542 "interface: cip'%s'-skb'%s'\n",
543 c->dev->name, out->name);
552 clusterip_config_put(c); 544 clusterip_config_put(c);
553 return NF_ACCEPT; 545 return NF_ACCEPT;
554 } 546 }
@@ -556,8 +548,8 @@ arp_mangle(unsigned int hook,
556 /* mangle reply hardware address */ 548 /* mangle reply hardware address */
557 memcpy(payload->src_hw, c->clustermac, arp->ar_hln); 549 memcpy(payload->src_hw, c->clustermac, arp->ar_hln);
558 550
559#ifdef CLUSTERIP_DEBUG 551#ifdef DEBUG
560 DEBUGP(KERN_DEBUG "CLUSTERIP mangled arp reply: "); 552 pr_debug(KERN_DEBUG "CLUSTERIP mangled arp reply: ");
561 arp_print(payload); 553 arp_print(payload);
562#endif 554#endif
563 555
@@ -647,7 +639,7 @@ static int clusterip_seq_show(struct seq_file *s, void *v)
647 return 0; 639 return 0;
648} 640}
649 641
650static struct seq_operations clusterip_seq_ops = { 642static const struct seq_operations clusterip_seq_ops = {
651 .start = clusterip_seq_start, 643 .start = clusterip_seq_start,
652 .next = clusterip_seq_next, 644 .next = clusterip_seq_next,
653 .stop = clusterip_seq_stop, 645 .stop = clusterip_seq_stop,
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 918ca92e534a..f1253bd3837f 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -24,8 +24,8 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
24MODULE_DESCRIPTION("iptables ECN modification module"); 24MODULE_DESCRIPTION("iptables ECN modification module");
25 25
26/* set ECT codepoint from IP header. 26/* set ECT codepoint from IP header.
27 * return 0 if there was an error. */ 27 * return false if there was an error. */
28static inline int 28static inline bool
29set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) 29set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
30{ 30{
31 struct iphdr *iph = ip_hdr(*pskb); 31 struct iphdr *iph = ip_hdr(*pskb);
@@ -33,18 +33,18 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
33 if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { 33 if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
34 __u8 oldtos; 34 __u8 oldtos;
35 if (!skb_make_writable(pskb, sizeof(struct iphdr))) 35 if (!skb_make_writable(pskb, sizeof(struct iphdr)))
36 return 0; 36 return false;
37 iph = ip_hdr(*pskb); 37 iph = ip_hdr(*pskb);
38 oldtos = iph->tos; 38 oldtos = iph->tos;
39 iph->tos &= ~IPT_ECN_IP_MASK; 39 iph->tos &= ~IPT_ECN_IP_MASK;
40 iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); 40 iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
41 nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); 41 nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
42 } 42 }
43 return 1; 43 return true;
44} 44}
45 45
46/* Return 0 if there was an error. */ 46/* Return false if there was an error. */
47static inline int 47static inline bool
48set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) 48set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
49{ 49{
50 struct tcphdr _tcph, *tcph; 50 struct tcphdr _tcph, *tcph;
@@ -54,16 +54,16 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
54 tcph = skb_header_pointer(*pskb, ip_hdrlen(*pskb), 54 tcph = skb_header_pointer(*pskb, ip_hdrlen(*pskb),
55 sizeof(_tcph), &_tcph); 55 sizeof(_tcph), &_tcph);
56 if (!tcph) 56 if (!tcph)
57 return 0; 57 return false;
58 58
59 if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) || 59 if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) ||
60 tcph->ece == einfo->proto.tcp.ece) && 60 tcph->ece == einfo->proto.tcp.ece) &&
61 ((!(einfo->operation & IPT_ECN_OP_SET_CWR) || 61 (!(einfo->operation & IPT_ECN_OP_SET_CWR) ||
62 tcph->cwr == einfo->proto.tcp.cwr))) 62 tcph->cwr == einfo->proto.tcp.cwr))
63 return 1; 63 return true;
64 64
65 if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) 65 if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph)))
66 return 0; 66 return false;
67 tcph = (void *)ip_hdr(*pskb) + ip_hdrlen(*pskb); 67 tcph = (void *)ip_hdr(*pskb) + ip_hdrlen(*pskb);
68 68
69 oldval = ((__be16 *)tcph)[6]; 69 oldval = ((__be16 *)tcph)[6];
@@ -74,7 +74,7 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
74 74
75 nf_proto_csum_replace2(&tcph->check, *pskb, 75 nf_proto_csum_replace2(&tcph->check, *pskb,
76 oldval, ((__be16 *)tcph)[6], 0); 76 oldval, ((__be16 *)tcph)[6], 0);
77 return 1; 77 return true;
78} 78}
79 79
80static unsigned int 80static unsigned int
@@ -99,7 +99,7 @@ target(struct sk_buff **pskb,
99 return XT_CONTINUE; 99 return XT_CONTINUE;
100} 100}
101 101
102static int 102static bool
103checkentry(const char *tablename, 103checkentry(const char *tablename,
104 const void *e_void, 104 const void *e_void,
105 const struct xt_target *target, 105 const struct xt_target *target,
@@ -112,23 +112,23 @@ checkentry(const char *tablename,
112 if (einfo->operation & IPT_ECN_OP_MASK) { 112 if (einfo->operation & IPT_ECN_OP_MASK) {
113 printk(KERN_WARNING "ECN: unsupported ECN operation %x\n", 113 printk(KERN_WARNING "ECN: unsupported ECN operation %x\n",
114 einfo->operation); 114 einfo->operation);
115 return 0; 115 return false;
116 } 116 }
117 if (einfo->ip_ect & ~IPT_ECN_IP_MASK) { 117 if (einfo->ip_ect & ~IPT_ECN_IP_MASK) {
118 printk(KERN_WARNING "ECN: new ECT codepoint %x out of mask\n", 118 printk(KERN_WARNING "ECN: new ECT codepoint %x out of mask\n",
119 einfo->ip_ect); 119 einfo->ip_ect);
120 return 0; 120 return false;
121 } 121 }
122 if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) 122 if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR))
123 && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { 123 && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
124 printk(KERN_WARNING "ECN: cannot use TCP operations on a " 124 printk(KERN_WARNING "ECN: cannot use TCP operations on a "
125 "non-tcp rule\n"); 125 "non-tcp rule\n");
126 return 0; 126 return false;
127 } 127 }
128 return 1; 128 return true;
129} 129}
130 130
131static struct xt_target ipt_ecn_reg = { 131static struct xt_target ipt_ecn_reg __read_mostly = {
132 .name = "ECN", 132 .name = "ECN",
133 .family = AF_INET, 133 .family = AF_INET,
134 .target = target, 134 .target = target,
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index a42c5cd968b1..5937ad150b9f 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -27,12 +27,6 @@ MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
28MODULE_DESCRIPTION("iptables syslog logging module"); 28MODULE_DESCRIPTION("iptables syslog logging module");
29 29
30#if 0
31#define DEBUGP printk
32#else
33#define DEBUGP(format, args...)
34#endif
35
36/* Use lock to serialize, so printks don't overlap */ 30/* Use lock to serialize, so printks don't overlap */
37static DEFINE_SPINLOCK(log_lock); 31static DEFINE_SPINLOCK(log_lock);
38 32
@@ -41,7 +35,8 @@ static void dump_packet(const struct nf_loginfo *info,
41 const struct sk_buff *skb, 35 const struct sk_buff *skb,
42 unsigned int iphoff) 36 unsigned int iphoff)
43{ 37{
44 struct iphdr _iph, *ih; 38 struct iphdr _iph;
39 const struct iphdr *ih;
45 unsigned int logflags; 40 unsigned int logflags;
46 41
47 if (info->type == NF_LOG_TYPE_LOG) 42 if (info->type == NF_LOG_TYPE_LOG)
@@ -100,7 +95,8 @@ static void dump_packet(const struct nf_loginfo *info,
100 95
101 switch (ih->protocol) { 96 switch (ih->protocol) {
102 case IPPROTO_TCP: { 97 case IPPROTO_TCP: {
103 struct tcphdr _tcph, *th; 98 struct tcphdr _tcph;
99 const struct tcphdr *th;
104 100
105 /* Max length: 10 "PROTO=TCP " */ 101 /* Max length: 10 "PROTO=TCP " */
106 printk("PROTO=TCP "); 102 printk("PROTO=TCP ");
@@ -151,7 +147,7 @@ static void dump_packet(const struct nf_loginfo *info,
151 if ((logflags & IPT_LOG_TCPOPT) 147 if ((logflags & IPT_LOG_TCPOPT)
152 && th->doff * 4 > sizeof(struct tcphdr)) { 148 && th->doff * 4 > sizeof(struct tcphdr)) {
153 unsigned char _opt[4 * 15 - sizeof(struct tcphdr)]; 149 unsigned char _opt[4 * 15 - sizeof(struct tcphdr)];
154 unsigned char *op; 150 const unsigned char *op;
155 unsigned int i, optsize; 151 unsigned int i, optsize;
156 152
157 optsize = th->doff * 4 - sizeof(struct tcphdr); 153 optsize = th->doff * 4 - sizeof(struct tcphdr);
@@ -173,7 +169,8 @@ static void dump_packet(const struct nf_loginfo *info,
173 } 169 }
174 case IPPROTO_UDP: 170 case IPPROTO_UDP:
175 case IPPROTO_UDPLITE: { 171 case IPPROTO_UDPLITE: {
176 struct udphdr _udph, *uh; 172 struct udphdr _udph;
173 const struct udphdr *uh;
177 174
178 if (ih->protocol == IPPROTO_UDP) 175 if (ih->protocol == IPPROTO_UDP)
179 /* Max length: 10 "PROTO=UDP " */ 176 /* Max length: 10 "PROTO=UDP " */
@@ -200,7 +197,8 @@ static void dump_packet(const struct nf_loginfo *info,
200 break; 197 break;
201 } 198 }
202 case IPPROTO_ICMP: { 199 case IPPROTO_ICMP: {
203 struct icmphdr _icmph, *ich; 200 struct icmphdr _icmph;
201 const struct icmphdr *ich;
204 static const size_t required_len[NR_ICMP_TYPES+1] 202 static const size_t required_len[NR_ICMP_TYPES+1]
205 = { [ICMP_ECHOREPLY] = 4, 203 = { [ICMP_ECHOREPLY] = 4,
206 [ICMP_DEST_UNREACH] 204 [ICMP_DEST_UNREACH]
@@ -285,7 +283,8 @@ static void dump_packet(const struct nf_loginfo *info,
285 } 283 }
286 /* Max Length */ 284 /* Max Length */
287 case IPPROTO_AH: { 285 case IPPROTO_AH: {
288 struct ip_auth_hdr _ahdr, *ah; 286 struct ip_auth_hdr _ahdr;
287 const struct ip_auth_hdr *ah;
289 288
290 if (ntohs(ih->frag_off) & IP_OFFSET) 289 if (ntohs(ih->frag_off) & IP_OFFSET)
291 break; 290 break;
@@ -307,7 +306,8 @@ static void dump_packet(const struct nf_loginfo *info,
307 break; 306 break;
308 } 307 }
309 case IPPROTO_ESP: { 308 case IPPROTO_ESP: {
310 struct ip_esp_hdr _esph, *eh; 309 struct ip_esp_hdr _esph;
310 const struct ip_esp_hdr *eh;
311 311
312 /* Max length: 10 "PROTO=ESP " */ 312 /* Max length: 10 "PROTO=ESP " */
313 printk("PROTO=ESP "); 313 printk("PROTO=ESP ");
@@ -385,11 +385,13 @@ ipt_log_packet(unsigned int pf,
385 out ? out->name : ""); 385 out ? out->name : "");
386#ifdef CONFIG_BRIDGE_NETFILTER 386#ifdef CONFIG_BRIDGE_NETFILTER
387 if (skb->nf_bridge) { 387 if (skb->nf_bridge) {
388 struct net_device *physindev = skb->nf_bridge->physindev; 388 const struct net_device *physindev;
389 struct net_device *physoutdev = skb->nf_bridge->physoutdev; 389 const struct net_device *physoutdev;
390 390
391 physindev = skb->nf_bridge->physindev;
391 if (physindev && in != physindev) 392 if (physindev && in != physindev)
392 printk("PHYSIN=%s ", physindev->name); 393 printk("PHYSIN=%s ", physindev->name);
394 physoutdev = skb->nf_bridge->physoutdev;
393 if (physoutdev && out != physoutdev) 395 if (physoutdev && out != physoutdev)
394 printk("PHYSOUT=%s ", physoutdev->name); 396 printk("PHYSOUT=%s ", physoutdev->name);
395 } 397 }
@@ -435,27 +437,27 @@ ipt_log_target(struct sk_buff **pskb,
435 return XT_CONTINUE; 437 return XT_CONTINUE;
436} 438}
437 439
438static int ipt_log_checkentry(const char *tablename, 440static bool ipt_log_checkentry(const char *tablename,
439 const void *e, 441 const void *e,
440 const struct xt_target *target, 442 const struct xt_target *target,
441 void *targinfo, 443 void *targinfo,
442 unsigned int hook_mask) 444 unsigned int hook_mask)
443{ 445{
444 const struct ipt_log_info *loginfo = targinfo; 446 const struct ipt_log_info *loginfo = targinfo;
445 447
446 if (loginfo->level >= 8) { 448 if (loginfo->level >= 8) {
447 DEBUGP("LOG: level %u >= 8\n", loginfo->level); 449 pr_debug("LOG: level %u >= 8\n", loginfo->level);
448 return 0; 450 return false;
449 } 451 }
450 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { 452 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
451 DEBUGP("LOG: prefix term %i\n", 453 pr_debug("LOG: prefix term %i\n",
452 loginfo->prefix[sizeof(loginfo->prefix)-1]); 454 loginfo->prefix[sizeof(loginfo->prefix)-1]);
453 return 0; 455 return false;
454 } 456 }
455 return 1; 457 return true;
456} 458}
457 459
458static struct xt_target ipt_log_reg = { 460static struct xt_target ipt_log_reg __read_mostly = {
459 .name = "LOG", 461 .name = "LOG",
460 .family = AF_INET, 462 .family = AF_INET,
461 .target = ipt_log_target, 463 .target = ipt_log_target,
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d4f2d7775330..7c4e4be7c8b3 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -27,17 +27,11 @@ MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
28MODULE_DESCRIPTION("iptables MASQUERADE target module"); 28MODULE_DESCRIPTION("iptables MASQUERADE target module");
29 29
30#if 0
31#define DEBUGP printk
32#else
33#define DEBUGP(format, args...)
34#endif
35
36/* Lock protects masq region inside conntrack */ 30/* Lock protects masq region inside conntrack */
37static DEFINE_RWLOCK(masq_lock); 31static DEFINE_RWLOCK(masq_lock);
38 32
39/* FIXME: Multiple targets. --RR */ 33/* FIXME: Multiple targets. --RR */
40static int 34static bool
41masquerade_check(const char *tablename, 35masquerade_check(const char *tablename,
42 const void *e, 36 const void *e,
43 const struct xt_target *target, 37 const struct xt_target *target,
@@ -47,14 +41,14 @@ masquerade_check(const char *tablename,
47 const struct nf_nat_multi_range_compat *mr = targinfo; 41 const struct nf_nat_multi_range_compat *mr = targinfo;
48 42
49 if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { 43 if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
50 DEBUGP("masquerade_check: bad MAP_IPS.\n"); 44 pr_debug("masquerade_check: bad MAP_IPS.\n");
51 return 0; 45 return false;
52 } 46 }
53 if (mr->rangesize != 1) { 47 if (mr->rangesize != 1) {
54 DEBUGP("masquerade_check: bad rangesize %u.\n", mr->rangesize); 48 pr_debug("masquerade_check: bad rangesize %u\n", mr->rangesize);
55 return 0; 49 return false;
56 } 50 }
57 return 1; 51 return true;
58} 52}
59 53
60static unsigned int 54static unsigned int
@@ -70,7 +64,7 @@ masquerade_target(struct sk_buff **pskb,
70 enum ip_conntrack_info ctinfo; 64 enum ip_conntrack_info ctinfo;
71 struct nf_nat_range newrange; 65 struct nf_nat_range newrange;
72 const struct nf_nat_multi_range_compat *mr; 66 const struct nf_nat_multi_range_compat *mr;
73 struct rtable *rt; 67 const struct rtable *rt;
74 __be32 newsrc; 68 __be32 newsrc;
75 69
76 NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); 70 NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING);
@@ -109,10 +103,10 @@ masquerade_target(struct sk_buff **pskb,
109 return nf_nat_setup_info(ct, &newrange, hooknum); 103 return nf_nat_setup_info(ct, &newrange, hooknum);
110} 104}
111 105
112static inline int 106static int
113device_cmp(struct nf_conn *i, void *ifindex) 107device_cmp(struct nf_conn *i, void *ifindex)
114{ 108{
115 struct nf_conn_nat *nat = nfct_nat(i); 109 const struct nf_conn_nat *nat = nfct_nat(i);
116 int ret; 110 int ret;
117 111
118 if (!nat) 112 if (!nat)
@@ -129,7 +123,7 @@ static int masq_device_event(struct notifier_block *this,
129 unsigned long event, 123 unsigned long event,
130 void *ptr) 124 void *ptr)
131{ 125{
132 struct net_device *dev = ptr; 126 const struct net_device *dev = ptr;
133 127
134 if (event == NETDEV_DOWN) { 128 if (event == NETDEV_DOWN) {
135 /* Device was downed. Search entire table for 129 /* Device was downed. Search entire table for
@@ -147,7 +141,7 @@ static int masq_inet_event(struct notifier_block *this,
147 unsigned long event, 141 unsigned long event,
148 void *ptr) 142 void *ptr)
149{ 143{
150 struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; 144 const struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
151 145
152 if (event == NETDEV_DOWN) { 146 if (event == NETDEV_DOWN) {
153 /* IP address was deleted. Search entire table for 147 /* IP address was deleted. Search entire table for
@@ -169,7 +163,7 @@ static struct notifier_block masq_inet_notifier = {
169 .notifier_call = masq_inet_event, 163 .notifier_call = masq_inet_event,
170}; 164};
171 165
172static struct xt_target masquerade = { 166static struct xt_target masquerade __read_mostly = {
173 .name = "MASQUERADE", 167 .name = "MASQUERADE",
174 .family = AF_INET, 168 .family = AF_INET,
175 .target = masquerade_target, 169 .target = masquerade_target,
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 068c69bce30e..41a011d5a065 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -18,18 +18,11 @@
18#include <linux/netfilter/x_tables.h> 18#include <linux/netfilter/x_tables.h>
19#include <net/netfilter/nf_nat_rule.h> 19#include <net/netfilter/nf_nat_rule.h>
20 20
21#define MODULENAME "NETMAP"
22MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
23MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>"); 22MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>");
24MODULE_DESCRIPTION("iptables 1:1 NAT mapping of IP networks target"); 23MODULE_DESCRIPTION("iptables 1:1 NAT mapping of IP networks target");
25 24
26#if 0 25static bool
27#define DEBUGP printk
28#else
29#define DEBUGP(format, args...)
30#endif
31
32static int
33check(const char *tablename, 26check(const char *tablename,
34 const void *e, 27 const void *e,
35 const struct xt_target *target, 28 const struct xt_target *target,
@@ -39,14 +32,14 @@ check(const char *tablename,
39 const struct nf_nat_multi_range_compat *mr = targinfo; 32 const struct nf_nat_multi_range_compat *mr = targinfo;
40 33
41 if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { 34 if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) {
42 DEBUGP(MODULENAME":check: bad MAP_IPS.\n"); 35 pr_debug("NETMAP:check: bad MAP_IPS.\n");
43 return 0; 36 return false;
44 } 37 }
45 if (mr->rangesize != 1) { 38 if (mr->rangesize != 1) {
46 DEBUGP(MODULENAME":check: bad rangesize %u.\n", mr->rangesize); 39 pr_debug("NETMAP:check: bad rangesize %u.\n", mr->rangesize);
47 return 0; 40 return false;
48 } 41 }
49 return 1; 42 return true;
50} 43}
51 44
52static unsigned int 45static unsigned int
@@ -85,8 +78,8 @@ target(struct sk_buff **pskb,
85 return nf_nat_setup_info(ct, &newrange, hooknum); 78 return nf_nat_setup_info(ct, &newrange, hooknum);
86} 79}
87 80
88static struct xt_target target_module = { 81static struct xt_target target_module __read_mostly = {
89 .name = MODULENAME, 82 .name = "NETMAP",
90 .family = AF_INET, 83 .family = AF_INET,
91 .target = target, 84 .target = target,
92 .targetsize = sizeof(struct nf_nat_multi_range_compat), 85 .targetsize = sizeof(struct nf_nat_multi_range_compat),
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 68cc76a198eb..6ac7a2373316 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -25,14 +25,8 @@ MODULE_LICENSE("GPL");
25MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 25MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
26MODULE_DESCRIPTION("iptables REDIRECT target module"); 26MODULE_DESCRIPTION("iptables REDIRECT target module");
27 27
28#if 0
29#define DEBUGP printk
30#else
31#define DEBUGP(format, args...)
32#endif
33
34/* FIXME: Take multiple ranges --RR */ 28/* FIXME: Take multiple ranges --RR */
35static int 29static bool
36redirect_check(const char *tablename, 30redirect_check(const char *tablename,
37 const void *e, 31 const void *e,
38 const struct xt_target *target, 32 const struct xt_target *target,
@@ -42,14 +36,14 @@ redirect_check(const char *tablename,
42 const struct nf_nat_multi_range_compat *mr = targinfo; 36 const struct nf_nat_multi_range_compat *mr = targinfo;
43 37
44 if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { 38 if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
45 DEBUGP("redirect_check: bad MAP_IPS.\n"); 39 pr_debug("redirect_check: bad MAP_IPS.\n");
46 return 0; 40 return false;
47 } 41 }
48 if (mr->rangesize != 1) { 42 if (mr->rangesize != 1) {
49 DEBUGP("redirect_check: bad rangesize %u.\n", mr->rangesize); 43 pr_debug("redirect_check: bad rangesize %u.\n", mr->rangesize);
50 return 0; 44 return false;
51 } 45 }
52 return 1; 46 return true;
53} 47}
54 48
55static unsigned int 49static unsigned int
@@ -101,7 +95,7 @@ redirect_target(struct sk_buff **pskb,
101 return nf_nat_setup_info(ct, &newrange, hooknum); 95 return nf_nat_setup_info(ct, &newrange, hooknum);
102} 96}
103 97
104static struct xt_target redirect_reg = { 98static struct xt_target redirect_reg __read_mostly = {
105 .name = "REDIRECT", 99 .name = "REDIRECT",
106 .family = AF_INET, 100 .family = AF_INET,
107 .target = redirect_target, 101 .target = redirect_target,
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 9041e0741f6f..cb038c8fbc9d 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -31,12 +31,6 @@ MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
32MODULE_DESCRIPTION("iptables REJECT target module"); 32MODULE_DESCRIPTION("iptables REJECT target module");
33 33
34#if 0
35#define DEBUGP printk
36#else
37#define DEBUGP(format, args...)
38#endif
39
40/* Send RST reply */ 34/* Send RST reply */
41static void send_reset(struct sk_buff *oldskb, int hook) 35static void send_reset(struct sk_buff *oldskb, int hook)
42{ 36{
@@ -122,7 +116,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
122 tcph->check = 0; 116 tcph->check = 0;
123 tcph->check = tcp_v4_check(sizeof(struct tcphdr), 117 tcph->check = tcp_v4_check(sizeof(struct tcphdr),
124 niph->saddr, niph->daddr, 118 niph->saddr, niph->daddr,
125 csum_partial((char *)tcph, 119 csum_partial(tcph,
126 sizeof(struct tcphdr), 0)); 120 sizeof(struct tcphdr), 0));
127 121
128 /* Set DF, id = 0 */ 122 /* Set DF, id = 0 */
@@ -217,30 +211,30 @@ static unsigned int reject(struct sk_buff **pskb,
217 return NF_DROP; 211 return NF_DROP;
218} 212}
219 213
220static int check(const char *tablename, 214static bool check(const char *tablename,
221 const void *e_void, 215 const void *e_void,
222 const struct xt_target *target, 216 const struct xt_target *target,
223 void *targinfo, 217 void *targinfo,
224 unsigned int hook_mask) 218 unsigned int hook_mask)
225{ 219{
226 const struct ipt_reject_info *rejinfo = targinfo; 220 const struct ipt_reject_info *rejinfo = targinfo;
227 const struct ipt_entry *e = e_void; 221 const struct ipt_entry *e = e_void;
228 222
229 if (rejinfo->with == IPT_ICMP_ECHOREPLY) { 223 if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
230 printk("REJECT: ECHOREPLY no longer supported.\n"); 224 printk("ipt_REJECT: ECHOREPLY no longer supported.\n");
231 return 0; 225 return false;
232 } else if (rejinfo->with == IPT_TCP_RESET) { 226 } else if (rejinfo->with == IPT_TCP_RESET) {
233 /* Must specify that it's a TCP packet */ 227 /* Must specify that it's a TCP packet */
234 if (e->ip.proto != IPPROTO_TCP 228 if (e->ip.proto != IPPROTO_TCP
235 || (e->ip.invflags & XT_INV_PROTO)) { 229 || (e->ip.invflags & XT_INV_PROTO)) {
236 DEBUGP("REJECT: TCP_RESET invalid for non-tcp\n"); 230 printk("ipt_REJECT: TCP_RESET invalid for non-tcp\n");
237 return 0; 231 return false;
238 } 232 }
239 } 233 }
240 return 1; 234 return true;
241} 235}
242 236
243static struct xt_target ipt_reject_reg = { 237static struct xt_target ipt_reject_reg __read_mostly = {
244 .name = "REJECT", 238 .name = "REJECT",
245 .family = AF_INET, 239 .family = AF_INET,
246 .target = reject, 240 .target = reject,
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index 511e5ff84938..97641f1a97f6 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -27,13 +27,7 @@ MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Martin Josefsson <gandalf@wlug.westbo.se>"); 27MODULE_AUTHOR("Martin Josefsson <gandalf@wlug.westbo.se>");
28MODULE_DESCRIPTION("iptables special SNAT module for consistent sourceip"); 28MODULE_DESCRIPTION("iptables special SNAT module for consistent sourceip");
29 29
30#if 0 30static bool
31#define DEBUGP printk
32#else
33#define DEBUGP(format, args...)
34#endif
35
36static int
37same_check(const char *tablename, 31same_check(const char *tablename,
38 const void *e, 32 const void *e,
39 const struct xt_target *target, 33 const struct xt_target *target,
@@ -46,58 +40,56 @@ same_check(const char *tablename,
46 mr->ipnum = 0; 40 mr->ipnum = 0;
47 41
48 if (mr->rangesize < 1) { 42 if (mr->rangesize < 1) {
49 DEBUGP("same_check: need at least one dest range.\n"); 43 pr_debug("same_check: need at least one dest range.\n");
50 return 0; 44 return false;
51 } 45 }
52 if (mr->rangesize > IPT_SAME_MAX_RANGE) { 46 if (mr->rangesize > IPT_SAME_MAX_RANGE) {
53 DEBUGP("same_check: too many ranges specified, maximum " 47 pr_debug("same_check: too many ranges specified, maximum "
54 "is %u ranges\n", 48 "is %u ranges\n", IPT_SAME_MAX_RANGE);
55 IPT_SAME_MAX_RANGE); 49 return false;
56 return 0;
57 } 50 }
58 for (count = 0; count < mr->rangesize; count++) { 51 for (count = 0; count < mr->rangesize; count++) {
59 if (ntohl(mr->range[count].min_ip) > 52 if (ntohl(mr->range[count].min_ip) >
60 ntohl(mr->range[count].max_ip)) { 53 ntohl(mr->range[count].max_ip)) {
61 DEBUGP("same_check: min_ip is larger than max_ip in " 54 pr_debug("same_check: min_ip is larger than max_ip in "
62 "range `%u.%u.%u.%u-%u.%u.%u.%u'.\n", 55 "range `%u.%u.%u.%u-%u.%u.%u.%u'.\n",
63 NIPQUAD(mr->range[count].min_ip), 56 NIPQUAD(mr->range[count].min_ip),
64 NIPQUAD(mr->range[count].max_ip)); 57 NIPQUAD(mr->range[count].max_ip));
65 return 0; 58 return false;
66 } 59 }
67 if (!(mr->range[count].flags & IP_NAT_RANGE_MAP_IPS)) { 60 if (!(mr->range[count].flags & IP_NAT_RANGE_MAP_IPS)) {
68 DEBUGP("same_check: bad MAP_IPS.\n"); 61 pr_debug("same_check: bad MAP_IPS.\n");
69 return 0; 62 return false;
70 } 63 }
71 rangeip = (ntohl(mr->range[count].max_ip) - 64 rangeip = (ntohl(mr->range[count].max_ip) -
72 ntohl(mr->range[count].min_ip) + 1); 65 ntohl(mr->range[count].min_ip) + 1);
73 mr->ipnum += rangeip; 66 mr->ipnum += rangeip;
74 67
75 DEBUGP("same_check: range %u, ipnum = %u\n", count, rangeip); 68 pr_debug("same_check: range %u, ipnum = %u\n", count, rangeip);
76 } 69 }
77 DEBUGP("same_check: total ipaddresses = %u\n", mr->ipnum); 70 pr_debug("same_check: total ipaddresses = %u\n", mr->ipnum);
78 71
79 mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL); 72 mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL);
80 if (!mr->iparray) { 73 if (!mr->iparray) {
81 DEBUGP("same_check: Couldn't allocate %u bytes " 74 pr_debug("same_check: Couldn't allocate %Zu bytes "
82 "for %u ipaddresses!\n", 75 "for %u ipaddresses!\n",
83 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); 76 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
84 return 0; 77 return false;
85 } 78 }
86 DEBUGP("same_check: Allocated %u bytes for %u ipaddresses.\n", 79 pr_debug("same_check: Allocated %Zu bytes for %u ipaddresses.\n",
87 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); 80 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
88 81
89 for (count = 0; count < mr->rangesize; count++) { 82 for (count = 0; count < mr->rangesize; count++) {
90 for (countess = ntohl(mr->range[count].min_ip); 83 for (countess = ntohl(mr->range[count].min_ip);
91 countess <= ntohl(mr->range[count].max_ip); 84 countess <= ntohl(mr->range[count].max_ip);
92 countess++) { 85 countess++) {
93 mr->iparray[index] = countess; 86 mr->iparray[index] = countess;
94 DEBUGP("same_check: Added ipaddress `%u.%u.%u.%u' " 87 pr_debug("same_check: Added ipaddress `%u.%u.%u.%u' "
95 "in index %u.\n", 88 "in index %u.\n", HIPQUAD(countess), index);
96 HIPQUAD(countess), index);
97 index++; 89 index++;
98 } 90 }
99 } 91 }
100 return 1; 92 return true;
101} 93}
102 94
103static void 95static void
@@ -107,8 +99,8 @@ same_destroy(const struct xt_target *target, void *targinfo)
107 99
108 kfree(mr->iparray); 100 kfree(mr->iparray);
109 101
110 DEBUGP("same_destroy: Deallocated %u bytes for %u ipaddresses.\n", 102 pr_debug("same_destroy: Deallocated %Zu bytes for %u ipaddresses.\n",
111 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); 103 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
112} 104}
113 105
114static unsigned int 106static unsigned int
@@ -146,10 +138,9 @@ same_target(struct sk_buff **pskb,
146 138
147 new_ip = htonl(same->iparray[aindex]); 139 new_ip = htonl(same->iparray[aindex]);
148 140
149 DEBUGP("ipt_SAME: src=%u.%u.%u.%u dst=%u.%u.%u.%u, " 141 pr_debug("ipt_SAME: src=%u.%u.%u.%u dst=%u.%u.%u.%u, "
150 "new src=%u.%u.%u.%u\n", 142 "new src=%u.%u.%u.%u\n",
151 NIPQUAD(t->src.ip), NIPQUAD(t->dst.ip), 143 NIPQUAD(t->src.u3.ip), NIPQUAD(t->dst.u3.ip), NIPQUAD(new_ip));
152 NIPQUAD(new_ip));
153 144
154 /* Transfer from original range. */ 145 /* Transfer from original range. */
155 newrange = ((struct nf_nat_range) 146 newrange = ((struct nf_nat_range)
@@ -161,7 +152,7 @@ same_target(struct sk_buff **pskb,
161 return nf_nat_setup_info(ct, &newrange, hooknum); 152 return nf_nat_setup_info(ct, &newrange, hooknum);
162} 153}
163 154
164static struct xt_target same_reg = { 155static struct xt_target same_reg __read_mostly = {
165 .name = "SAME", 156 .name = "SAME",
166 .family = AF_INET, 157 .family = AF_INET,
167 .target = same_target, 158 .target = same_target,
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 0ad02f249837..25f5d0b39065 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -43,7 +43,7 @@ target(struct sk_buff **pskb,
43 return XT_CONTINUE; 43 return XT_CONTINUE;
44} 44}
45 45
46static int 46static bool
47checkentry(const char *tablename, 47checkentry(const char *tablename,
48 const void *e_void, 48 const void *e_void,
49 const struct xt_target *target, 49 const struct xt_target *target,
@@ -58,12 +58,12 @@ checkentry(const char *tablename,
58 && tos != IPTOS_MINCOST 58 && tos != IPTOS_MINCOST
59 && tos != IPTOS_NORMALSVC) { 59 && tos != IPTOS_NORMALSVC) {
60 printk(KERN_WARNING "TOS: bad tos value %#x\n", tos); 60 printk(KERN_WARNING "TOS: bad tos value %#x\n", tos);
61 return 0; 61 return false;
62 } 62 }
63 return 1; 63 return true;
64} 64}
65 65
66static struct xt_target ipt_tos_reg = { 66static struct xt_target ipt_tos_reg __read_mostly = {
67 .name = "TOS", 67 .name = "TOS",
68 .family = AF_INET, 68 .family = AF_INET,
69 .target = target, 69 .target = target,
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index a991ec7bd4e7..2b54e7b0cfe8 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -62,25 +62,25 @@ ipt_ttl_target(struct sk_buff **pskb,
62 return XT_CONTINUE; 62 return XT_CONTINUE;
63} 63}
64 64
65static int ipt_ttl_checkentry(const char *tablename, 65static bool ipt_ttl_checkentry(const char *tablename,
66 const void *e, 66 const void *e,
67 const struct xt_target *target, 67 const struct xt_target *target,
68 void *targinfo, 68 void *targinfo,
69 unsigned int hook_mask) 69 unsigned int hook_mask)
70{ 70{
71 struct ipt_TTL_info *info = targinfo; 71 const struct ipt_TTL_info *info = targinfo;
72 72
73 if (info->mode > IPT_TTL_MAXMODE) { 73 if (info->mode > IPT_TTL_MAXMODE) {
74 printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", 74 printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
75 info->mode); 75 info->mode);
76 return 0; 76 return false;
77 } 77 }
78 if ((info->mode != IPT_TTL_SET) && (info->ttl == 0)) 78 if (info->mode != IPT_TTL_SET && info->ttl == 0)
79 return 0; 79 return false;
80 return 1; 80 return true;
81} 81}
82 82
83static struct xt_target ipt_TTL = { 83static struct xt_target ipt_TTL __read_mostly = {
84 .name = "TTL", 84 .name = "TTL",
85 .family = AF_INET, 85 .family = AF_INET,
86 .target = ipt_ttl_target, 86 .target = ipt_ttl_target,
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 23b607b33b32..6ca43e4ca7e3 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -55,13 +55,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
55#define ULOG_NL_EVENT 111 /* Harald's favorite number */ 55#define ULOG_NL_EVENT 111 /* Harald's favorite number */
56#define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */ 56#define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */
57 57
58#if 0
59#define DEBUGP(format, args...) printk("%s:%s:" format, \
60 __FILE__, __FUNCTION__ , ## args)
61#else
62#define DEBUGP(format, args...)
63#endif
64
65#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) 58#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
66 59
67static unsigned int nlbufsiz = NLMSG_GOODSIZE; 60static unsigned int nlbufsiz = NLMSG_GOODSIZE;
@@ -96,12 +89,12 @@ static void ulog_send(unsigned int nlgroupnum)
96 ulog_buff_t *ub = &ulog_buffers[nlgroupnum]; 89 ulog_buff_t *ub = &ulog_buffers[nlgroupnum];
97 90
98 if (timer_pending(&ub->timer)) { 91 if (timer_pending(&ub->timer)) {
99 DEBUGP("ipt_ULOG: ulog_send: timer was pending, deleting\n"); 92 pr_debug("ipt_ULOG: ulog_send: timer was pending, deleting\n");
100 del_timer(&ub->timer); 93 del_timer(&ub->timer);
101 } 94 }
102 95
103 if (!ub->skb) { 96 if (!ub->skb) {
104 DEBUGP("ipt_ULOG: ulog_send: nothing to send\n"); 97 pr_debug("ipt_ULOG: ulog_send: nothing to send\n");
105 return; 98 return;
106 } 99 }
107 100
@@ -110,8 +103,8 @@ static void ulog_send(unsigned int nlgroupnum)
110 ub->lastnlh->nlmsg_type = NLMSG_DONE; 103 ub->lastnlh->nlmsg_type = NLMSG_DONE;
111 104
112 NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1; 105 NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
113 DEBUGP("ipt_ULOG: throwing %d packets to netlink group %u\n", 106 pr_debug("ipt_ULOG: throwing %d packets to netlink group %u\n",
114 ub->qlen, nlgroupnum + 1); 107 ub->qlen, nlgroupnum + 1);
115 netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC); 108 netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC);
116 109
117 ub->qlen = 0; 110 ub->qlen = 0;
@@ -123,7 +116,7 @@ static void ulog_send(unsigned int nlgroupnum)
123/* timer function to flush queue in flushtimeout time */ 116/* timer function to flush queue in flushtimeout time */
124static void ulog_timer(unsigned long data) 117static void ulog_timer(unsigned long data)
125{ 118{
126 DEBUGP("ipt_ULOG: timer function called, calling ulog_send\n"); 119 pr_debug("ipt_ULOG: timer function called, calling ulog_send\n");
127 120
128 /* lock to protect against somebody modifying our structure 121 /* lock to protect against somebody modifying our structure
129 * from ipt_ulog_target at the same time */ 122 * from ipt_ulog_target at the same time */
@@ -179,12 +172,10 @@ static void ipt_ulog_packet(unsigned int hooknum,
179 unsigned int groupnum = ffs(loginfo->nl_group) - 1; 172 unsigned int groupnum = ffs(loginfo->nl_group) - 1;
180 173
181 /* calculate the size of the skb needed */ 174 /* calculate the size of the skb needed */
182 if ((loginfo->copy_range == 0) || 175 if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
183 (loginfo->copy_range > skb->len)) {
184 copy_len = skb->len; 176 copy_len = skb->len;
185 } else { 177 else
186 copy_len = loginfo->copy_range; 178 copy_len = loginfo->copy_range;
187 }
188 179
189 size = NLMSG_SPACE(sizeof(*pm) + copy_len); 180 size = NLMSG_SPACE(sizeof(*pm) + copy_len);
190 181
@@ -206,8 +197,8 @@ static void ipt_ulog_packet(unsigned int hooknum,
206 goto alloc_failure; 197 goto alloc_failure;
207 } 198 }
208 199
209 DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen, 200 pr_debug("ipt_ULOG: qlen %d, qthreshold %Zu\n", ub->qlen,
210 loginfo->qthreshold); 201 loginfo->qthreshold);
211 202
212 /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ 203 /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
213 nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, 204 nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
@@ -257,9 +248,8 @@ static void ipt_ulog_packet(unsigned int hooknum,
257 BUG(); 248 BUG();
258 249
259 /* check if we are building multi-part messages */ 250 /* check if we are building multi-part messages */
260 if (ub->qlen > 1) { 251 if (ub->qlen > 1)
261 ub->lastnlh->nlmsg_flags |= NLM_F_MULTI; 252 ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
262 }
263 253
264 ub->lastnlh = nlh; 254 ub->lastnlh = nlh;
265 255
@@ -328,25 +318,25 @@ static void ipt_logfn(unsigned int pf,
328 ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); 318 ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
329} 319}
330 320
331static int ipt_ulog_checkentry(const char *tablename, 321static bool ipt_ulog_checkentry(const char *tablename,
332 const void *e, 322 const void *e,
333 const struct xt_target *target, 323 const struct xt_target *target,
334 void *targinfo, 324 void *targinfo,
335 unsigned int hookmask) 325 unsigned int hookmask)
336{ 326{
337 struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo; 327 const struct ipt_ulog_info *loginfo = targinfo;
338 328
339 if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') { 329 if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
340 DEBUGP("ipt_ULOG: prefix term %i\n", 330 pr_debug("ipt_ULOG: prefix term %i\n",
341 loginfo->prefix[sizeof(loginfo->prefix) - 1]); 331 loginfo->prefix[sizeof(loginfo->prefix) - 1]);
342 return 0; 332 return false;
343 } 333 }
344 if (loginfo->qthreshold > ULOG_MAX_QLEN) { 334 if (loginfo->qthreshold > ULOG_MAX_QLEN) {
345 DEBUGP("ipt_ULOG: queue threshold %i > MAX_QLEN\n", 335 pr_debug("ipt_ULOG: queue threshold %Zu > MAX_QLEN\n",
346 loginfo->qthreshold); 336 loginfo->qthreshold);
347 return 0; 337 return false;
348 } 338 }
349 return 1; 339 return true;
350} 340}
351 341
352#ifdef CONFIG_COMPAT 342#ifdef CONFIG_COMPAT
@@ -359,7 +349,7 @@ struct compat_ipt_ulog_info {
359 349
360static void compat_from_user(void *dst, void *src) 350static void compat_from_user(void *dst, void *src)
361{ 351{
362 struct compat_ipt_ulog_info *cl = src; 352 const struct compat_ipt_ulog_info *cl = src;
363 struct ipt_ulog_info l = { 353 struct ipt_ulog_info l = {
364 .nl_group = cl->nl_group, 354 .nl_group = cl->nl_group,
365 .copy_range = cl->copy_range, 355 .copy_range = cl->copy_range,
@@ -372,7 +362,7 @@ static void compat_from_user(void *dst, void *src)
372 362
373static int compat_to_user(void __user *dst, void *src) 363static int compat_to_user(void __user *dst, void *src)
374{ 364{
375 struct ipt_ulog_info *l = src; 365 const struct ipt_ulog_info *l = src;
376 struct compat_ipt_ulog_info cl = { 366 struct compat_ipt_ulog_info cl = {
377 .nl_group = l->nl_group, 367 .nl_group = l->nl_group,
378 .copy_range = l->copy_range, 368 .copy_range = l->copy_range,
@@ -384,7 +374,7 @@ static int compat_to_user(void __user *dst, void *src)
384} 374}
385#endif /* CONFIG_COMPAT */ 375#endif /* CONFIG_COMPAT */
386 376
387static struct xt_target ipt_ulog_reg = { 377static struct xt_target ipt_ulog_reg __read_mostly = {
388 .name = "ULOG", 378 .name = "ULOG",
389 .family = AF_INET, 379 .family = AF_INET,
390 .target = ipt_ulog_target, 380 .target = ipt_ulog_target,
@@ -408,7 +398,7 @@ static int __init ipt_ulog_init(void)
408{ 398{
409 int ret, i; 399 int ret, i;
410 400
411 DEBUGP("ipt_ULOG: init module\n"); 401 pr_debug("ipt_ULOG: init module\n");
412 402
413 if (nlbufsiz > 128*1024) { 403 if (nlbufsiz > 128*1024) {
414 printk("Netlink buffer has to be <= 128kB\n"); 404 printk("Netlink buffer has to be <= 128kB\n");
@@ -440,7 +430,7 @@ static void __exit ipt_ulog_fini(void)
440 ulog_buff_t *ub; 430 ulog_buff_t *ub;
441 int i; 431 int i;
442 432
443 DEBUGP("ipt_ULOG: cleanup_module\n"); 433 pr_debug("ipt_ULOG: cleanup_module\n");
444 434
445 if (nflog) 435 if (nflog)
446 nf_log_unregister(&ipt_ulog_logger); 436 nf_log_unregister(&ipt_ulog_logger);
@@ -451,7 +441,7 @@ static void __exit ipt_ulog_fini(void)
451 for (i = 0; i < ULOG_MAXNLGROUPS; i++) { 441 for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
452 ub = &ulog_buffers[i]; 442 ub = &ulog_buffers[i];
453 if (timer_pending(&ub->timer)) { 443 if (timer_pending(&ub->timer)) {
454 DEBUGP("timer was pending, deleting\n"); 444 pr_debug("timer was pending, deleting\n");
455 del_timer(&ub->timer); 445 del_timer(&ub->timer);
456 } 446 }
457 447
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index a652a1451552..59f01f7ba6b4 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -22,19 +22,19 @@ MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 22MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
23MODULE_DESCRIPTION("iptables addrtype match"); 23MODULE_DESCRIPTION("iptables addrtype match");
24 24
25static inline int match_type(__be32 addr, u_int16_t mask) 25static inline bool match_type(__be32 addr, u_int16_t mask)
26{ 26{
27 return !!(mask & (1 << inet_addr_type(addr))); 27 return !!(mask & (1 << inet_addr_type(addr)));
28} 28}
29 29
30static int match(const struct sk_buff *skb, 30static bool match(const struct sk_buff *skb,
31 const struct net_device *in, const struct net_device *out, 31 const struct net_device *in, const struct net_device *out,
32 const struct xt_match *match, const void *matchinfo, 32 const struct xt_match *match, const void *matchinfo,
33 int offset, unsigned int protoff, int *hotdrop) 33 int offset, unsigned int protoff, bool *hotdrop)
34{ 34{
35 const struct ipt_addrtype_info *info = matchinfo; 35 const struct ipt_addrtype_info *info = matchinfo;
36 const struct iphdr *iph = ip_hdr(skb); 36 const struct iphdr *iph = ip_hdr(skb);
37 int ret = 1; 37 bool ret = true;
38 38
39 if (info->source) 39 if (info->source)
40 ret &= match_type(iph->saddr, info->source)^info->invert_source; 40 ret &= match_type(iph->saddr, info->source)^info->invert_source;
@@ -44,7 +44,7 @@ static int match(const struct sk_buff *skb,
44 return ret; 44 return ret;
45} 45}
46 46
47static struct xt_match addrtype_match = { 47static struct xt_match addrtype_match __read_mostly = {
48 .name = "addrtype", 48 .name = "addrtype",
49 .family = AF_INET, 49 .family = AF_INET,
50 .match = match, 50 .match = match,
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 18a16782cf40..61b017fd743c 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -25,10 +25,10 @@ MODULE_DESCRIPTION("iptables AH SPI match module");
25#endif 25#endif
26 26
27/* Returns 1 if the spi is matched by the range, 0 otherwise */ 27/* Returns 1 if the spi is matched by the range, 0 otherwise */
28static inline int 28static inline bool
29spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) 29spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
30{ 30{
31 int r=0; 31 bool r;
32 duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', 32 duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
33 min,spi,max); 33 min,spi,max);
34 r=(spi >= min && spi <= max) ^ invert; 34 r=(spi >= min && spi <= max) ^ invert;
@@ -36,7 +36,7 @@ spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
36 return r; 36 return r;
37} 37}
38 38
39static int 39static bool
40match(const struct sk_buff *skb, 40match(const struct sk_buff *skb,
41 const struct net_device *in, 41 const struct net_device *in,
42 const struct net_device *out, 42 const struct net_device *out,
@@ -44,14 +44,15 @@ match(const struct sk_buff *skb,
44 const void *matchinfo, 44 const void *matchinfo,
45 int offset, 45 int offset,
46 unsigned int protoff, 46 unsigned int protoff,
47 int *hotdrop) 47 bool *hotdrop)
48{ 48{
49 struct ip_auth_hdr _ahdr, *ah; 49 struct ip_auth_hdr _ahdr;
50 const struct ip_auth_hdr *ah;
50 const struct ipt_ah *ahinfo = matchinfo; 51 const struct ipt_ah *ahinfo = matchinfo;
51 52
52 /* Must not be a fragment. */ 53 /* Must not be a fragment. */
53 if (offset) 54 if (offset)
54 return 0; 55 return false;
55 56
56 ah = skb_header_pointer(skb, protoff, 57 ah = skb_header_pointer(skb, protoff,
57 sizeof(_ahdr), &_ahdr); 58 sizeof(_ahdr), &_ahdr);
@@ -60,7 +61,7 @@ match(const struct sk_buff *skb,
60 * can't. Hence, no choice but to drop. 61 * can't. Hence, no choice but to drop.
61 */ 62 */
62 duprintf("Dropping evil AH tinygram.\n"); 63 duprintf("Dropping evil AH tinygram.\n");
63 *hotdrop = 1; 64 *hotdrop = true;
64 return 0; 65 return 0;
65 } 66 }
66 67
@@ -70,7 +71,7 @@ match(const struct sk_buff *skb,
70} 71}
71 72
72/* Called when user tries to insert an entry of this type. */ 73/* Called when user tries to insert an entry of this type. */
73static int 74static bool
74checkentry(const char *tablename, 75checkentry(const char *tablename,
75 const void *ip_void, 76 const void *ip_void,
76 const struct xt_match *match, 77 const struct xt_match *match,
@@ -82,12 +83,12 @@ checkentry(const char *tablename,
82 /* Must specify no unknown invflags */ 83 /* Must specify no unknown invflags */
83 if (ahinfo->invflags & ~IPT_AH_INV_MASK) { 84 if (ahinfo->invflags & ~IPT_AH_INV_MASK) {
84 duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags); 85 duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags);
85 return 0; 86 return false;
86 } 87 }
87 return 1; 88 return true;
88} 89}
89 90
90static struct xt_match ah_match = { 91static struct xt_match ah_match __read_mostly = {
91 .name = "ah", 92 .name = "ah",
92 .family = AF_INET, 93 .family = AF_INET,
93 .match = match, 94 .match = match,
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index 26218122f865..d6925c674069 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -22,95 +22,96 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
22MODULE_DESCRIPTION("iptables ECN matching module"); 22MODULE_DESCRIPTION("iptables ECN matching module");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
25static inline int match_ip(const struct sk_buff *skb, 25static inline bool match_ip(const struct sk_buff *skb,
26 const struct ipt_ecn_info *einfo) 26 const struct ipt_ecn_info *einfo)
27{ 27{
28 return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect; 28 return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
29} 29}
30 30
31static inline int match_tcp(const struct sk_buff *skb, 31static inline bool match_tcp(const struct sk_buff *skb,
32 const struct ipt_ecn_info *einfo, 32 const struct ipt_ecn_info *einfo,
33 int *hotdrop) 33 bool *hotdrop)
34{ 34{
35 struct tcphdr _tcph, *th; 35 struct tcphdr _tcph;
36 const struct tcphdr *th;
36 37
37 /* In practice, TCP match does this, so can't fail. But let's 38 /* In practice, TCP match does this, so can't fail. But let's
38 * be good citizens. 39 * be good citizens.
39 */ 40 */
40 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 41 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
41 if (th == NULL) { 42 if (th == NULL) {
42 *hotdrop = 0; 43 *hotdrop = false;
43 return 0; 44 return false;
44 } 45 }
45 46
46 if (einfo->operation & IPT_ECN_OP_MATCH_ECE) { 47 if (einfo->operation & IPT_ECN_OP_MATCH_ECE) {
47 if (einfo->invert & IPT_ECN_OP_MATCH_ECE) { 48 if (einfo->invert & IPT_ECN_OP_MATCH_ECE) {
48 if (th->ece == 1) 49 if (th->ece == 1)
49 return 0; 50 return false;
50 } else { 51 } else {
51 if (th->ece == 0) 52 if (th->ece == 0)
52 return 0; 53 return false;
53 } 54 }
54 } 55 }
55 56
56 if (einfo->operation & IPT_ECN_OP_MATCH_CWR) { 57 if (einfo->operation & IPT_ECN_OP_MATCH_CWR) {
57 if (einfo->invert & IPT_ECN_OP_MATCH_CWR) { 58 if (einfo->invert & IPT_ECN_OP_MATCH_CWR) {
58 if (th->cwr == 1) 59 if (th->cwr == 1)
59 return 0; 60 return false;
60 } else { 61 } else {
61 if (th->cwr == 0) 62 if (th->cwr == 0)
62 return 0; 63 return false;
63 } 64 }
64 } 65 }
65 66
66 return 1; 67 return true;
67} 68}
68 69
69static int match(const struct sk_buff *skb, 70static bool match(const struct sk_buff *skb,
70 const struct net_device *in, const struct net_device *out, 71 const struct net_device *in, const struct net_device *out,
71 const struct xt_match *match, const void *matchinfo, 72 const struct xt_match *match, const void *matchinfo,
72 int offset, unsigned int protoff, int *hotdrop) 73 int offset, unsigned int protoff, bool *hotdrop)
73{ 74{
74 const struct ipt_ecn_info *info = matchinfo; 75 const struct ipt_ecn_info *info = matchinfo;
75 76
76 if (info->operation & IPT_ECN_OP_MATCH_IP) 77 if (info->operation & IPT_ECN_OP_MATCH_IP)
77 if (!match_ip(skb, info)) 78 if (!match_ip(skb, info))
78 return 0; 79 return false;
79 80
80 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { 81 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
81 if (ip_hdr(skb)->protocol != IPPROTO_TCP) 82 if (ip_hdr(skb)->protocol != IPPROTO_TCP)
82 return 0; 83 return false;
83 if (!match_tcp(skb, info, hotdrop)) 84 if (!match_tcp(skb, info, hotdrop))
84 return 0; 85 return false;
85 } 86 }
86 87
87 return 1; 88 return true;
88} 89}
89 90
90static int checkentry(const char *tablename, const void *ip_void, 91static bool checkentry(const char *tablename, const void *ip_void,
91 const struct xt_match *match, 92 const struct xt_match *match,
92 void *matchinfo, unsigned int hook_mask) 93 void *matchinfo, unsigned int hook_mask)
93{ 94{
94 const struct ipt_ecn_info *info = matchinfo; 95 const struct ipt_ecn_info *info = matchinfo;
95 const struct ipt_ip *ip = ip_void; 96 const struct ipt_ip *ip = ip_void;
96 97
97 if (info->operation & IPT_ECN_OP_MATCH_MASK) 98 if (info->operation & IPT_ECN_OP_MATCH_MASK)
98 return 0; 99 return false;
99 100
100 if (info->invert & IPT_ECN_OP_MATCH_MASK) 101 if (info->invert & IPT_ECN_OP_MATCH_MASK)
101 return 0; 102 return false;
102 103
103 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) 104 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)
104 && ip->proto != IPPROTO_TCP) { 105 && ip->proto != IPPROTO_TCP) {
105 printk(KERN_WARNING "ipt_ecn: can't match TCP bits in rule for" 106 printk(KERN_WARNING "ipt_ecn: can't match TCP bits in rule for"
106 " non-tcp packets\n"); 107 " non-tcp packets\n");
107 return 0; 108 return false;
108 } 109 }
109 110
110 return 1; 111 return true;
111} 112}
112 113
113static struct xt_match ecn_match = { 114static struct xt_match ecn_match __read_mostly = {
114 .name = "ecn", 115 .name = "ecn",
115 .family = AF_INET, 116 .family = AF_INET,
116 .match = match, 117 .match = match,
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c
index 33af9e940887..0106dc955a69 100644
--- a/net/ipv4/netfilter/ipt_iprange.c
+++ b/net/ipv4/netfilter/ipt_iprange.c
@@ -17,53 +17,47 @@ MODULE_LICENSE("GPL");
17MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 17MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
18MODULE_DESCRIPTION("iptables arbitrary IP range match module"); 18MODULE_DESCRIPTION("iptables arbitrary IP range match module");
19 19
20#if 0 20static bool
21#define DEBUGP printk
22#else
23#define DEBUGP(format, args...)
24#endif
25
26static int
27match(const struct sk_buff *skb, 21match(const struct sk_buff *skb,
28 const struct net_device *in, 22 const struct net_device *in,
29 const struct net_device *out, 23 const struct net_device *out,
30 const struct xt_match *match, 24 const struct xt_match *match,
31 const void *matchinfo, 25 const void *matchinfo,
32 int offset, unsigned int protoff, int *hotdrop) 26 int offset, unsigned int protoff, bool *hotdrop)
33{ 27{
34 const struct ipt_iprange_info *info = matchinfo; 28 const struct ipt_iprange_info *info = matchinfo;
35 const struct iphdr *iph = ip_hdr(skb); 29 const struct iphdr *iph = ip_hdr(skb);
36 30
37 if (info->flags & IPRANGE_SRC) { 31 if (info->flags & IPRANGE_SRC) {
38 if (((ntohl(iph->saddr) < ntohl(info->src.min_ip)) 32 if ((ntohl(iph->saddr) < ntohl(info->src.min_ip)
39 || (ntohl(iph->saddr) > ntohl(info->src.max_ip))) 33 || ntohl(iph->saddr) > ntohl(info->src.max_ip))
40 ^ !!(info->flags & IPRANGE_SRC_INV)) { 34 ^ !!(info->flags & IPRANGE_SRC_INV)) {
41 DEBUGP("src IP %u.%u.%u.%u NOT in range %s" 35 pr_debug("src IP %u.%u.%u.%u NOT in range %s"
42 "%u.%u.%u.%u-%u.%u.%u.%u\n", 36 "%u.%u.%u.%u-%u.%u.%u.%u\n",
43 NIPQUAD(iph->saddr), 37 NIPQUAD(iph->saddr),
44 info->flags & IPRANGE_SRC_INV ? "(INV) " : "", 38 info->flags & IPRANGE_SRC_INV ? "(INV) " : "",
45 NIPQUAD(info->src.min_ip), 39 NIPQUAD(info->src.min_ip),
46 NIPQUAD(info->src.max_ip)); 40 NIPQUAD(info->src.max_ip));
47 return 0; 41 return false;
48 } 42 }
49 } 43 }
50 if (info->flags & IPRANGE_DST) { 44 if (info->flags & IPRANGE_DST) {
51 if (((ntohl(iph->daddr) < ntohl(info->dst.min_ip)) 45 if ((ntohl(iph->daddr) < ntohl(info->dst.min_ip)
52 || (ntohl(iph->daddr) > ntohl(info->dst.max_ip))) 46 || ntohl(iph->daddr) > ntohl(info->dst.max_ip))
53 ^ !!(info->flags & IPRANGE_DST_INV)) { 47 ^ !!(info->flags & IPRANGE_DST_INV)) {
54 DEBUGP("dst IP %u.%u.%u.%u NOT in range %s" 48 pr_debug("dst IP %u.%u.%u.%u NOT in range %s"
55 "%u.%u.%u.%u-%u.%u.%u.%u\n", 49 "%u.%u.%u.%u-%u.%u.%u.%u\n",
56 NIPQUAD(iph->daddr), 50 NIPQUAD(iph->daddr),
57 info->flags & IPRANGE_DST_INV ? "(INV) " : "", 51 info->flags & IPRANGE_DST_INV ? "(INV) " : "",
58 NIPQUAD(info->dst.min_ip), 52 NIPQUAD(info->dst.min_ip),
59 NIPQUAD(info->dst.max_ip)); 53 NIPQUAD(info->dst.max_ip));
60 return 0; 54 return false;
61 } 55 }
62 } 56 }
63 return 1; 57 return true;
64} 58}
65 59
66static struct xt_match iprange_match = { 60static struct xt_match iprange_match __read_mostly = {
67 .name = "iprange", 61 .name = "iprange",
68 .family = AF_INET, 62 .family = AF_INET,
69 .match = match, 63 .match = match,
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 7fae9aa8944c..b14e77da7a33 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); 21MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
22MODULE_DESCRIPTION("iptables owner match"); 22MODULE_DESCRIPTION("iptables owner match");
23 23
24static int 24static bool
25match(const struct sk_buff *skb, 25match(const struct sk_buff *skb,
26 const struct net_device *in, 26 const struct net_device *in,
27 const struct net_device *out, 27 const struct net_device *out,
@@ -29,29 +29,29 @@ match(const struct sk_buff *skb,
29 const void *matchinfo, 29 const void *matchinfo,
30 int offset, 30 int offset,
31 unsigned int protoff, 31 unsigned int protoff,
32 int *hotdrop) 32 bool *hotdrop)
33{ 33{
34 const struct ipt_owner_info *info = matchinfo; 34 const struct ipt_owner_info *info = matchinfo;
35 35
36 if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) 36 if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
37 return 0; 37 return false;
38 38
39 if(info->match & IPT_OWNER_UID) { 39 if(info->match & IPT_OWNER_UID) {
40 if ((skb->sk->sk_socket->file->f_uid != info->uid) ^ 40 if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
41 !!(info->invert & IPT_OWNER_UID)) 41 !!(info->invert & IPT_OWNER_UID))
42 return 0; 42 return false;
43 } 43 }
44 44
45 if(info->match & IPT_OWNER_GID) { 45 if(info->match & IPT_OWNER_GID) {
46 if ((skb->sk->sk_socket->file->f_gid != info->gid) ^ 46 if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
47 !!(info->invert & IPT_OWNER_GID)) 47 !!(info->invert & IPT_OWNER_GID))
48 return 0; 48 return false;
49 } 49 }
50 50
51 return 1; 51 return true;
52} 52}
53 53
54static int 54static bool
55checkentry(const char *tablename, 55checkentry(const char *tablename,
56 const void *ip, 56 const void *ip,
57 const struct xt_match *match, 57 const struct xt_match *match,
@@ -63,12 +63,12 @@ checkentry(const char *tablename,
63 if (info->match & (IPT_OWNER_PID|IPT_OWNER_SID|IPT_OWNER_COMM)) { 63 if (info->match & (IPT_OWNER_PID|IPT_OWNER_SID|IPT_OWNER_COMM)) {
64 printk("ipt_owner: pid, sid and command matching " 64 printk("ipt_owner: pid, sid and command matching "
65 "not supported anymore\n"); 65 "not supported anymore\n");
66 return 0; 66 return false;
67 } 67 }
68 return 1; 68 return true;
69} 69}
70 70
71static struct xt_match owner_match = { 71static struct xt_match owner_match __read_mostly = {
72 .name = "owner", 72 .name = "owner",
73 .family = AF_INET, 73 .family = AF_INET,
74 .match = match, 74 .match = match,
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 15a9e8bbb7cc..321804315659 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -163,24 +163,23 @@ static void recent_table_flush(struct recent_table *t)
163 struct recent_entry *e, *next; 163 struct recent_entry *e, *next;
164 unsigned int i; 164 unsigned int i;
165 165
166 for (i = 0; i < ip_list_hash_size; i++) { 166 for (i = 0; i < ip_list_hash_size; i++)
167 list_for_each_entry_safe(e, next, &t->iphash[i], list) 167 list_for_each_entry_safe(e, next, &t->iphash[i], list)
168 recent_entry_remove(t, e); 168 recent_entry_remove(t, e);
169 }
170} 169}
171 170
172static int 171static bool
173ipt_recent_match(const struct sk_buff *skb, 172ipt_recent_match(const struct sk_buff *skb,
174 const struct net_device *in, const struct net_device *out, 173 const struct net_device *in, const struct net_device *out,
175 const struct xt_match *match, const void *matchinfo, 174 const struct xt_match *match, const void *matchinfo,
176 int offset, unsigned int protoff, int *hotdrop) 175 int offset, unsigned int protoff, bool *hotdrop)
177{ 176{
178 const struct ipt_recent_info *info = matchinfo; 177 const struct ipt_recent_info *info = matchinfo;
179 struct recent_table *t; 178 struct recent_table *t;
180 struct recent_entry *e; 179 struct recent_entry *e;
181 __be32 addr; 180 __be32 addr;
182 u_int8_t ttl; 181 u_int8_t ttl;
183 int ret = info->invert; 182 bool ret = info->invert;
184 183
185 if (info->side == IPT_RECENT_DEST) 184 if (info->side == IPT_RECENT_DEST)
186 addr = ip_hdr(skb)->daddr; 185 addr = ip_hdr(skb)->daddr;
@@ -201,16 +200,16 @@ ipt_recent_match(const struct sk_buff *skb,
201 goto out; 200 goto out;
202 e = recent_entry_init(t, addr, ttl); 201 e = recent_entry_init(t, addr, ttl);
203 if (e == NULL) 202 if (e == NULL)
204 *hotdrop = 1; 203 *hotdrop = true;
205 ret ^= 1; 204 ret = !ret;
206 goto out; 205 goto out;
207 } 206 }
208 207
209 if (info->check_set & IPT_RECENT_SET) 208 if (info->check_set & IPT_RECENT_SET)
210 ret ^= 1; 209 ret = !ret;
211 else if (info->check_set & IPT_RECENT_REMOVE) { 210 else if (info->check_set & IPT_RECENT_REMOVE) {
212 recent_entry_remove(t, e); 211 recent_entry_remove(t, e);
213 ret ^= 1; 212 ret = !ret;
214 } else if (info->check_set & (IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) { 213 } else if (info->check_set & (IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) {
215 unsigned long t = jiffies - info->seconds * HZ; 214 unsigned long t = jiffies - info->seconds * HZ;
216 unsigned int i, hits = 0; 215 unsigned int i, hits = 0;
@@ -219,7 +218,7 @@ ipt_recent_match(const struct sk_buff *skb,
219 if (info->seconds && time_after(t, e->stamps[i])) 218 if (info->seconds && time_after(t, e->stamps[i]))
220 continue; 219 continue;
221 if (++hits >= info->hit_count) { 220 if (++hits >= info->hit_count) {
222 ret ^= 1; 221 ret = !ret;
223 break; 222 break;
224 } 223 }
225 } 224 }
@@ -235,7 +234,7 @@ out:
235 return ret; 234 return ret;
236} 235}
237 236
238static int 237static bool
239ipt_recent_checkentry(const char *tablename, const void *ip, 238ipt_recent_checkentry(const char *tablename, const void *ip,
240 const struct xt_match *match, void *matchinfo, 239 const struct xt_match *match, void *matchinfo,
241 unsigned int hook_mask) 240 unsigned int hook_mask)
@@ -243,24 +242,24 @@ ipt_recent_checkentry(const char *tablename, const void *ip,
243 const struct ipt_recent_info *info = matchinfo; 242 const struct ipt_recent_info *info = matchinfo;
244 struct recent_table *t; 243 struct recent_table *t;
245 unsigned i; 244 unsigned i;
246 int ret = 0; 245 bool ret = false;
247 246
248 if (hweight8(info->check_set & 247 if (hweight8(info->check_set &
249 (IPT_RECENT_SET | IPT_RECENT_REMOVE | 248 (IPT_RECENT_SET | IPT_RECENT_REMOVE |
250 IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) != 1) 249 IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) != 1)
251 return 0; 250 return false;
252 if ((info->check_set & (IPT_RECENT_SET | IPT_RECENT_REMOVE)) && 251 if ((info->check_set & (IPT_RECENT_SET | IPT_RECENT_REMOVE)) &&
253 (info->seconds || info->hit_count)) 252 (info->seconds || info->hit_count))
254 return 0; 253 return false;
255 if (info->name[0] == '\0' || 254 if (info->name[0] == '\0' ||
256 strnlen(info->name, IPT_RECENT_NAME_LEN) == IPT_RECENT_NAME_LEN) 255 strnlen(info->name, IPT_RECENT_NAME_LEN) == IPT_RECENT_NAME_LEN)
257 return 0; 256 return false;
258 257
259 mutex_lock(&recent_mutex); 258 mutex_lock(&recent_mutex);
260 t = recent_table_lookup(info->name); 259 t = recent_table_lookup(info->name);
261 if (t != NULL) { 260 if (t != NULL) {
262 t->refcnt++; 261 t->refcnt++;
263 ret = 1; 262 ret = true;
264 goto out; 263 goto out;
265 } 264 }
266 265
@@ -287,7 +286,7 @@ ipt_recent_checkentry(const char *tablename, const void *ip,
287 spin_lock_bh(&recent_lock); 286 spin_lock_bh(&recent_lock);
288 list_add_tail(&t->list, &tables); 287 list_add_tail(&t->list, &tables);
289 spin_unlock_bh(&recent_lock); 288 spin_unlock_bh(&recent_lock);
290 ret = 1; 289 ret = true;
291out: 290out:
292 mutex_unlock(&recent_mutex); 291 mutex_unlock(&recent_mutex);
293 return ret; 292 return ret;
@@ -323,18 +322,16 @@ struct recent_iter_state {
323static void *recent_seq_start(struct seq_file *seq, loff_t *pos) 322static void *recent_seq_start(struct seq_file *seq, loff_t *pos)
324{ 323{
325 struct recent_iter_state *st = seq->private; 324 struct recent_iter_state *st = seq->private;
326 struct recent_table *t = st->table; 325 const struct recent_table *t = st->table;
327 struct recent_entry *e; 326 struct recent_entry *e;
328 loff_t p = *pos; 327 loff_t p = *pos;
329 328
330 spin_lock_bh(&recent_lock); 329 spin_lock_bh(&recent_lock);
331 330
332 for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++) { 331 for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)
333 list_for_each_entry(e, &t->iphash[st->bucket], list) { 332 list_for_each_entry(e, &t->iphash[st->bucket], list)
334 if (p-- == 0) 333 if (p-- == 0)
335 return e; 334 return e;
336 }
337 }
338 return NULL; 335 return NULL;
339} 336}
340 337
@@ -373,7 +370,7 @@ static int recent_seq_show(struct seq_file *seq, void *v)
373 return 0; 370 return 0;
374} 371}
375 372
376static struct seq_operations recent_seq_ops = { 373static const struct seq_operations recent_seq_ops = {
377 .start = recent_seq_start, 374 .start = recent_seq_start,
378 .next = recent_seq_next, 375 .next = recent_seq_next,
379 .stop = recent_seq_stop, 376 .stop = recent_seq_stop,
@@ -463,7 +460,7 @@ static const struct file_operations recent_fops = {
463}; 460};
464#endif /* CONFIG_PROC_FS */ 461#endif /* CONFIG_PROC_FS */
465 462
466static struct xt_match recent_match = { 463static struct xt_match recent_match __read_mostly = {
467 .name = "recent", 464 .name = "recent",
468 .family = AF_INET, 465 .family = AF_INET,
469 .match = ipt_recent_match, 466 .match = ipt_recent_match,
diff --git a/net/ipv4/netfilter/ipt_tos.c b/net/ipv4/netfilter/ipt_tos.c
index d314844af12b..e740441c973d 100644
--- a/net/ipv4/netfilter/ipt_tos.c
+++ b/net/ipv4/netfilter/ipt_tos.c
@@ -18,7 +18,7 @@
18MODULE_LICENSE("GPL"); 18MODULE_LICENSE("GPL");
19MODULE_DESCRIPTION("iptables TOS match module"); 19MODULE_DESCRIPTION("iptables TOS match module");
20 20
21static int 21static bool
22match(const struct sk_buff *skb, 22match(const struct sk_buff *skb,
23 const struct net_device *in, 23 const struct net_device *in,
24 const struct net_device *out, 24 const struct net_device *out,
@@ -26,14 +26,14 @@ match(const struct sk_buff *skb,
26 const void *matchinfo, 26 const void *matchinfo,
27 int offset, 27 int offset,
28 unsigned int protoff, 28 unsigned int protoff,
29 int *hotdrop) 29 bool *hotdrop)
30{ 30{
31 const struct ipt_tos_info *info = matchinfo; 31 const struct ipt_tos_info *info = matchinfo;
32 32
33 return (ip_hdr(skb)->tos == info->tos) ^ info->invert; 33 return (ip_hdr(skb)->tos == info->tos) ^ info->invert;
34} 34}
35 35
36static struct xt_match tos_match = { 36static struct xt_match tos_match __read_mostly = {
37 .name = "tos", 37 .name = "tos",
38 .family = AF_INET, 38 .family = AF_INET,
39 .match = match, 39 .match = match,
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
index ab02d9e3139c..a439900a4ba5 100644
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ b/net/ipv4/netfilter/ipt_ttl.c
@@ -18,37 +18,33 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
18MODULE_DESCRIPTION("IP tables TTL matching module"); 18MODULE_DESCRIPTION("IP tables TTL matching module");
19MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
20 20
21static int match(const struct sk_buff *skb, 21static bool match(const struct sk_buff *skb,
22 const struct net_device *in, const struct net_device *out, 22 const struct net_device *in, const struct net_device *out,
23 const struct xt_match *match, const void *matchinfo, 23 const struct xt_match *match, const void *matchinfo,
24 int offset, unsigned int protoff, int *hotdrop) 24 int offset, unsigned int protoff, bool *hotdrop)
25{ 25{
26 const struct ipt_ttl_info *info = matchinfo; 26 const struct ipt_ttl_info *info = matchinfo;
27 const u8 ttl = ip_hdr(skb)->ttl; 27 const u8 ttl = ip_hdr(skb)->ttl;
28 28
29 switch (info->mode) { 29 switch (info->mode) {
30 case IPT_TTL_EQ: 30 case IPT_TTL_EQ:
31 return (ttl == info->ttl); 31 return ttl == info->ttl;
32 break;
33 case IPT_TTL_NE: 32 case IPT_TTL_NE:
34 return (!(ttl == info->ttl)); 33 return ttl != info->ttl;
35 break;
36 case IPT_TTL_LT: 34 case IPT_TTL_LT:
37 return (ttl < info->ttl); 35 return ttl < info->ttl;
38 break;
39 case IPT_TTL_GT: 36 case IPT_TTL_GT:
40 return (ttl > info->ttl); 37 return ttl > info->ttl;
41 break;
42 default: 38 default:
43 printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", 39 printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
44 info->mode); 40 info->mode);
45 return 0; 41 return false;
46 } 42 }
47 43
48 return 0; 44 return false;
49} 45}
50 46
51static struct xt_match ttl_match = { 47static struct xt_match ttl_match __read_mostly = {
52 .name = "ttl", 48 .name = "ttl",
53 .family = AF_INET, 49 .family = AF_INET,
54 .match = match, 50 .match = match,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 6dc72a815f77..3c5629938487 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -24,12 +24,6 @@
24#include <net/netfilter/nf_conntrack_core.h> 24#include <net/netfilter/nf_conntrack_core.h>
25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
26 26
27#if 0
28#define DEBUGP printk
29#else
30#define DEBUGP(format, args...)
31#endif
32
33static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 27static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
34 struct nf_conntrack_tuple *tuple) 28 struct nf_conntrack_tuple *tuple)
35{ 29{
@@ -103,17 +97,6 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
103 return NF_ACCEPT; 97 return NF_ACCEPT;
104} 98}
105 99
106int nf_nat_module_is_loaded = 0;
107EXPORT_SYMBOL_GPL(nf_nat_module_is_loaded);
108
109static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple)
110{
111 if (nf_nat_module_is_loaded)
112 return NF_CT_F_NAT;
113
114 return NF_CT_F_BASIC;
115}
116
117static unsigned int ipv4_confirm(unsigned int hooknum, 100static unsigned int ipv4_confirm(unsigned int hooknum,
118 struct sk_buff **pskb, 101 struct sk_buff **pskb,
119 const struct net_device *in, 102 const struct net_device *in,
@@ -335,17 +318,17 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
335 318
336 /* We only do TCP at the moment: is there a better way? */ 319 /* We only do TCP at the moment: is there a better way? */
337 if (strcmp(sk->sk_prot->name, "TCP")) { 320 if (strcmp(sk->sk_prot->name, "TCP")) {
338 DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n"); 321 pr_debug("SO_ORIGINAL_DST: Not a TCP socket\n");
339 return -ENOPROTOOPT; 322 return -ENOPROTOOPT;
340 } 323 }
341 324
342 if ((unsigned int) *len < sizeof(struct sockaddr_in)) { 325 if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
343 DEBUGP("SO_ORIGINAL_DST: len %u not %u\n", 326 pr_debug("SO_ORIGINAL_DST: len %d not %Zu\n",
344 *len, sizeof(struct sockaddr_in)); 327 *len, sizeof(struct sockaddr_in));
345 return -EINVAL; 328 return -EINVAL;
346 } 329 }
347 330
348 h = nf_conntrack_find_get(&tuple, NULL); 331 h = nf_conntrack_find_get(&tuple);
349 if (h) { 332 if (h) {
350 struct sockaddr_in sin; 333 struct sockaddr_in sin;
351 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 334 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
@@ -357,17 +340,17 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
357 .tuple.dst.u3.ip; 340 .tuple.dst.u3.ip;
358 memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); 341 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
359 342
360 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", 343 pr_debug("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
361 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); 344 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
362 nf_ct_put(ct); 345 nf_ct_put(ct);
363 if (copy_to_user(user, &sin, sizeof(sin)) != 0) 346 if (copy_to_user(user, &sin, sizeof(sin)) != 0)
364 return -EFAULT; 347 return -EFAULT;
365 else 348 else
366 return 0; 349 return 0;
367 } 350 }
368 DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n", 351 pr_debug("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
369 NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port), 352 NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port),
370 NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port)); 353 NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port));
371 return -ENOENT; 354 return -ENOENT;
372} 355}
373 356
@@ -425,7 +408,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
425 .print_tuple = ipv4_print_tuple, 408 .print_tuple = ipv4_print_tuple,
426 .print_conntrack = ipv4_print_conntrack, 409 .print_conntrack = ipv4_print_conntrack,
427 .prepare = ipv4_prepare, 410 .prepare = ipv4_prepare,
428 .get_features = ipv4_get_features,
429#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 411#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
430 .tuple_to_nfattr = ipv4_tuple_to_nfattr, 412 .tuple_to_nfattr = ipv4_tuple_to_nfattr,
431 .nfattr_to_tuple = ipv4_nfattr_to_tuple, 413 .nfattr_to_tuple = ipv4_nfattr_to_tuple,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 89f933e81035..3da9d73d1b52 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -18,12 +18,6 @@
18#include <net/netfilter/nf_conntrack_l4proto.h> 18#include <net/netfilter/nf_conntrack_l4proto.h>
19#include <net/netfilter/nf_conntrack_expect.h> 19#include <net/netfilter/nf_conntrack_expect.h>
20 20
21#if 0
22#define DEBUGP printk
23#else
24#define DEBUGP(format, args...)
25#endif
26
27#ifdef CONFIG_NF_CT_ACCT 21#ifdef CONFIG_NF_CT_ACCT
28static unsigned int 22static unsigned int
29seq_print_counters(struct seq_file *s, 23seq_print_counters(struct seq_file *s,
@@ -41,35 +35,36 @@ struct ct_iter_state {
41 unsigned int bucket; 35 unsigned int bucket;
42}; 36};
43 37
44static struct list_head *ct_get_first(struct seq_file *seq) 38static struct hlist_node *ct_get_first(struct seq_file *seq)
45{ 39{
46 struct ct_iter_state *st = seq->private; 40 struct ct_iter_state *st = seq->private;
47 41
48 for (st->bucket = 0; 42 for (st->bucket = 0;
49 st->bucket < nf_conntrack_htable_size; 43 st->bucket < nf_conntrack_htable_size;
50 st->bucket++) { 44 st->bucket++) {
51 if (!list_empty(&nf_conntrack_hash[st->bucket])) 45 if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
52 return nf_conntrack_hash[st->bucket].next; 46 return nf_conntrack_hash[st->bucket].first;
53 } 47 }
54 return NULL; 48 return NULL;
55} 49}
56 50
57static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head) 51static struct hlist_node *ct_get_next(struct seq_file *seq,
52 struct hlist_node *head)
58{ 53{
59 struct ct_iter_state *st = seq->private; 54 struct ct_iter_state *st = seq->private;
60 55
61 head = head->next; 56 head = head->next;
62 while (head == &nf_conntrack_hash[st->bucket]) { 57 while (head == NULL) {
63 if (++st->bucket >= nf_conntrack_htable_size) 58 if (++st->bucket >= nf_conntrack_htable_size)
64 return NULL; 59 return NULL;
65 head = nf_conntrack_hash[st->bucket].next; 60 head = nf_conntrack_hash[st->bucket].first;
66 } 61 }
67 return head; 62 return head;
68} 63}
69 64
70static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos) 65static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
71{ 66{
72 struct list_head *head = ct_get_first(seq); 67 struct hlist_node *head = ct_get_first(seq);
73 68
74 if (head) 69 if (head)
75 while (pos && (head = ct_get_next(seq, head))) 70 while (pos && (head = ct_get_next(seq, head)))
@@ -169,7 +164,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
169 return 0; 164 return 0;
170} 165}
171 166
172static struct seq_operations ct_seq_ops = { 167static const struct seq_operations ct_seq_ops = {
173 .start = ct_seq_start, 168 .start = ct_seq_start,
174 .next = ct_seq_next, 169 .next = ct_seq_next,
175 .stop = ct_seq_stop, 170 .stop = ct_seq_stop,
@@ -206,47 +201,68 @@ static const struct file_operations ct_file_ops = {
206}; 201};
207 202
208/* expects */ 203/* expects */
209static void *exp_seq_start(struct seq_file *s, loff_t *pos) 204struct ct_expect_iter_state {
205 unsigned int bucket;
206};
207
208static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
210{ 209{
211 struct list_head *e = &nf_conntrack_expect_list; 210 struct ct_expect_iter_state *st = seq->private;
212 loff_t i;
213 211
214 /* strange seq_file api calls stop even if we fail, 212 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
215 * thus we need to grab lock since stop unlocks */ 213 if (!hlist_empty(&nf_ct_expect_hash[st->bucket]))
216 read_lock_bh(&nf_conntrack_lock); 214 return nf_ct_expect_hash[st->bucket].first;
215 }
216 return NULL;
217}
217 218
218 if (list_empty(e)) 219static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
219 return NULL; 220 struct hlist_node *head)
221{
222 struct ct_expect_iter_state *st = seq->private;
220 223
221 for (i = 0; i <= *pos; i++) { 224 head = head->next;
222 e = e->next; 225 while (head == NULL) {
223 if (e == &nf_conntrack_expect_list) 226 if (++st->bucket >= nf_ct_expect_hsize)
224 return NULL; 227 return NULL;
228 head = nf_ct_expect_hash[st->bucket].first;
225 } 229 }
226 return e; 230 return head;
227} 231}
228 232
229static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) 233static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
230{ 234{
231 struct list_head *e = v; 235 struct hlist_node *head = ct_expect_get_first(seq);
232 236
233 ++*pos; 237 if (head)
234 e = e->next; 238 while (pos && (head = ct_expect_get_next(seq, head)))
239 pos--;
240 return pos ? NULL : head;
241}
235 242
236 if (e == &nf_conntrack_expect_list) 243static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
237 return NULL; 244{
245 read_lock_bh(&nf_conntrack_lock);
246 return ct_expect_get_idx(seq, *pos);
247}
238 248
239 return e; 249static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
250{
251 (*pos)++;
252 return ct_expect_get_next(seq, v);
240} 253}
241 254
242static void exp_seq_stop(struct seq_file *s, void *v) 255static void exp_seq_stop(struct seq_file *seq, void *v)
243{ 256{
244 read_unlock_bh(&nf_conntrack_lock); 257 read_unlock_bh(&nf_conntrack_lock);
245} 258}
246 259
247static int exp_seq_show(struct seq_file *s, void *v) 260static int exp_seq_show(struct seq_file *s, void *v)
248{ 261{
249 struct nf_conntrack_expect *exp = v; 262 struct nf_conntrack_expect *exp;
263 struct hlist_node *n = v;
264
265 exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
250 266
251 if (exp->tuple.src.l3num != AF_INET) 267 if (exp->tuple.src.l3num != AF_INET)
252 return 0; 268 return 0;
@@ -266,7 +282,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
266 return seq_putc(s, '\n'); 282 return seq_putc(s, '\n');
267} 283}
268 284
269static struct seq_operations exp_seq_ops = { 285static const struct seq_operations exp_seq_ops = {
270 .start = exp_seq_start, 286 .start = exp_seq_start,
271 .next = exp_seq_next, 287 .next = exp_seq_next,
272 .stop = exp_seq_stop, 288 .stop = exp_seq_stop,
@@ -275,7 +291,23 @@ static struct seq_operations exp_seq_ops = {
275 291
276static int exp_open(struct inode *inode, struct file *file) 292static int exp_open(struct inode *inode, struct file *file)
277{ 293{
278 return seq_open(file, &exp_seq_ops); 294 struct seq_file *seq;
295 struct ct_expect_iter_state *st;
296 int ret;
297
298 st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
299 if (st == NULL)
300 return -ENOMEM;
301 ret = seq_open(file, &exp_seq_ops);
302 if (ret)
303 goto out_free;
304 seq = file->private_data;
305 seq->private = st;
306 memset(st, 0, sizeof(struct ct_expect_iter_state));
307 return ret;
308out_free:
309 kfree(st);
310 return ret;
279} 311}
280 312
281static const struct file_operations ip_exp_file_ops = { 313static const struct file_operations ip_exp_file_ops = {
@@ -283,7 +315,7 @@ static const struct file_operations ip_exp_file_ops = {
283 .open = exp_open, 315 .open = exp_open,
284 .read = seq_read, 316 .read = seq_read,
285 .llseek = seq_lseek, 317 .llseek = seq_lseek,
286 .release = seq_release 318 .release = seq_release_private,
287}; 319};
288 320
289static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) 321static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
@@ -354,7 +386,7 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
354 return 0; 386 return 0;
355} 387}
356 388
357static struct seq_operations ct_cpu_seq_ops = { 389static const struct seq_operations ct_cpu_seq_ops = {
358 .start = ct_cpu_seq_start, 390 .start = ct_cpu_seq_start,
359 .next = ct_cpu_seq_next, 391 .next = ct_cpu_seq_next,
360 .stop = ct_cpu_seq_stop, 392 .stop = ct_cpu_seq_stop,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index f4fc657c1983..0fe8fb0466ef 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -21,12 +21,6 @@
21 21
22static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ; 22static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
23 23
24#if 0
25#define DEBUGP printk
26#else
27#define DEBUGP(format, args...)
28#endif
29
30static int icmp_pkt_to_tuple(const struct sk_buff *skb, 24static int icmp_pkt_to_tuple(const struct sk_buff *skb,
31 unsigned int dataoff, 25 unsigned int dataoff,
32 struct nf_conntrack_tuple *tuple) 26 struct nf_conntrack_tuple *tuple)
@@ -125,8 +119,8 @@ static int icmp_new(struct nf_conn *conntrack,
125 if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) 119 if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new)
126 || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) { 120 || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) {
127 /* Can't create a new ICMP `conn' with this. */ 121 /* Can't create a new ICMP `conn' with this. */
128 DEBUGP("icmp: can't create new conn with type %u\n", 122 pr_debug("icmp: can't create new conn with type %u\n",
129 conntrack->tuplehash[0].tuple.dst.u.icmp.type); 123 conntrack->tuplehash[0].tuple.dst.u.icmp.type);
130 NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple); 124 NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
131 return 0; 125 return 0;
132 } 126 }
@@ -159,8 +153,8 @@ icmp_error_message(struct sk_buff *skb,
159 153
160 /* Ignore ICMP's containing fragments (shouldn't happen) */ 154 /* Ignore ICMP's containing fragments (shouldn't happen) */
161 if (inside->ip.frag_off & htons(IP_OFFSET)) { 155 if (inside->ip.frag_off & htons(IP_OFFSET)) {
162 DEBUGP("icmp_error_message: fragment of proto %u\n", 156 pr_debug("icmp_error_message: fragment of proto %u\n",
163 inside->ip.protocol); 157 inside->ip.protocol);
164 return -NF_ACCEPT; 158 return -NF_ACCEPT;
165 } 159 }
166 160
@@ -172,8 +166,8 @@ icmp_error_message(struct sk_buff *skb,
172 if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET, 166 if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,
173 inside->ip.protocol, &origtuple, 167 inside->ip.protocol, &origtuple,
174 &nf_conntrack_l3proto_ipv4, innerproto)) { 168 &nf_conntrack_l3proto_ipv4, innerproto)) {
175 DEBUGP("icmp_error_message: ! get_tuple p=%u", 169 pr_debug("icmp_error_message: ! get_tuple p=%u",
176 inside->ip.protocol); 170 inside->ip.protocol);
177 return -NF_ACCEPT; 171 return -NF_ACCEPT;
178 } 172 }
179 173
@@ -181,22 +175,22 @@ icmp_error_message(struct sk_buff *skb,
181 been preserved inside the ICMP. */ 175 been preserved inside the ICMP. */
182 if (!nf_ct_invert_tuple(&innertuple, &origtuple, 176 if (!nf_ct_invert_tuple(&innertuple, &origtuple,
183 &nf_conntrack_l3proto_ipv4, innerproto)) { 177 &nf_conntrack_l3proto_ipv4, innerproto)) {
184 DEBUGP("icmp_error_message: no match\n"); 178 pr_debug("icmp_error_message: no match\n");
185 return -NF_ACCEPT; 179 return -NF_ACCEPT;
186 } 180 }
187 181
188 *ctinfo = IP_CT_RELATED; 182 *ctinfo = IP_CT_RELATED;
189 183
190 h = nf_conntrack_find_get(&innertuple, NULL); 184 h = nf_conntrack_find_get(&innertuple);
191 if (!h) { 185 if (!h) {
192 /* Locally generated ICMPs will match inverted if they 186 /* Locally generated ICMPs will match inverted if they
193 haven't been SNAT'ed yet */ 187 haven't been SNAT'ed yet */
194 /* FIXME: NAT code has to handle half-done double NAT --RR */ 188 /* FIXME: NAT code has to handle half-done double NAT --RR */
195 if (hooknum == NF_IP_LOCAL_OUT) 189 if (hooknum == NF_IP_LOCAL_OUT)
196 h = nf_conntrack_find_get(&origtuple, NULL); 190 h = nf_conntrack_find_get(&origtuple);
197 191
198 if (!h) { 192 if (!h) {
199 DEBUGP("icmp_error_message: no match\n"); 193 pr_debug("icmp_error_message: no match\n");
200 return -NF_ACCEPT; 194 return -NF_ACCEPT;
201 } 195 }
202 196
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 0f17098917bc..bd93a1d71052 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -45,7 +45,7 @@ static unsigned int help(struct sk_buff **pskb,
45 /* Try to get same port: if not, try to change it. */ 45 /* Try to get same port: if not, try to change it. */
46 for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { 46 for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
47 exp->tuple.dst.u.tcp.port = htons(port); 47 exp->tuple.dst.u.tcp.port = htons(port);
48 if (nf_conntrack_expect_related(exp) == 0) 48 if (nf_ct_expect_related(exp) == 0)
49 break; 49 break;
50 } 50 }
51 51
@@ -57,7 +57,7 @@ static unsigned int help(struct sk_buff **pskb,
57 matchoff, matchlen, 57 matchoff, matchlen,
58 buffer, strlen(buffer)); 58 buffer, strlen(buffer));
59 if (ret != NF_ACCEPT) 59 if (ret != NF_ACCEPT)
60 nf_conntrack_unexpect_related(exp); 60 nf_ct_unexpect_related(exp);
61 return ret; 61 return ret;
62} 62}
63 63
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index ea02f00d2dac..e848d8d6292f 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -12,7 +12,6 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/timer.h> 13#include <linux/timer.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/vmalloc.h>
16#include <net/checksum.h> 15#include <net/checksum.h>
17#include <net/icmp.h> 16#include <net/icmp.h>
18#include <net/ip.h> 17#include <net/ip.h>
@@ -32,20 +31,15 @@
32#include <net/netfilter/nf_conntrack_l3proto.h> 31#include <net/netfilter/nf_conntrack_l3proto.h>
33#include <net/netfilter/nf_conntrack_l4proto.h> 32#include <net/netfilter/nf_conntrack_l4proto.h>
34 33
35#if 0
36#define DEBUGP printk
37#else
38#define DEBUGP(format, args...)
39#endif
40
41static DEFINE_RWLOCK(nf_nat_lock); 34static DEFINE_RWLOCK(nf_nat_lock);
42 35
43static struct nf_conntrack_l3proto *l3proto = NULL; 36static struct nf_conntrack_l3proto *l3proto = NULL;
44 37
45/* Calculated at init based on memory size */ 38/* Calculated at init based on memory size */
46static unsigned int nf_nat_htable_size; 39static unsigned int nf_nat_htable_size;
40static int nf_nat_vmalloced;
47 41
48static struct list_head *bysource; 42static struct hlist_head *bysource;
49 43
50#define MAX_IP_NAT_PROTO 256 44#define MAX_IP_NAT_PROTO 256
51static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]; 45static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];
@@ -87,19 +81,6 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
87 tuple->dst.protonum, 0) % nf_nat_htable_size; 81 tuple->dst.protonum, 0) % nf_nat_htable_size;
88} 82}
89 83
90/* Noone using conntrack by the time this called. */
91static void nf_nat_cleanup_conntrack(struct nf_conn *conn)
92{
93 struct nf_conn_nat *nat;
94 if (!(conn->status & IPS_NAT_DONE_MASK))
95 return;
96
97 nat = nfct_nat(conn);
98 write_lock_bh(&nf_nat_lock);
99 list_del(&nat->info.bysource);
100 write_unlock_bh(&nf_nat_lock);
101}
102
103/* Is this tuple already taken? (not by us) */ 84/* Is this tuple already taken? (not by us) */
104int 85int
105nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, 86nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
@@ -166,10 +147,11 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple,
166 unsigned int h = hash_by_src(tuple); 147 unsigned int h = hash_by_src(tuple);
167 struct nf_conn_nat *nat; 148 struct nf_conn_nat *nat;
168 struct nf_conn *ct; 149 struct nf_conn *ct;
150 struct hlist_node *n;
169 151
170 read_lock_bh(&nf_nat_lock); 152 read_lock_bh(&nf_nat_lock);
171 list_for_each_entry(nat, &bysource[h], info.bysource) { 153 hlist_for_each_entry(nat, n, &bysource[h], bysource) {
172 ct = (struct nf_conn *)((char *)nat - offsetof(struct nf_conn, data)); 154 ct = nat->ct;
173 if (same_src(ct, tuple)) { 155 if (same_src(ct, tuple)) {
174 /* Copy source part from reply tuple. */ 156 /* Copy source part from reply tuple. */
175 nf_ct_invert_tuplepr(result, 157 nf_ct_invert_tuplepr(result,
@@ -254,7 +236,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
254 manips not an issue. */ 236 manips not an issue. */
255 if (maniptype == IP_NAT_MANIP_SRC) { 237 if (maniptype == IP_NAT_MANIP_SRC) {
256 if (find_appropriate_src(orig_tuple, tuple, range)) { 238 if (find_appropriate_src(orig_tuple, tuple, range)) {
257 DEBUGP("get_unique_tuple: Found current src map\n"); 239 pr_debug("get_unique_tuple: Found current src map\n");
258 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) 240 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
259 if (!nf_nat_used_tuple(tuple, ct)) 241 if (!nf_nat_used_tuple(tuple, ct))
260 return; 242 return;
@@ -296,11 +278,20 @@ nf_nat_setup_info(struct nf_conn *ct,
296 unsigned int hooknum) 278 unsigned int hooknum)
297{ 279{
298 struct nf_conntrack_tuple curr_tuple, new_tuple; 280 struct nf_conntrack_tuple curr_tuple, new_tuple;
299 struct nf_conn_nat *nat = nfct_nat(ct); 281 struct nf_conn_nat *nat;
300 struct nf_nat_info *info = &nat->info;
301 int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK); 282 int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
302 enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); 283 enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
303 284
285 /* nat helper or nfctnetlink also setup binding */
286 nat = nfct_nat(ct);
287 if (!nat) {
288 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
289 if (nat == NULL) {
290 pr_debug("failed to add NAT extension\n");
291 return NF_ACCEPT;
292 }
293 }
294
304 NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || 295 NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
305 hooknum == NF_IP_POST_ROUTING || 296 hooknum == NF_IP_POST_ROUTING ||
306 hooknum == NF_IP_LOCAL_IN || 297 hooknum == NF_IP_LOCAL_IN ||
@@ -337,7 +328,10 @@ nf_nat_setup_info(struct nf_conn *ct,
337 328
338 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 329 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
339 write_lock_bh(&nf_nat_lock); 330 write_lock_bh(&nf_nat_lock);
340 list_add(&info->bysource, &bysource[srchash]); 331 /* nf_conntrack_alter_reply might re-allocate exntension aera */
332 nat = nfct_nat(ct);
333 nat->ct = ct;
334 hlist_add_head(&nat->bysource, &bysource[srchash]);
341 write_unlock_bh(&nf_nat_lock); 335 write_unlock_bh(&nf_nat_lock);
342 } 336 }
343 337
@@ -462,8 +456,9 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
462 return 0; 456 return 0;
463 } 457 }
464 458
465 DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n", 459 pr_debug("icmp_reply_translation: translating error %p manip %u "
466 *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); 460 "dir %s\n", *pskb, manip,
461 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
467 462
468 /* rcu_read_lock()ed by nf_hook_slow */ 463 /* rcu_read_lock()ed by nf_hook_slow */
469 l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); 464 l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
@@ -590,17 +585,69 @@ nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range)
590EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr); 585EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr);
591#endif 586#endif
592 587
588/* Noone using conntrack by the time this called. */
589static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
590{
591 struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
592
593 if (nat == NULL || nat->ct == NULL)
594 return;
595
596 NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
597
598 write_lock_bh(&nf_nat_lock);
599 hlist_del(&nat->bysource);
600 nat->ct = NULL;
601 write_unlock_bh(&nf_nat_lock);
602}
603
604static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
605{
606 struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT);
607 struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old;
608 struct nf_conn *ct = old_nat->ct;
609 unsigned int srchash;
610
611 if (!(ct->status & IPS_NAT_DONE_MASK))
612 return;
613
614 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
615
616 write_lock_bh(&nf_nat_lock);
617 hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
618 new_nat->ct = ct;
619 write_unlock_bh(&nf_nat_lock);
620}
621
622static struct nf_ct_ext_type nat_extend __read_mostly = {
623 .len = sizeof(struct nf_conn_nat),
624 .align = __alignof__(struct nf_conn_nat),
625 .destroy = nf_nat_cleanup_conntrack,
626 .move = nf_nat_move_storage,
627 .id = NF_CT_EXT_NAT,
628 .flags = NF_CT_EXT_F_PREALLOC,
629};
630
593static int __init nf_nat_init(void) 631static int __init nf_nat_init(void)
594{ 632{
595 size_t i; 633 size_t i;
634 int ret;
635
636 ret = nf_ct_extend_register(&nat_extend);
637 if (ret < 0) {
638 printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
639 return ret;
640 }
596 641
597 /* Leave them the same for the moment. */ 642 /* Leave them the same for the moment. */
598 nf_nat_htable_size = nf_conntrack_htable_size; 643 nf_nat_htable_size = nf_conntrack_htable_size;
599 644
600 /* One vmalloc for both hash tables */ 645 bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
601 bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size); 646 &nf_nat_vmalloced);
602 if (!bysource) 647 if (!bysource) {
603 return -ENOMEM; 648 ret = -ENOMEM;
649 goto cleanup_extend;
650 }
604 651
605 /* Sew in builtin protocols. */ 652 /* Sew in builtin protocols. */
606 write_lock_bh(&nf_nat_lock); 653 write_lock_bh(&nf_nat_lock);
@@ -612,18 +659,18 @@ static int __init nf_nat_init(void)
612 write_unlock_bh(&nf_nat_lock); 659 write_unlock_bh(&nf_nat_lock);
613 660
614 for (i = 0; i < nf_nat_htable_size; i++) { 661 for (i = 0; i < nf_nat_htable_size; i++) {
615 INIT_LIST_HEAD(&bysource[i]); 662 INIT_HLIST_HEAD(&bysource[i]);
616 } 663 }
617 664
618 /* FIXME: Man, this is a hack. <SIGH> */
619 NF_CT_ASSERT(rcu_dereference(nf_conntrack_destroyed) == NULL);
620 rcu_assign_pointer(nf_conntrack_destroyed, nf_nat_cleanup_conntrack);
621
622 /* Initialize fake conntrack so that NAT will skip it */ 665 /* Initialize fake conntrack so that NAT will skip it */
623 nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; 666 nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
624 667
625 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); 668 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
626 return 0; 669 return 0;
670
671 cleanup_extend:
672 nf_ct_extend_unregister(&nat_extend);
673 return ret;
627} 674}
628 675
629/* Clear NAT section of all conntracks, in case we're loaded again. */ 676/* Clear NAT section of all conntracks, in case we're loaded again. */
@@ -641,10 +688,10 @@ static int clean_nat(struct nf_conn *i, void *data)
641static void __exit nf_nat_cleanup(void) 688static void __exit nf_nat_cleanup(void)
642{ 689{
643 nf_ct_iterate_cleanup(&clean_nat, NULL); 690 nf_ct_iterate_cleanup(&clean_nat, NULL);
644 rcu_assign_pointer(nf_conntrack_destroyed, NULL);
645 synchronize_rcu(); 691 synchronize_rcu();
646 vfree(bysource); 692 nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
647 nf_ct_l3proto_put(l3proto); 693 nf_ct_l3proto_put(l3proto);
694 nf_ct_extend_unregister(&nat_extend);
648} 695}
649 696
650MODULE_LICENSE("GPL"); 697MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index e6bc8e5a72f1..3663bd879c39 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -25,12 +25,6 @@ MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
25MODULE_DESCRIPTION("ftp NAT helper"); 25MODULE_DESCRIPTION("ftp NAT helper");
26MODULE_ALIAS("ip_nat_ftp"); 26MODULE_ALIAS("ip_nat_ftp");
27 27
28#if 0
29#define DEBUGP printk
30#else
31#define DEBUGP(format, args...)
32#endif
33
34/* FIXME: Time out? --RR */ 28/* FIXME: Time out? --RR */
35 29
36static int 30static int
@@ -47,7 +41,7 @@ mangle_rfc959_packet(struct sk_buff **pskb,
47 sprintf(buffer, "%u,%u,%u,%u,%u,%u", 41 sprintf(buffer, "%u,%u,%u,%u,%u,%u",
48 NIPQUAD(newip), port>>8, port&0xFF); 42 NIPQUAD(newip), port>>8, port&0xFF);
49 43
50 DEBUGP("calling nf_nat_mangle_tcp_packet\n"); 44 pr_debug("calling nf_nat_mangle_tcp_packet\n");
51 45
52 return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, 46 return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
53 matchlen, buffer, strlen(buffer)); 47 matchlen, buffer, strlen(buffer));
@@ -67,7 +61,7 @@ mangle_eprt_packet(struct sk_buff **pskb,
67 61
68 sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port); 62 sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
69 63
70 DEBUGP("calling nf_nat_mangle_tcp_packet\n"); 64 pr_debug("calling nf_nat_mangle_tcp_packet\n");
71 65
72 return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, 66 return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
73 matchlen, buffer, strlen(buffer)); 67 matchlen, buffer, strlen(buffer));
@@ -87,7 +81,7 @@ mangle_epsv_packet(struct sk_buff **pskb,
87 81
88 sprintf(buffer, "|||%u|", port); 82 sprintf(buffer, "|||%u|", port);
89 83
90 DEBUGP("calling nf_nat_mangle_tcp_packet\n"); 84 pr_debug("calling nf_nat_mangle_tcp_packet\n");
91 85
92 return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, 86 return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
93 matchlen, buffer, strlen(buffer)); 87 matchlen, buffer, strlen(buffer));
@@ -117,7 +111,7 @@ static unsigned int nf_nat_ftp(struct sk_buff **pskb,
117 int dir = CTINFO2DIR(ctinfo); 111 int dir = CTINFO2DIR(ctinfo);
118 struct nf_conn *ct = exp->master; 112 struct nf_conn *ct = exp->master;
119 113
120 DEBUGP("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); 114 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
121 115
122 /* Connection will come from wherever this packet goes, hence !dir */ 116 /* Connection will come from wherever this packet goes, hence !dir */
123 newip = ct->tuplehash[!dir].tuple.dst.u3.ip; 117 newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
@@ -131,7 +125,7 @@ static unsigned int nf_nat_ftp(struct sk_buff **pskb,
131 /* Try to get same port: if not, try to change it. */ 125 /* Try to get same port: if not, try to change it. */
132 for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { 126 for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
133 exp->tuple.dst.u.tcp.port = htons(port); 127 exp->tuple.dst.u.tcp.port = htons(port);
134 if (nf_conntrack_expect_related(exp) == 0) 128 if (nf_ct_expect_related(exp) == 0)
135 break; 129 break;
136 } 130 }
137 131
@@ -139,7 +133,7 @@ static unsigned int nf_nat_ftp(struct sk_buff **pskb,
139 return NF_DROP; 133 return NF_DROP;
140 134
141 if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo)) { 135 if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo)) {
142 nf_conntrack_unexpect_related(exp); 136 nf_ct_unexpect_related(exp);
143 return NF_DROP; 137 return NF_DROP;
144 } 138 }
145 return NF_ACCEPT; 139 return NF_ACCEPT;
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index c5d2a2d690b8..c1b059a73708 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -21,12 +21,6 @@
21#include <net/netfilter/nf_conntrack_expect.h> 21#include <net/netfilter/nf_conntrack_expect.h>
22#include <linux/netfilter/nf_conntrack_h323.h> 22#include <linux/netfilter/nf_conntrack_h323.h>
23 23
24#if 0
25#define DEBUGP printk
26#else
27#define DEBUGP(format, args...)
28#endif
29
30/****************************************************************************/ 24/****************************************************************************/
31static int set_addr(struct sk_buff **pskb, 25static int set_addr(struct sk_buff **pskb,
32 unsigned char **data, int dataoff, 26 unsigned char **data, int dataoff,
@@ -126,12 +120,11 @@ static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct,
126 (ntohl(addr.ip) & 0xff000000) == 0x7f000000) 120 (ntohl(addr.ip) & 0xff000000) == 0x7f000000)
127 i = 0; 121 i = 0;
128 122
129 DEBUGP 123 pr_debug("nf_nat_ras: set signal address "
130 ("nf_nat_ras: set signal address " 124 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
131 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 125 NIPQUAD(addr.ip), port,
132 NIPQUAD(ip), port, 126 NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip),
133 NIPQUAD(ct->tuplehash[!dir].tuple.dst. 127 info->sig_port[!dir]);
134 ip), info->sig_port[!dir]);
135 return set_h225_addr(pskb, data, 0, &taddr[i], 128 return set_h225_addr(pskb, data, 0, &taddr[i],
136 &ct->tuplehash[!dir]. 129 &ct->tuplehash[!dir].
137 tuple.dst.u3, 130 tuple.dst.u3,
@@ -139,12 +132,11 @@ static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct,
139 } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && 132 } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
140 port == info->sig_port[dir]) { 133 port == info->sig_port[dir]) {
141 /* GK->GW */ 134 /* GK->GW */
142 DEBUGP 135 pr_debug("nf_nat_ras: set signal address "
143 ("nf_nat_ras: set signal address " 136 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
144 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 137 NIPQUAD(addr.ip), port,
145 NIPQUAD(ip), port, 138 NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip),
146 NIPQUAD(ct->tuplehash[!dir].tuple.src. 139 info->sig_port[!dir]);
147 ip), info->sig_port[!dir]);
148 return set_h225_addr(pskb, data, 0, &taddr[i], 140 return set_h225_addr(pskb, data, 0, &taddr[i],
149 &ct->tuplehash[!dir]. 141 &ct->tuplehash[!dir].
150 tuple.src.u3, 142 tuple.src.u3,
@@ -171,12 +163,11 @@ static int set_ras_addr(struct sk_buff **pskb, struct nf_conn *ct,
171 if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && 163 if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
172 addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && 164 addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
173 port == ct->tuplehash[dir].tuple.src.u.udp.port) { 165 port == ct->tuplehash[dir].tuple.src.u.udp.port) {
174 DEBUGP("nf_nat_ras: set rasAddress " 166 pr_debug("nf_nat_ras: set rasAddress "
175 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 167 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
176 NIPQUAD(ip), ntohs(port), 168 NIPQUAD(addr.ip), ntohs(port),
177 NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), 169 NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip),
178 ntohs(ct->tuplehash[!dir].tuple.dst.u.udp. 170 ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port));
179 port));
180 return set_h225_addr(pskb, data, 0, &taddr[i], 171 return set_h225_addr(pskb, data, 0, &taddr[i],
181 &ct->tuplehash[!dir].tuple.dst.u3, 172 &ct->tuplehash[!dir].tuple.dst.u3,
182 ct->tuplehash[!dir].tuple. 173 ct->tuplehash[!dir].tuple.
@@ -237,12 +228,12 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
237 for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port); 228 for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port);
238 nated_port != 0; nated_port += 2) { 229 nated_port != 0; nated_port += 2) {
239 rtp_exp->tuple.dst.u.udp.port = htons(nated_port); 230 rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
240 if (nf_conntrack_expect_related(rtp_exp) == 0) { 231 if (nf_ct_expect_related(rtp_exp) == 0) {
241 rtcp_exp->tuple.dst.u.udp.port = 232 rtcp_exp->tuple.dst.u.udp.port =
242 htons(nated_port + 1); 233 htons(nated_port + 1);
243 if (nf_conntrack_expect_related(rtcp_exp) == 0) 234 if (nf_ct_expect_related(rtcp_exp) == 0)
244 break; 235 break;
245 nf_conntrack_unexpect_related(rtp_exp); 236 nf_ct_unexpect_related(rtp_exp);
246 } 237 }
247 } 238 }
248 239
@@ -261,22 +252,22 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
261 info->rtp_port[i][dir] = rtp_port; 252 info->rtp_port[i][dir] = rtp_port;
262 info->rtp_port[i][!dir] = htons(nated_port); 253 info->rtp_port[i][!dir] = htons(nated_port);
263 } else { 254 } else {
264 nf_conntrack_unexpect_related(rtp_exp); 255 nf_ct_unexpect_related(rtp_exp);
265 nf_conntrack_unexpect_related(rtcp_exp); 256 nf_ct_unexpect_related(rtcp_exp);
266 return -1; 257 return -1;
267 } 258 }
268 259
269 /* Success */ 260 /* Success */
270 DEBUGP("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 261 pr_debug("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
271 NIPQUAD(rtp_exp->tuple.src.ip), 262 NIPQUAD(rtp_exp->tuple.src.u3.ip),
272 ntohs(rtp_exp->tuple.src.u.udp.port), 263 ntohs(rtp_exp->tuple.src.u.udp.port),
273 NIPQUAD(rtp_exp->tuple.dst.ip), 264 NIPQUAD(rtp_exp->tuple.dst.u3.ip),
274 ntohs(rtp_exp->tuple.dst.u.udp.port)); 265 ntohs(rtp_exp->tuple.dst.u.udp.port));
275 DEBUGP("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 266 pr_debug("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
276 NIPQUAD(rtcp_exp->tuple.src.ip), 267 NIPQUAD(rtcp_exp->tuple.src.u3.ip),
277 ntohs(rtcp_exp->tuple.src.u.udp.port), 268 ntohs(rtcp_exp->tuple.src.u.udp.port),
278 NIPQUAD(rtcp_exp->tuple.dst.ip), 269 NIPQUAD(rtcp_exp->tuple.dst.u3.ip),
279 ntohs(rtcp_exp->tuple.dst.u.udp.port)); 270 ntohs(rtcp_exp->tuple.dst.u.udp.port));
280 271
281 return 0; 272 return 0;
282} 273}
@@ -299,7 +290,7 @@ static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct,
299 /* Try to get same port: if not, try to change it. */ 290 /* Try to get same port: if not, try to change it. */
300 for (; nated_port != 0; nated_port++) { 291 for (; nated_port != 0; nated_port++) {
301 exp->tuple.dst.u.tcp.port = htons(nated_port); 292 exp->tuple.dst.u.tcp.port = htons(nated_port);
302 if (nf_conntrack_expect_related(exp) == 0) 293 if (nf_ct_expect_related(exp) == 0)
303 break; 294 break;
304 } 295 }
305 296
@@ -313,13 +304,15 @@ static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct,
313 if (set_h245_addr(pskb, data, dataoff, taddr, 304 if (set_h245_addr(pskb, data, dataoff, taddr,
314 &ct->tuplehash[!dir].tuple.dst.u3, 305 &ct->tuplehash[!dir].tuple.dst.u3,
315 htons(nated_port)) < 0) { 306 htons(nated_port)) < 0) {
316 nf_conntrack_unexpect_related(exp); 307 nf_ct_unexpect_related(exp);
317 return -1; 308 return -1;
318 } 309 }
319 310
320 DEBUGP("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 311 pr_debug("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
321 NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), 312 NIPQUAD(exp->tuple.src.u3.ip),
322 NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); 313 ntohs(exp->tuple.src.u.tcp.port),
314 NIPQUAD(exp->tuple.dst.u3.ip),
315 ntohs(exp->tuple.dst.u.tcp.port));
323 316
324 return 0; 317 return 0;
325} 318}
@@ -347,7 +340,7 @@ static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct,
347 /* Try to get same port: if not, try to change it. */ 340 /* Try to get same port: if not, try to change it. */
348 for (; nated_port != 0; nated_port++) { 341 for (; nated_port != 0; nated_port++) {
349 exp->tuple.dst.u.tcp.port = htons(nated_port); 342 exp->tuple.dst.u.tcp.port = htons(nated_port);
350 if (nf_conntrack_expect_related(exp) == 0) 343 if (nf_ct_expect_related(exp) == 0)
351 break; 344 break;
352 } 345 }
353 346
@@ -365,13 +358,15 @@ static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct,
365 info->sig_port[dir] = port; 358 info->sig_port[dir] = port;
366 info->sig_port[!dir] = htons(nated_port); 359 info->sig_port[!dir] = htons(nated_port);
367 } else { 360 } else {
368 nf_conntrack_unexpect_related(exp); 361 nf_ct_unexpect_related(exp);
369 return -1; 362 return -1;
370 } 363 }
371 364
372 DEBUGP("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 365 pr_debug("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
373 NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), 366 NIPQUAD(exp->tuple.src.u3.ip),
374 NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); 367 ntohs(exp->tuple.src.u.tcp.port),
368 NIPQUAD(exp->tuple.dst.u3.ip),
369 ntohs(exp->tuple.dst.u.tcp.port));
375 370
376 return 0; 371 return 0;
377} 372}
@@ -433,7 +428,7 @@ static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct,
433 /* Try to get same port: if not, try to change it. */ 428 /* Try to get same port: if not, try to change it. */
434 for (; nated_port != 0; nated_port++) { 429 for (; nated_port != 0; nated_port++) {
435 exp->tuple.dst.u.tcp.port = htons(nated_port); 430 exp->tuple.dst.u.tcp.port = htons(nated_port);
436 if (nf_conntrack_expect_related(exp) == 0) 431 if (nf_ct_expect_related(exp) == 0)
437 break; 432 break;
438 } 433 }
439 434
@@ -460,14 +455,16 @@ static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct,
460 info->sig_port[!dir]); 455 info->sig_port[!dir]);
461 } 456 }
462 } else { 457 } else {
463 nf_conntrack_unexpect_related(exp); 458 nf_ct_unexpect_related(exp);
464 return -1; 459 return -1;
465 } 460 }
466 461
467 /* Success */ 462 /* Success */
468 DEBUGP("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 463 pr_debug("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
469 NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), 464 NIPQUAD(exp->tuple.src.u3.ip),
470 NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); 465 ntohs(exp->tuple.src.u.tcp.port),
466 NIPQUAD(exp->tuple.dst.u3.ip),
467 ntohs(exp->tuple.dst.u.tcp.port));
471 468
472 return 0; 469 return 0;
473} 470}
@@ -517,7 +514,7 @@ static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct,
517 /* Try to get same port: if not, try to change it. */ 514 /* Try to get same port: if not, try to change it. */
518 for (nated_port = ntohs(port); nated_port != 0; nated_port++) { 515 for (nated_port = ntohs(port); nated_port != 0; nated_port++) {
519 exp->tuple.dst.u.tcp.port = htons(nated_port); 516 exp->tuple.dst.u.tcp.port = htons(nated_port);
520 if (nf_conntrack_expect_related(exp) == 0) 517 if (nf_ct_expect_related(exp) == 0)
521 break; 518 break;
522 } 519 }
523 520
@@ -531,15 +528,17 @@ static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct,
531 if (!set_h225_addr(pskb, data, dataoff, taddr, 528 if (!set_h225_addr(pskb, data, dataoff, taddr,
532 &ct->tuplehash[!dir].tuple.dst.u3, 529 &ct->tuplehash[!dir].tuple.dst.u3,
533 htons(nated_port)) == 0) { 530 htons(nated_port)) == 0) {
534 nf_conntrack_unexpect_related(exp); 531 nf_ct_unexpect_related(exp);
535 return -1; 532 return -1;
536 } 533 }
537 534
538 /* Success */ 535 /* Success */
539 DEBUGP("nf_nat_q931: expect Call Forwarding " 536 pr_debug("nf_nat_q931: expect Call Forwarding "
540 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", 537 "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
541 NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), 538 NIPQUAD(exp->tuple.src.u3.ip),
542 NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); 539 ntohs(exp->tuple.src.u.tcp.port),
540 NIPQUAD(exp->tuple.dst.u3.ip),
541 ntohs(exp->tuple.dst.u.tcp.port));
543 542
544 return 0; 543 return 0;
545} 544}
@@ -566,8 +565,6 @@ static int __init init(void)
566 rcu_assign_pointer(nat_h245_hook, nat_h245); 565 rcu_assign_pointer(nat_h245_hook, nat_h245);
567 rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); 566 rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
568 rcu_assign_pointer(nat_q931_hook, nat_q931); 567 rcu_assign_pointer(nat_q931_hook, nat_q931);
569
570 DEBUGP("nf_nat_h323: init success\n");
571 return 0; 568 return 0;
572} 569}
573 570
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 15b6e5ce3a04..93d8a0a8f035 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -26,13 +26,9 @@
26#include <net/netfilter/nf_nat_core.h> 26#include <net/netfilter/nf_nat_core.h>
27#include <net/netfilter/nf_nat_helper.h> 27#include <net/netfilter/nf_nat_helper.h>
28 28
29#if 0 29#define DUMP_OFFSET(x) \
30#define DEBUGP printk 30 pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
31#define DUMP_OFFSET(x) printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos); 31 x->offset_before, x->offset_after, x->correction_pos);
32#else
33#define DEBUGP(format, args...)
34#define DUMP_OFFSET(x)
35#endif
36 32
37static DEFINE_SPINLOCK(nf_nat_seqofs_lock); 33static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
38 34
@@ -47,15 +43,15 @@ adjust_tcp_sequence(u32 seq,
47 struct nf_nat_seq *this_way, *other_way; 43 struct nf_nat_seq *this_way, *other_way;
48 struct nf_conn_nat *nat = nfct_nat(ct); 44 struct nf_conn_nat *nat = nfct_nat(ct);
49 45
50 DEBUGP("nf_nat_resize_packet: old_size = %u, new_size = %u\n", 46 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
51 (*skb)->len, new_size); 47 ntohl(seq), seq);
52 48
53 dir = CTINFO2DIR(ctinfo); 49 dir = CTINFO2DIR(ctinfo);
54 50
55 this_way = &nat->info.seq[dir]; 51 this_way = &nat->seq[dir];
56 other_way = &nat->info.seq[!dir]; 52 other_way = &nat->seq[!dir];
57 53
58 DEBUGP("nf_nat_resize_packet: Seq_offset before: "); 54 pr_debug("nf_nat_resize_packet: Seq_offset before: ");
59 DUMP_OFFSET(this_way); 55 DUMP_OFFSET(this_way);
60 56
61 spin_lock_bh(&nf_nat_seqofs_lock); 57 spin_lock_bh(&nf_nat_seqofs_lock);
@@ -72,7 +68,7 @@ adjust_tcp_sequence(u32 seq,
72 } 68 }
73 spin_unlock_bh(&nf_nat_seqofs_lock); 69 spin_unlock_bh(&nf_nat_seqofs_lock);
74 70
75 DEBUGP("nf_nat_resize_packet: Seq_offset after: "); 71 pr_debug("nf_nat_resize_packet: Seq_offset after: ");
76 DUMP_OFFSET(this_way); 72 DUMP_OFFSET(this_way);
77} 73}
78 74
@@ -100,14 +96,12 @@ static void mangle_contents(struct sk_buff *skb,
100 96
101 /* update skb info */ 97 /* update skb info */
102 if (rep_len > match_len) { 98 if (rep_len > match_len) {
103 DEBUGP("nf_nat_mangle_packet: Extending packet by " 99 pr_debug("nf_nat_mangle_packet: Extending packet by "
104 "%u from %u bytes\n", rep_len - match_len, 100 "%u from %u bytes\n", rep_len - match_len, skb->len);
105 skb->len);
106 skb_put(skb, rep_len - match_len); 101 skb_put(skb, rep_len - match_len);
107 } else { 102 } else {
108 DEBUGP("nf_nat_mangle_packet: Shrinking packet from " 103 pr_debug("nf_nat_mangle_packet: Shrinking packet from "
109 "%u from %u bytes\n", match_len - rep_len, 104 "%u from %u bytes\n", match_len - rep_len, skb->len);
110 skb->len);
111 __skb_trim(skb, skb->len + rep_len - match_len); 105 __skb_trim(skb, skb->len + rep_len - match_len);
112 } 106 }
113 107
@@ -178,7 +172,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
178 datalen = (*pskb)->len - iph->ihl*4; 172 datalen = (*pskb)->len - iph->ihl*4;
179 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 173 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
180 if (!(rt->rt_flags & RTCF_LOCAL) && 174 if (!(rt->rt_flags & RTCF_LOCAL) &&
181 (*pskb)->dev->features & NETIF_F_ALL_CSUM) { 175 (*pskb)->dev->features & NETIF_F_V4_CSUM) {
182 (*pskb)->ip_summed = CHECKSUM_PARTIAL; 176 (*pskb)->ip_summed = CHECKSUM_PARTIAL;
183 (*pskb)->csum_start = skb_headroom(*pskb) + 177 (*pskb)->csum_start = skb_headroom(*pskb) +
184 skb_network_offset(*pskb) + 178 skb_network_offset(*pskb) +
@@ -190,7 +184,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
190 tcph->check = 0; 184 tcph->check = 0;
191 tcph->check = tcp_v4_check(datalen, 185 tcph->check = tcp_v4_check(datalen,
192 iph->saddr, iph->daddr, 186 iph->saddr, iph->daddr,
193 csum_partial((char *)tcph, 187 csum_partial(tcph,
194 datalen, 0)); 188 datalen, 0));
195 } 189 }
196 } else 190 } else
@@ -265,7 +259,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
265 259
266 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 260 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
267 if (!(rt->rt_flags & RTCF_LOCAL) && 261 if (!(rt->rt_flags & RTCF_LOCAL) &&
268 (*pskb)->dev->features & NETIF_F_ALL_CSUM) { 262 (*pskb)->dev->features & NETIF_F_V4_CSUM) {
269 (*pskb)->ip_summed = CHECKSUM_PARTIAL; 263 (*pskb)->ip_summed = CHECKSUM_PARTIAL;
270 (*pskb)->csum_start = skb_headroom(*pskb) + 264 (*pskb)->csum_start = skb_headroom(*pskb) +
271 skb_network_offset(*pskb) + 265 skb_network_offset(*pskb) +
@@ -278,7 +272,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
278 udph->check = 0; 272 udph->check = 0;
279 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 273 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
280 datalen, IPPROTO_UDP, 274 datalen, IPPROTO_UDP,
281 csum_partial((char *)udph, 275 csum_partial(udph,
282 datalen, 0)); 276 datalen, 0));
283 if (!udph->check) 277 if (!udph->check)
284 udph->check = CSUM_MANGLED_0; 278 udph->check = CSUM_MANGLED_0;
@@ -320,9 +314,9 @@ sack_adjust(struct sk_buff *skb,
320 new_end_seq = htonl(ntohl(sack->end_seq) 314 new_end_seq = htonl(ntohl(sack->end_seq)
321 - natseq->offset_before); 315 - natseq->offset_before);
322 316
323 DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n", 317 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
324 ntohl(sack->start_seq), new_start_seq, 318 ntohl(sack->start_seq), new_start_seq,
325 ntohl(sack->end_seq), new_end_seq); 319 ntohl(sack->end_seq), new_end_seq);
326 320
327 nf_proto_csum_replace4(&tcph->check, skb, 321 nf_proto_csum_replace4(&tcph->check, skb,
328 sack->start_seq, new_start_seq, 0); 322 sack->start_seq, new_start_seq, 0);
@@ -372,8 +366,7 @@ nf_nat_sack_adjust(struct sk_buff **pskb,
372 op[1] >= 2+TCPOLEN_SACK_PERBLOCK && 366 op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
373 ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) 367 ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
374 sack_adjust(*pskb, tcph, optoff+2, 368 sack_adjust(*pskb, tcph, optoff+2,
375 optoff+op[1], 369 optoff+op[1], &nat->seq[!dir]);
376 &nat->info.seq[!dir]);
377 optoff += op[1]; 370 optoff += op[1];
378 } 371 }
379 } 372 }
@@ -394,8 +387,8 @@ nf_nat_seq_adjust(struct sk_buff **pskb,
394 387
395 dir = CTINFO2DIR(ctinfo); 388 dir = CTINFO2DIR(ctinfo);
396 389
397 this_way = &nat->info.seq[dir]; 390 this_way = &nat->seq[dir];
398 other_way = &nat->info.seq[!dir]; 391 other_way = &nat->seq[!dir];
399 392
400 if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) 393 if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph)))
401 return 0; 394 return 0;
@@ -415,9 +408,9 @@ nf_nat_seq_adjust(struct sk_buff **pskb,
415 nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0); 408 nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0);
416 nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0); 409 nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0);
417 410
418 DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n", 411 pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
419 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), 412 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
420 ntohl(newack)); 413 ntohl(newack));
421 414
422 tcph->seq = newseq; 415 tcph->seq = newseq;
423 tcph->ack_seq = newack; 416 tcph->ack_seq = newack;
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 9b8c0daea744..bcf274bba602 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -22,12 +22,6 @@
22#include <net/netfilter/nf_conntrack_expect.h> 22#include <net/netfilter/nf_conntrack_expect.h>
23#include <linux/netfilter/nf_conntrack_irc.h> 23#include <linux/netfilter/nf_conntrack_irc.h>
24 24
25#if 0
26#define DEBUGP printk
27#else
28#define DEBUGP(format, args...)
29#endif
30
31MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); 25MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
32MODULE_DESCRIPTION("IRC (DCC) NAT helper"); 26MODULE_DESCRIPTION("IRC (DCC) NAT helper");
33MODULE_LICENSE("GPL"); 27MODULE_LICENSE("GPL");
@@ -44,9 +38,6 @@ static unsigned int help(struct sk_buff **pskb,
44 u_int16_t port; 38 u_int16_t port;
45 unsigned int ret; 39 unsigned int ret;
46 40
47 DEBUGP("IRC_NAT: info (seq %u + %u) in %u\n",
48 expect->seq, exp_irc_info->len, ntohl(tcph->seq));
49
50 /* Reply comes from server. */ 41 /* Reply comes from server. */
51 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; 42 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
52 exp->dir = IP_CT_DIR_REPLY; 43 exp->dir = IP_CT_DIR_REPLY;
@@ -55,7 +46,7 @@ static unsigned int help(struct sk_buff **pskb,
55 /* Try to get same port: if not, try to change it. */ 46 /* Try to get same port: if not, try to change it. */
56 for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { 47 for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
57 exp->tuple.dst.u.tcp.port = htons(port); 48 exp->tuple.dst.u.tcp.port = htons(port);
58 if (nf_conntrack_expect_related(exp) == 0) 49 if (nf_ct_expect_related(exp) == 0)
59 break; 50 break;
60 } 51 }
61 52
@@ -64,14 +55,14 @@ static unsigned int help(struct sk_buff **pskb,
64 55
65 ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); 56 ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
66 sprintf(buffer, "%u %u", ip, port); 57 sprintf(buffer, "%u %u", ip, port);
67 DEBUGP("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", 58 pr_debug("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n",
68 buffer, NIPQUAD(ip), port); 59 buffer, NIPQUAD(ip), port);
69 60
70 ret = nf_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, 61 ret = nf_nat_mangle_tcp_packet(pskb, exp->master, ctinfo,
71 matchoff, matchlen, buffer, 62 matchoff, matchlen, buffer,
72 strlen(buffer)); 63 strlen(buffer));
73 if (ret != NF_ACCEPT) 64 if (ret != NF_ACCEPT)
74 nf_conntrack_unexpect_related(exp); 65 nf_ct_unexpect_related(exp);
75 return ret; 66 return ret;
76} 67}
77 68
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index a66888749ceb..984ec8308b2e 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -37,14 +37,6 @@ MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
37MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP"); 37MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
38MODULE_ALIAS("ip_nat_pptp"); 38MODULE_ALIAS("ip_nat_pptp");
39 39
40#if 0
41extern const char *pptp_msg_name[];
42#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
43 __FUNCTION__, ## args)
44#else
45#define DEBUGP(format, args...)
46#endif
47
48static void pptp_nat_expected(struct nf_conn *ct, 40static void pptp_nat_expected(struct nf_conn *ct,
49 struct nf_conntrack_expect *exp) 41 struct nf_conntrack_expect *exp)
50{ 42{
@@ -60,7 +52,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
60 52
61 /* And here goes the grand finale of corrosion... */ 53 /* And here goes the grand finale of corrosion... */
62 if (exp->dir == IP_CT_DIR_ORIGINAL) { 54 if (exp->dir == IP_CT_DIR_ORIGINAL) {
63 DEBUGP("we are PNS->PAC\n"); 55 pr_debug("we are PNS->PAC\n");
64 /* therefore, build tuple for PAC->PNS */ 56 /* therefore, build tuple for PAC->PNS */
65 t.src.l3num = AF_INET; 57 t.src.l3num = AF_INET;
66 t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; 58 t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
@@ -69,7 +61,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
69 t.dst.u.gre.key = ct_pptp_info->pns_call_id; 61 t.dst.u.gre.key = ct_pptp_info->pns_call_id;
70 t.dst.protonum = IPPROTO_GRE; 62 t.dst.protonum = IPPROTO_GRE;
71 } else { 63 } else {
72 DEBUGP("we are PAC->PNS\n"); 64 pr_debug("we are PAC->PNS\n");
73 /* build tuple for PNS->PAC */ 65 /* build tuple for PNS->PAC */
74 t.src.l3num = AF_INET; 66 t.src.l3num = AF_INET;
75 t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; 67 t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
@@ -79,15 +71,15 @@ static void pptp_nat_expected(struct nf_conn *ct,
79 t.dst.protonum = IPPROTO_GRE; 71 t.dst.protonum = IPPROTO_GRE;
80 } 72 }
81 73
82 DEBUGP("trying to unexpect other dir: "); 74 pr_debug("trying to unexpect other dir: ");
83 NF_CT_DUMP_TUPLE(&t); 75 NF_CT_DUMP_TUPLE(&t);
84 other_exp = nf_conntrack_expect_find_get(&t); 76 other_exp = nf_ct_expect_find_get(&t);
85 if (other_exp) { 77 if (other_exp) {
86 nf_conntrack_unexpect_related(other_exp); 78 nf_ct_unexpect_related(other_exp);
87 nf_conntrack_expect_put(other_exp); 79 nf_ct_expect_put(other_exp);
88 DEBUGP("success\n"); 80 pr_debug("success\n");
89 } else { 81 } else {
90 DEBUGP("not found!\n"); 82 pr_debug("not found!\n");
91 } 83 }
92 84
93 /* This must be a fresh one. */ 85 /* This must be a fresh one. */
@@ -161,9 +153,9 @@ pptp_outbound_pkt(struct sk_buff **pskb,
161 cid_off = offsetof(union pptp_ctrl_union, clrreq.callID); 153 cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
162 break; 154 break;
163 default: 155 default:
164 DEBUGP("unknown outbound packet 0x%04x:%s\n", msg, 156 pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
165 (msg <= PPTP_MSG_MAX)? 157 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
166 pptp_msg_name[msg]:pptp_msg_name[0]); 158 pptp_msg_name[0]);
167 /* fall through */ 159 /* fall through */
168 case PPTP_SET_LINK_INFO: 160 case PPTP_SET_LINK_INFO:
169 /* only need to NAT in case PAC is behind NAT box */ 161 /* only need to NAT in case PAC is behind NAT box */
@@ -179,8 +171,8 @@ pptp_outbound_pkt(struct sk_buff **pskb,
179 171
180 /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass 172 /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
181 * down to here */ 173 * down to here */
182 DEBUGP("altering call id from 0x%04x to 0x%04x\n", 174 pr_debug("altering call id from 0x%04x to 0x%04x\n",
183 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); 175 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));
184 176
185 /* mangle packet */ 177 /* mangle packet */
186 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, 178 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
@@ -255,8 +247,9 @@ pptp_inbound_pkt(struct sk_buff **pskb,
255 pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); 247 pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
256 break; 248 break;
257 default: 249 default:
258 DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)? 250 pr_debug("unknown inbound packet %s\n",
259 pptp_msg_name[msg]:pptp_msg_name[0]); 251 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
252 pptp_msg_name[0]);
260 /* fall through */ 253 /* fall through */
261 case PPTP_START_SESSION_REQUEST: 254 case PPTP_START_SESSION_REQUEST:
262 case PPTP_START_SESSION_REPLY: 255 case PPTP_START_SESSION_REPLY:
@@ -272,8 +265,8 @@ pptp_inbound_pkt(struct sk_buff **pskb,
272 * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ 265 * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
273 266
274 /* mangle packet */ 267 /* mangle packet */
275 DEBUGP("altering peer call id from 0x%04x to 0x%04x\n", 268 pr_debug("altering peer call id from 0x%04x to 0x%04x\n",
276 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); 269 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
277 270
278 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, 271 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
279 pcid_off + sizeof(struct pptp_pkt_hdr) + 272 pcid_off + sizeof(struct pptp_pkt_hdr) +
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index c3908bc5a709..2e40cc83526a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -36,13 +36,6 @@ MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); 36MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
37MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); 37MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
38 38
39#if 0
40#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
41 __FUNCTION__, ## args)
42#else
43#define DEBUGP(x, args...)
44#endif
45
46/* is key in given range between min and max */ 39/* is key in given range between min and max */
47static int 40static int
48gre_in_range(const struct nf_conntrack_tuple *tuple, 41gre_in_range(const struct nf_conntrack_tuple *tuple,
@@ -83,7 +76,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
83 keyptr = &tuple->dst.u.gre.key; 76 keyptr = &tuple->dst.u.gre.key;
84 77
85 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { 78 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
86 DEBUGP("%p: NATing GRE PPTP\n", conntrack); 79 pr_debug("%p: NATing GRE PPTP\n", conntrack);
87 min = 1; 80 min = 1;
88 range_size = 0xffff; 81 range_size = 0xffff;
89 } else { 82 } else {
@@ -91,7 +84,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
91 range_size = ntohs(range->max.gre.key) - min + 1; 84 range_size = ntohs(range->max.gre.key) - min + 1;
92 } 85 }
93 86
94 DEBUGP("min = %u, range_size = %u\n", min, range_size); 87 pr_debug("min = %u, range_size = %u\n", min, range_size);
95 88
96 for (i = 0; i < range_size; i++, key++) { 89 for (i = 0; i < range_size; i++, key++) {
97 *keyptr = htons(min + key % range_size); 90 *keyptr = htons(min + key % range_size);
@@ -99,7 +92,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
99 return 1; 92 return 1;
100 } 93 }
101 94
102 DEBUGP("%p: no NAT mapping\n", conntrack); 95 pr_debug("%p: no NAT mapping\n", conntrack);
103 return 0; 96 return 0;
104} 97}
105 98
@@ -132,11 +125,11 @@ gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff,
132 * Try to behave like "nf_nat_proto_unknown" */ 125 * Try to behave like "nf_nat_proto_unknown" */
133 break; 126 break;
134 case GRE_VERSION_PPTP: 127 case GRE_VERSION_PPTP:
135 DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key)); 128 pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
136 pgreh->call_id = tuple->dst.u.gre.key; 129 pgreh->call_id = tuple->dst.u.gre.key;
137 break; 130 break;
138 default: 131 default:
139 DEBUGP("can't nat unknown GRE version\n"); 132 pr_debug("can't nat unknown GRE version\n");
140 return 0; 133 return 0;
141 } 134 }
142 return 1; 135 return 1;
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 6740736c5e79..0f45427e5fdc 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -24,12 +24,6 @@
24#include <net/netfilter/nf_nat_core.h> 24#include <net/netfilter/nf_nat_core.h>
25#include <net/netfilter/nf_nat_rule.h> 25#include <net/netfilter/nf_nat_rule.h>
26 26
27#if 0
28#define DEBUGP printk
29#else
30#define DEBUGP(format, args...)
31#endif
32
33#define NAT_VALID_HOOKS ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_POST_ROUTING) | (1<<NF_IP_LOCAL_OUT)) 27#define NAT_VALID_HOOKS ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_POST_ROUTING) | (1<<NF_IP_LOCAL_OUT))
34 28
35static struct 29static struct
@@ -140,39 +134,39 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
140 return nf_nat_setup_info(ct, &mr->range[0], hooknum); 134 return nf_nat_setup_info(ct, &mr->range[0], hooknum);
141} 135}
142 136
143static int ipt_snat_checkentry(const char *tablename, 137static bool ipt_snat_checkentry(const char *tablename,
144 const void *entry, 138 const void *entry,
145 const struct xt_target *target, 139 const struct xt_target *target,
146 void *targinfo, 140 void *targinfo,
147 unsigned int hook_mask) 141 unsigned int hook_mask)
148{ 142{
149 struct nf_nat_multi_range_compat *mr = targinfo; 143 struct nf_nat_multi_range_compat *mr = targinfo;
150 144
151 /* Must be a valid range */ 145 /* Must be a valid range */
152 if (mr->rangesize != 1) { 146 if (mr->rangesize != 1) {
153 printk("SNAT: multiple ranges no longer supported\n"); 147 printk("SNAT: multiple ranges no longer supported\n");
154 return 0; 148 return false;
155 } 149 }
156 return 1; 150 return true;
157} 151}
158 152
159static int ipt_dnat_checkentry(const char *tablename, 153static bool ipt_dnat_checkentry(const char *tablename,
160 const void *entry, 154 const void *entry,
161 const struct xt_target *target, 155 const struct xt_target *target,
162 void *targinfo, 156 void *targinfo,
163 unsigned int hook_mask) 157 unsigned int hook_mask)
164{ 158{
165 struct nf_nat_multi_range_compat *mr = targinfo; 159 struct nf_nat_multi_range_compat *mr = targinfo;
166 160
167 /* Must be a valid range */ 161 /* Must be a valid range */
168 if (mr->rangesize != 1) { 162 if (mr->rangesize != 1) {
169 printk("DNAT: multiple ranges no longer supported\n"); 163 printk("DNAT: multiple ranges no longer supported\n");
170 return 0; 164 return false;
171 } 165 }
172 return 1; 166 return true;
173} 167}
174 168
175inline unsigned int 169unsigned int
176alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) 170alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
177{ 171{
178 /* Force range to this IP; let proto decide mapping for 172 /* Force range to this IP; let proto decide mapping for
@@ -186,8 +180,8 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
186 struct nf_nat_range range 180 struct nf_nat_range range
187 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; 181 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } };
188 182
189 DEBUGP("Allocating NULL binding for %p (%u.%u.%u.%u)\n", 183 pr_debug("Allocating NULL binding for %p (%u.%u.%u.%u)\n",
190 ct, NIPQUAD(ip)); 184 ct, NIPQUAD(ip));
191 return nf_nat_setup_info(ct, &range, hooknum); 185 return nf_nat_setup_info(ct, &range, hooknum);
192} 186}
193 187
@@ -205,8 +199,8 @@ alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
205 struct nf_nat_range range 199 struct nf_nat_range range
206 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } }; 200 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } };
207 201
208 DEBUGP("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n", 202 pr_debug("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n",
209 ct, NIPQUAD(ip)); 203 ct, NIPQUAD(ip));
210 return nf_nat_setup_info(ct, &range, hooknum); 204 return nf_nat_setup_info(ct, &range, hooknum);
211} 205}
212 206
@@ -228,7 +222,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
228 return ret; 222 return ret;
229} 223}
230 224
231static struct xt_target ipt_snat_reg = { 225static struct xt_target ipt_snat_reg __read_mostly = {
232 .name = "SNAT", 226 .name = "SNAT",
233 .target = ipt_snat_target, 227 .target = ipt_snat_target,
234 .targetsize = sizeof(struct nf_nat_multi_range_compat), 228 .targetsize = sizeof(struct nf_nat_multi_range_compat),
@@ -238,7 +232,7 @@ static struct xt_target ipt_snat_reg = {
238 .family = AF_INET, 232 .family = AF_INET,
239}; 233};
240 234
241static struct xt_target ipt_dnat_reg = { 235static struct xt_target ipt_dnat_reg __read_mostly = {
242 .name = "DNAT", 236 .name = "DNAT",
243 .target = ipt_dnat_target, 237 .target = ipt_dnat_target,
244 .targetsize = sizeof(struct nf_nat_multi_range_compat), 238 .targetsize = sizeof(struct nf_nat_multi_range_compat),
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index fac97cf51ae5..a889ec3ec83a 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -26,12 +26,6 @@ MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
26MODULE_DESCRIPTION("SIP NAT helper"); 26MODULE_DESCRIPTION("SIP NAT helper");
27MODULE_ALIAS("ip_nat_sip"); 27MODULE_ALIAS("ip_nat_sip");
28 28
29#if 0
30#define DEBUGP printk
31#else
32#define DEBUGP(format, args...)
33#endif
34
35struct addr_map { 29struct addr_map {
36 struct { 30 struct {
37 char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 31 char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
@@ -257,10 +251,12 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb,
257 __be32 newip; 251 __be32 newip;
258 u_int16_t port; 252 u_int16_t port;
259 253
260 DEBUGP("ip_nat_sdp():\n");
261
262 /* Connection will come from reply */ 254 /* Connection will come from reply */
263 newip = ct->tuplehash[!dir].tuple.dst.u3.ip; 255 if (ct->tuplehash[dir].tuple.src.u3.ip ==
256 ct->tuplehash[!dir].tuple.dst.u3.ip)
257 newip = exp->tuple.dst.u3.ip;
258 else
259 newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
264 260
265 exp->saved_ip = exp->tuple.dst.u3.ip; 261 exp->saved_ip = exp->tuple.dst.u3.ip;
266 exp->tuple.dst.u3.ip = newip; 262 exp->tuple.dst.u3.ip = newip;
@@ -274,7 +270,7 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb,
274 /* Try to get same port: if not, try to change it. */ 270 /* Try to get same port: if not, try to change it. */
275 for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) { 271 for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) {
276 exp->tuple.dst.u.udp.port = htons(port); 272 exp->tuple.dst.u.udp.port = htons(port);
277 if (nf_conntrack_expect_related(exp) == 0) 273 if (nf_ct_expect_related(exp) == 0)
278 break; 274 break;
279 } 275 }
280 276
@@ -282,7 +278,7 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb,
282 return NF_DROP; 278 return NF_DROP;
283 279
284 if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) { 280 if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) {
285 nf_conntrack_unexpect_related(exp); 281 nf_ct_unexpect_related(exp);
286 return NF_DROP; 282 return NF_DROP;
287 } 283 }
288 return NF_ACCEPT; 284 return NF_ACCEPT;
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 6e88505d6162..6bfcd3a90f08 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1276,9 +1276,6 @@ static struct nf_conntrack_helper snmp_helper __read_mostly = {
1276 .tuple.src.l3num = AF_INET, 1276 .tuple.src.l3num = AF_INET,
1277 .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), 1277 .tuple.src.u.udp.port = __constant_htons(SNMP_PORT),
1278 .tuple.dst.protonum = IPPROTO_UDP, 1278 .tuple.dst.protonum = IPPROTO_UDP,
1279 .mask.src.l3num = 0xFFFF,
1280 .mask.src.u.udp.port = __constant_htons(0xFFFF),
1281 .mask.dst.protonum = 0xFF,
1282}; 1279};
1283 1280
1284static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { 1281static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
@@ -1290,9 +1287,6 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
1290 .tuple.src.l3num = AF_INET, 1287 .tuple.src.l3num = AF_INET,
1291 .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), 1288 .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT),
1292 .tuple.dst.protonum = IPPROTO_UDP, 1289 .tuple.dst.protonum = IPPROTO_UDP,
1293 .mask.src.l3num = 0xFFFF,
1294 .mask.src.u.udp.port = __constant_htons(0xFFFF),
1295 .mask.dst.protonum = 0xFF,
1296}; 1290};
1297 1291
1298/***************************************************************************** 1292/*****************************************************************************
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 55dac36dbc85..332814dac503 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -19,6 +19,7 @@
19 19
20#include <net/netfilter/nf_conntrack.h> 20#include <net/netfilter/nf_conntrack.h>
21#include <net/netfilter/nf_conntrack_core.h> 21#include <net/netfilter/nf_conntrack_core.h>
22#include <net/netfilter/nf_conntrack_extend.h>
22#include <net/netfilter/nf_nat.h> 23#include <net/netfilter/nf_nat.h>
23#include <net/netfilter/nf_nat_rule.h> 24#include <net/netfilter/nf_nat_rule.h>
24#include <net/netfilter/nf_nat_protocol.h> 25#include <net/netfilter/nf_nat_protocol.h>
@@ -26,12 +27,6 @@
26#include <net/netfilter/nf_nat_helper.h> 27#include <net/netfilter/nf_nat_helper.h>
27#include <linux/netfilter_ipv4/ip_tables.h> 28#include <linux/netfilter_ipv4/ip_tables.h>
28 29
29#if 0
30#define DEBUGP printk
31#else
32#define DEBUGP(format, args...)
33#endif
34
35#ifdef CONFIG_XFRM 30#ifdef CONFIG_XFRM
36static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) 31static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
37{ 32{
@@ -113,8 +108,13 @@ nf_nat_fn(unsigned int hooknum,
113 return NF_ACCEPT; 108 return NF_ACCEPT;
114 109
115 nat = nfct_nat(ct); 110 nat = nfct_nat(ct);
116 if (!nat) 111 if (!nat) {
117 return NF_ACCEPT; 112 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
113 if (nat == NULL) {
114 pr_debug("failed to add NAT extension\n");
115 return NF_ACCEPT;
116 }
117 }
118 118
119 switch (ctinfo) { 119 switch (ctinfo) {
120 case IP_CT_RELATED: 120 case IP_CT_RELATED:
@@ -148,9 +148,9 @@ nf_nat_fn(unsigned int hooknum,
148 return ret; 148 return ret;
149 } 149 }
150 } else 150 } else
151 DEBUGP("Already setup manip %s for ct %p\n", 151 pr_debug("Already setup manip %s for ct %p\n",
152 maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST", 152 maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
153 ct); 153 ct);
154 break; 154 break;
155 155
156 default: 156 default:
@@ -264,7 +264,7 @@ nf_nat_adjust(unsigned int hooknum,
264 264
265 ct = nf_ct_get(*pskb, &ctinfo); 265 ct = nf_ct_get(*pskb, &ctinfo);
266 if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { 266 if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
267 DEBUGP("nf_nat_standalone: adjusting sequence number\n"); 267 pr_debug("nf_nat_standalone: adjusting sequence number\n");
268 if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) 268 if (!nf_nat_seq_adjust(pskb, ct, ctinfo))
269 return NF_DROP; 269 return NF_DROP;
270 } 270 }
@@ -326,26 +326,10 @@ static struct nf_hook_ops nf_nat_ops[] = {
326 326
327static int __init nf_nat_standalone_init(void) 327static int __init nf_nat_standalone_init(void)
328{ 328{
329 int size, ret = 0; 329 int ret = 0;
330 330
331 need_conntrack(); 331 need_conntrack();
332 332
333 size = ALIGN(sizeof(struct nf_conn), __alignof__(struct nf_conn_nat)) +
334 sizeof(struct nf_conn_nat);
335 ret = nf_conntrack_register_cache(NF_CT_F_NAT, "nf_nat:base", size);
336 if (ret < 0) {
337 printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n");
338 return ret;
339 }
340
341 size = ALIGN(size, __alignof__(struct nf_conn_help)) +
342 sizeof(struct nf_conn_help);
343 ret = nf_conntrack_register_cache(NF_CT_F_NAT|NF_CT_F_HELP,
344 "nf_nat:help", size);
345 if (ret < 0) {
346 printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n");
347 goto cleanup_register_cache;
348 }
349#ifdef CONFIG_XFRM 333#ifdef CONFIG_XFRM
350 BUG_ON(ip_nat_decode_session != NULL); 334 BUG_ON(ip_nat_decode_session != NULL);
351 ip_nat_decode_session = nat_decode_session; 335 ip_nat_decode_session = nat_decode_session;
@@ -360,7 +344,6 @@ static int __init nf_nat_standalone_init(void)
360 printk("nf_nat_init: can't register hooks.\n"); 344 printk("nf_nat_init: can't register hooks.\n");
361 goto cleanup_rule_init; 345 goto cleanup_rule_init;
362 } 346 }
363 nf_nat_module_is_loaded = 1;
364 return ret; 347 return ret;
365 348
366 cleanup_rule_init: 349 cleanup_rule_init:
@@ -370,9 +353,6 @@ static int __init nf_nat_standalone_init(void)
370 ip_nat_decode_session = NULL; 353 ip_nat_decode_session = NULL;
371 synchronize_net(); 354 synchronize_net();
372#endif 355#endif
373 nf_conntrack_unregister_cache(NF_CT_F_NAT|NF_CT_F_HELP);
374 cleanup_register_cache:
375 nf_conntrack_unregister_cache(NF_CT_F_NAT);
376 return ret; 356 return ret;
377} 357}
378 358
@@ -380,7 +360,6 @@ static void __exit nf_nat_standalone_fini(void)
380{ 360{
381 nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); 361 nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
382 nf_nat_rule_cleanup(); 362 nf_nat_rule_cleanup();
383 nf_nat_module_is_loaded = 0;
384#ifdef CONFIG_XFRM 363#ifdef CONFIG_XFRM
385 ip_nat_decode_session = NULL; 364 ip_nat_decode_session = NULL;
386 synchronize_net(); 365 synchronize_net();
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 2566b79de224..04dfeaefec02 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -30,7 +30,7 @@ static unsigned int help(struct sk_buff **pskb,
30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; 30 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
31 exp->dir = IP_CT_DIR_REPLY; 31 exp->dir = IP_CT_DIR_REPLY;
32 exp->expectfn = nf_nat_follow_master; 32 exp->expectfn = nf_nat_follow_master;
33 if (nf_conntrack_expect_related(exp) != 0) 33 if (nf_ct_expect_related(exp) != 0)
34 return NF_DROP; 34 return NF_DROP;
35 return NF_ACCEPT; 35 return NF_ACCEPT;
36} 36}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 29ca63e81ced..88fa648d7ba3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -101,7 +101,6 @@
101#include <net/tcp.h> 101#include <net/tcp.h>
102#include <net/icmp.h> 102#include <net/icmp.h>
103#include <net/xfrm.h> 103#include <net/xfrm.h>
104#include <net/ip_mp_alg.h>
105#include <net/netevent.h> 104#include <net/netevent.h>
106#include <net/rtnetlink.h> 105#include <net/rtnetlink.h>
107#ifdef CONFIG_SYSCTL 106#ifdef CONFIG_SYSCTL
@@ -168,7 +167,7 @@ static struct dst_ops ipv4_dst_ops = {
168 167
169#define ECN_OR_COST(class) TC_PRIO_##class 168#define ECN_OR_COST(class) TC_PRIO_##class
170 169
171__u8 ip_tos2prio[16] = { 170const __u8 ip_tos2prio[16] = {
172 TC_PRIO_BESTEFFORT, 171 TC_PRIO_BESTEFFORT,
173 ECN_OR_COST(FILLER), 172 ECN_OR_COST(FILLER),
174 TC_PRIO_BESTEFFORT, 173 TC_PRIO_BESTEFFORT,
@@ -495,13 +494,11 @@ static const struct file_operations rt_cpu_seq_fops = {
495 494
496static __inline__ void rt_free(struct rtable *rt) 495static __inline__ void rt_free(struct rtable *rt)
497{ 496{
498 multipath_remove(rt);
499 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); 497 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
500} 498}
501 499
502static __inline__ void rt_drop(struct rtable *rt) 500static __inline__ void rt_drop(struct rtable *rt)
503{ 501{
504 multipath_remove(rt);
505 ip_rt_put(rt); 502 ip_rt_put(rt);
506 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); 503 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
507} 504}
@@ -574,52 +571,6 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
574 (fl1->iif ^ fl2->iif)) == 0; 571 (fl1->iif ^ fl2->iif)) == 0;
575} 572}
576 573
577#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
578static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
579 struct rtable *expentry,
580 int *removed_count)
581{
582 int passedexpired = 0;
583 struct rtable **nextstep = NULL;
584 struct rtable **rthp = chain_head;
585 struct rtable *rth;
586
587 if (removed_count)
588 *removed_count = 0;
589
590 while ((rth = *rthp) != NULL) {
591 if (rth == expentry)
592 passedexpired = 1;
593
594 if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
595 compare_keys(&(*rthp)->fl, &expentry->fl)) {
596 if (*rthp == expentry) {
597 *rthp = rth->u.dst.rt_next;
598 continue;
599 } else {
600 *rthp = rth->u.dst.rt_next;
601 rt_free(rth);
602 if (removed_count)
603 ++(*removed_count);
604 }
605 } else {
606 if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
607 passedexpired && !nextstep)
608 nextstep = &rth->u.dst.rt_next;
609
610 rthp = &rth->u.dst.rt_next;
611 }
612 }
613
614 rt_free(expentry);
615 if (removed_count)
616 ++(*removed_count);
617
618 return nextstep;
619}
620#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
621
622
623/* This runs via a timer and thus is always in BH context. */ 574/* This runs via a timer and thus is always in BH context. */
624static void rt_check_expire(unsigned long dummy) 575static void rt_check_expire(unsigned long dummy)
625{ 576{
@@ -658,22 +609,8 @@ static void rt_check_expire(unsigned long dummy)
658 } 609 }
659 610
660 /* Cleanup aged off entries. */ 611 /* Cleanup aged off entries. */
661#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
662 /* remove all related balanced entries if necessary */
663 if (rth->u.dst.flags & DST_BALANCED) {
664 rthp = rt_remove_balanced_route(
665 &rt_hash_table[i].chain,
666 rth, NULL);
667 if (!rthp)
668 break;
669 } else {
670 *rthp = rth->u.dst.rt_next;
671 rt_free(rth);
672 }
673#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
674 *rthp = rth->u.dst.rt_next; 612 *rthp = rth->u.dst.rt_next;
675 rt_free(rth); 613 rt_free(rth);
676#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
677 } 614 }
678 spin_unlock(rt_hash_lock_addr(i)); 615 spin_unlock(rt_hash_lock_addr(i));
679 616
@@ -721,9 +658,6 @@ void rt_cache_flush(int delay)
721 if (delay < 0) 658 if (delay < 0)
722 delay = ip_rt_min_delay; 659 delay = ip_rt_min_delay;
723 660
724 /* flush existing multipath state*/
725 multipath_flush();
726
727 spin_lock_bh(&rt_flush_lock); 661 spin_lock_bh(&rt_flush_lock);
728 662
729 if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) { 663 if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
@@ -842,30 +776,9 @@ static int rt_garbage_collect(void)
842 rthp = &rth->u.dst.rt_next; 776 rthp = &rth->u.dst.rt_next;
843 continue; 777 continue;
844 } 778 }
845#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
846 /* remove all related balanced entries
847 * if necessary
848 */
849 if (rth->u.dst.flags & DST_BALANCED) {
850 int r;
851
852 rthp = rt_remove_balanced_route(
853 &rt_hash_table[k].chain,
854 rth,
855 &r);
856 goal -= r;
857 if (!rthp)
858 break;
859 } else {
860 *rthp = rth->u.dst.rt_next;
861 rt_free(rth);
862 goal--;
863 }
864#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
865 *rthp = rth->u.dst.rt_next; 779 *rthp = rth->u.dst.rt_next;
866 rt_free(rth); 780 rt_free(rth);
867 goal--; 781 goal--;
868#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
869 } 782 }
870 spin_unlock_bh(rt_hash_lock_addr(k)); 783 spin_unlock_bh(rt_hash_lock_addr(k));
871 if (goal <= 0) 784 if (goal <= 0)
@@ -939,12 +852,7 @@ restart:
939 852
940 spin_lock_bh(rt_hash_lock_addr(hash)); 853 spin_lock_bh(rt_hash_lock_addr(hash));
941 while ((rth = *rthp) != NULL) { 854 while ((rth = *rthp) != NULL) {
942#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
943 if (!(rth->u.dst.flags & DST_BALANCED) &&
944 compare_keys(&rth->fl, &rt->fl)) {
945#else
946 if (compare_keys(&rth->fl, &rt->fl)) { 855 if (compare_keys(&rth->fl, &rt->fl)) {
947#endif
948 /* Put it first */ 856 /* Put it first */
949 *rthp = rth->u.dst.rt_next; 857 *rthp = rth->u.dst.rt_next;
950 /* 858 /*
@@ -1774,10 +1682,6 @@ static inline int __mkroute_input(struct sk_buff *skb,
1774 1682
1775 atomic_set(&rth->u.dst.__refcnt, 1); 1683 atomic_set(&rth->u.dst.__refcnt, 1);
1776 rth->u.dst.flags= DST_HOST; 1684 rth->u.dst.flags= DST_HOST;
1777#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
1778 if (res->fi->fib_nhs > 1)
1779 rth->u.dst.flags |= DST_BALANCED;
1780#endif
1781 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 1685 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1782 rth->u.dst.flags |= DST_NOPOLICY; 1686 rth->u.dst.flags |= DST_NOPOLICY;
1783 if (IN_DEV_CONF_GET(out_dev, NOXFRM)) 1687 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
@@ -1812,11 +1716,11 @@ static inline int __mkroute_input(struct sk_buff *skb,
1812 return err; 1716 return err;
1813} 1717}
1814 1718
1815static inline int ip_mkroute_input_def(struct sk_buff *skb, 1719static inline int ip_mkroute_input(struct sk_buff *skb,
1816 struct fib_result* res, 1720 struct fib_result* res,
1817 const struct flowi *fl, 1721 const struct flowi *fl,
1818 struct in_device *in_dev, 1722 struct in_device *in_dev,
1819 __be32 daddr, __be32 saddr, u32 tos) 1723 __be32 daddr, __be32 saddr, u32 tos)
1820{ 1724{
1821 struct rtable* rth = NULL; 1725 struct rtable* rth = NULL;
1822 int err; 1726 int err;
@@ -1837,63 +1741,6 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
1837 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 1741 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1838} 1742}
1839 1743
1840static inline int ip_mkroute_input(struct sk_buff *skb,
1841 struct fib_result* res,
1842 const struct flowi *fl,
1843 struct in_device *in_dev,
1844 __be32 daddr, __be32 saddr, u32 tos)
1845{
1846#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
1847 struct rtable* rth = NULL, *rtres;
1848 unsigned char hop, hopcount;
1849 int err = -EINVAL;
1850 unsigned int hash;
1851
1852 if (res->fi)
1853 hopcount = res->fi->fib_nhs;
1854 else
1855 hopcount = 1;
1856
1857 /* distinguish between multipath and singlepath */
1858 if (hopcount < 2)
1859 return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
1860 saddr, tos);
1861
1862 /* add all alternatives to the routing cache */
1863 for (hop = 0; hop < hopcount; hop++) {
1864 res->nh_sel = hop;
1865
1866 /* put reference to previous result */
1867 if (hop)
1868 ip_rt_put(rtres);
1869
1870 /* create a routing cache entry */
1871 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
1872 &rth);
1873 if (err)
1874 return err;
1875
1876 /* put it into the cache */
1877 hash = rt_hash(daddr, saddr, fl->iif);
1878 err = rt_intern_hash(hash, rth, &rtres);
1879 if (err)
1880 return err;
1881
1882 /* forward hop information to multipath impl. */
1883 multipath_set_nhinfo(rth,
1884 FIB_RES_NETWORK(*res),
1885 FIB_RES_NETMASK(*res),
1886 res->prefixlen,
1887 &FIB_RES_NH(*res));
1888 }
1889 skb->dst = &rtres->u.dst;
1890 return err;
1891#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
1892 return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
1893#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
1894}
1895
1896
1897/* 1744/*
1898 * NOTE. We drop all the packets that has local source 1745 * NOTE. We drop all the packets that has local source
1899 * addresses, because every properly looped back packet 1746 * addresses, because every properly looped back packet
@@ -2211,13 +2058,6 @@ static inline int __mkroute_output(struct rtable **result,
2211 2058
2212 atomic_set(&rth->u.dst.__refcnt, 1); 2059 atomic_set(&rth->u.dst.__refcnt, 1);
2213 rth->u.dst.flags= DST_HOST; 2060 rth->u.dst.flags= DST_HOST;
2214#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2215 if (res->fi) {
2216 rth->rt_multipath_alg = res->fi->fib_mp_alg;
2217 if (res->fi->fib_nhs > 1)
2218 rth->u.dst.flags |= DST_BALANCED;
2219 }
2220#endif
2221 if (IN_DEV_CONF_GET(in_dev, NOXFRM)) 2061 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2222 rth->u.dst.flags |= DST_NOXFRM; 2062 rth->u.dst.flags |= DST_NOXFRM;
2223 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 2063 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
@@ -2277,12 +2117,12 @@ static inline int __mkroute_output(struct rtable **result,
2277 return err; 2117 return err;
2278} 2118}
2279 2119
2280static inline int ip_mkroute_output_def(struct rtable **rp, 2120static inline int ip_mkroute_output(struct rtable **rp,
2281 struct fib_result* res, 2121 struct fib_result* res,
2282 const struct flowi *fl, 2122 const struct flowi *fl,
2283 const struct flowi *oldflp, 2123 const struct flowi *oldflp,
2284 struct net_device *dev_out, 2124 struct net_device *dev_out,
2285 unsigned flags) 2125 unsigned flags)
2286{ 2126{
2287 struct rtable *rth = NULL; 2127 struct rtable *rth = NULL;
2288 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); 2128 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
@@ -2295,68 +2135,6 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
2295 return err; 2135 return err;
2296} 2136}
2297 2137
2298static inline int ip_mkroute_output(struct rtable** rp,
2299 struct fib_result* res,
2300 const struct flowi *fl,
2301 const struct flowi *oldflp,
2302 struct net_device *dev_out,
2303 unsigned flags)
2304{
2305#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2306 unsigned char hop;
2307 unsigned hash;
2308 int err = -EINVAL;
2309 struct rtable *rth = NULL;
2310
2311 if (res->fi && res->fi->fib_nhs > 1) {
2312 unsigned char hopcount = res->fi->fib_nhs;
2313
2314 for (hop = 0; hop < hopcount; hop++) {
2315 struct net_device *dev2nexthop;
2316
2317 res->nh_sel = hop;
2318
2319 /* hold a work reference to the output device */
2320 dev2nexthop = FIB_RES_DEV(*res);
2321 dev_hold(dev2nexthop);
2322
2323 /* put reference to previous result */
2324 if (hop)
2325 ip_rt_put(*rp);
2326
2327 err = __mkroute_output(&rth, res, fl, oldflp,
2328 dev2nexthop, flags);
2329
2330 if (err != 0)
2331 goto cleanup;
2332
2333 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src,
2334 oldflp->oif);
2335 err = rt_intern_hash(hash, rth, rp);
2336
2337 /* forward hop information to multipath impl. */
2338 multipath_set_nhinfo(rth,
2339 FIB_RES_NETWORK(*res),
2340 FIB_RES_NETMASK(*res),
2341 res->prefixlen,
2342 &FIB_RES_NH(*res));
2343 cleanup:
2344 /* release work reference to output device */
2345 dev_put(dev2nexthop);
2346
2347 if (err != 0)
2348 return err;
2349 }
2350 return err;
2351 } else {
2352 return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out,
2353 flags);
2354 }
2355#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
2356 return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, flags);
2357#endif
2358}
2359
2360/* 2138/*
2361 * Major route resolver routine. 2139 * Major route resolver routine.
2362 */ 2140 */
@@ -2570,17 +2348,6 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2570 rth->fl.mark == flp->mark && 2348 rth->fl.mark == flp->mark &&
2571 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2349 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2572 (IPTOS_RT_MASK | RTO_ONLINK))) { 2350 (IPTOS_RT_MASK | RTO_ONLINK))) {
2573
2574 /* check for multipath routes and choose one if
2575 * necessary
2576 */
2577 if (multipath_select_route(flp, rth, rp)) {
2578 dst_hold(&(*rp)->u.dst);
2579 RT_CACHE_STAT_INC(out_hit);
2580 rcu_read_unlock_bh();
2581 return 0;
2582 }
2583
2584 rth->u.dst.lastuse = jiffies; 2351 rth->u.dst.lastuse = jiffies;
2585 dst_hold(&rth->u.dst); 2352 dst_hold(&rth->u.dst);
2586 rth->u.dst.__use++; 2353 rth->u.dst.__use++;
@@ -2729,10 +2496,6 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2729 if (rt->u.dst.tclassid) 2496 if (rt->u.dst.tclassid)
2730 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid); 2497 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
2731#endif 2498#endif
2732#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2733 if (rt->rt_multipath_alg != IP_MP_ALG_NONE)
2734 NLA_PUT_U32(skb, RTA_MP_ALGO, rt->rt_multipath_alg);
2735#endif
2736 if (rt->fl.iif) 2499 if (rt->fl.iif)
2737 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2500 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2738 else if (rt->rt_src != rt->fl.fl4_src) 2501 else if (rt->rt_src != rt->fl.fl4_src)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 354721d67f69..3f5f7423b95c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2045,10 +2045,7 @@ static void *established_get_first(struct seq_file *seq)
2045 struct hlist_node *node; 2045 struct hlist_node *node;
2046 struct inet_timewait_sock *tw; 2046 struct inet_timewait_sock *tw;
2047 2047
2048 /* We can reschedule _before_ having picked the target: */ 2048 read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
2049 cond_resched_softirq();
2050
2051 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
2052 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { 2049 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2053 if (sk->sk_family != st->family) { 2050 if (sk->sk_family != st->family) {
2054 continue; 2051 continue;
@@ -2065,7 +2062,7 @@ static void *established_get_first(struct seq_file *seq)
2065 rc = tw; 2062 rc = tw;
2066 goto out; 2063 goto out;
2067 } 2064 }
2068 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); 2065 read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
2069 st->state = TCP_SEQ_STATE_ESTABLISHED; 2066 st->state = TCP_SEQ_STATE_ESTABLISHED;
2070 } 2067 }
2071out: 2068out:
@@ -2092,14 +2089,11 @@ get_tw:
2092 cur = tw; 2089 cur = tw;
2093 goto out; 2090 goto out;
2094 } 2091 }
2095 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); 2092 read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
2096 st->state = TCP_SEQ_STATE_ESTABLISHED; 2093 st->state = TCP_SEQ_STATE_ESTABLISHED;
2097 2094
2098 /* We can reschedule between buckets: */
2099 cond_resched_softirq();
2100
2101 if (++st->bucket < tcp_hashinfo.ehash_size) { 2095 if (++st->bucket < tcp_hashinfo.ehash_size) {
2102 read_lock(&tcp_hashinfo.ehash[st->bucket].lock); 2096 read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
2103 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); 2097 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2104 } else { 2098 } else {
2105 cur = NULL; 2099 cur = NULL;
@@ -2144,7 +2138,6 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2144 2138
2145 if (!rc) { 2139 if (!rc) {
2146 inet_listen_unlock(&tcp_hashinfo); 2140 inet_listen_unlock(&tcp_hashinfo);
2147 local_bh_disable();
2148 st->state = TCP_SEQ_STATE_ESTABLISHED; 2141 st->state = TCP_SEQ_STATE_ESTABLISHED;
2149 rc = established_get_idx(seq, pos); 2142 rc = established_get_idx(seq, pos);
2150 } 2143 }
@@ -2177,7 +2170,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2177 rc = listening_get_next(seq, v); 2170 rc = listening_get_next(seq, v);
2178 if (!rc) { 2171 if (!rc) {
2179 inet_listen_unlock(&tcp_hashinfo); 2172 inet_listen_unlock(&tcp_hashinfo);
2180 local_bh_disable();
2181 st->state = TCP_SEQ_STATE_ESTABLISHED; 2173 st->state = TCP_SEQ_STATE_ESTABLISHED;
2182 rc = established_get_first(seq); 2174 rc = established_get_first(seq);
2183 } 2175 }
@@ -2209,8 +2201,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
2209 case TCP_SEQ_STATE_TIME_WAIT: 2201 case TCP_SEQ_STATE_TIME_WAIT:
2210 case TCP_SEQ_STATE_ESTABLISHED: 2202 case TCP_SEQ_STATE_ESTABLISHED:
2211 if (v) 2203 if (v)
2212 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); 2204 read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
2213 local_bh_enable();
2214 break; 2205 break;
2215 } 2206 }
2216} 2207}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53232dd6fb48..20aea1595c4d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -699,6 +699,14 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
699 tp->fackets_out -= diff; 699 tp->fackets_out -= diff;
700 if ((int)tp->fackets_out < 0) 700 if ((int)tp->fackets_out < 0)
701 tp->fackets_out = 0; 701 tp->fackets_out = 0;
702 /* SACK fastpath might overwrite it unless dealt with */
703 if (tp->fastpath_skb_hint != NULL &&
704 after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq,
705 TCP_SKB_CB(skb)->seq)) {
706 tp->fastpath_cnt_hint -= diff;
707 if ((int)tp->fastpath_cnt_hint < 0)
708 tp->fastpath_cnt_hint = 0;
709 }
702 } 710 }
703 } 711 }
704 712
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index d9323dfff826..86624fabc4bf 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -6,8 +6,7 @@
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 9 * the Free Software Foundation; either version 2 of the License.
10 * (at your option) any later version.
11 * 10 *
12 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,23 +24,22 @@
25#include <linux/tcp.h> 24#include <linux/tcp.h>
26#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <linux/kfifo.h>
29#include <linux/ktime.h> 27#include <linux/ktime.h>
30#include <linux/time.h> 28#include <linux/time.h>
31#include <linux/vmalloc.h>
32 29
33#include <net/tcp.h> 30#include <net/tcp.h>
34 31
35MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); 32MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
36MODULE_DESCRIPTION("TCP cwnd snooper"); 33MODULE_DESCRIPTION("TCP cwnd snooper");
37MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35MODULE_VERSION("1.1");
38 36
39static int port __read_mostly = 0; 37static int port __read_mostly = 0;
40MODULE_PARM_DESC(port, "Port to match (0=all)"); 38MODULE_PARM_DESC(port, "Port to match (0=all)");
41module_param(port, int, 0); 39module_param(port, int, 0);
42 40
43static int bufsize __read_mostly = 64*1024; 41static int bufsize __read_mostly = 4096;
44MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); 42MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
45module_param(bufsize, int, 0); 43module_param(bufsize, int, 0);
46 44
47static int full __read_mostly; 45static int full __read_mostly;
@@ -50,39 +48,38 @@ module_param(full, int, 0);
50 48
51static const char procname[] = "tcpprobe"; 49static const char procname[] = "tcpprobe";
52 50
53struct { 51struct tcp_log {
54 struct kfifo *fifo; 52 ktime_t tstamp;
53 __be32 saddr, daddr;
54 __be16 sport, dport;
55 u16 length;
56 u32 snd_nxt;
57 u32 snd_una;
58 u32 snd_wnd;
59 u32 snd_cwnd;
60 u32 ssthresh;
61 u32 srtt;
62};
63
64static struct {
55 spinlock_t lock; 65 spinlock_t lock;
56 wait_queue_head_t wait; 66 wait_queue_head_t wait;
57 ktime_t start; 67 ktime_t start;
58 u32 lastcwnd; 68 u32 lastcwnd;
59} tcpw;
60 69
61/* 70 unsigned long head, tail;
62 * Print to log with timestamps. 71 struct tcp_log *log;
63 * FIXME: causes an extra copy 72} tcp_probe;
64 */ 73
65static void printl(const char *fmt, ...)
66 __attribute__ ((format (printf, 1, 2)));
67 74
68static void printl(const char *fmt, ...) 75static inline int tcp_probe_used(void)
69{ 76{
70 va_list args; 77 return (tcp_probe.head - tcp_probe.tail) % bufsize;
71 int len; 78}
72 struct timespec tv; 79
73 char tbuf[256]; 80static inline int tcp_probe_avail(void)
74 81{
75 va_start(args, fmt); 82 return bufsize - tcp_probe_used();
76 /* want monotonic time since start of tcp_probe */
77 tv = ktime_to_timespec(ktime_sub(ktime_get(), tcpw.start));
78
79 len = sprintf(tbuf, "%lu.%09lu ",
80 (unsigned long) tv.tv_sec, (unsigned long) tv.tv_nsec);
81 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
82 va_end(args);
83
84 kfifo_put(tcpw.fifo, tbuf, len);
85 wake_up(&tcpw.wait);
86} 83}
87 84
88/* 85/*
@@ -97,63 +94,117 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
97 94
98 /* Only update if port matches */ 95 /* Only update if port matches */
99 if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port) 96 if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port)
100 && (full || tp->snd_cwnd != tcpw.lastcwnd)) { 97 && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
101 printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u %u\n", 98
102 NIPQUAD(inet->saddr), ntohs(inet->sport), 99 spin_lock(&tcp_probe.lock);
103 NIPQUAD(inet->daddr), ntohs(inet->dport), 100 /* If log fills, just silently drop */
104 skb->len, tp->snd_nxt, tp->snd_una, 101 if (tcp_probe_avail() > 1) {
105 tp->snd_cwnd, tcp_current_ssthresh(sk), 102 struct tcp_log *p = tcp_probe.log + tcp_probe.head;
106 tp->snd_wnd, tp->srtt >> 3); 103
107 tcpw.lastcwnd = tp->snd_cwnd; 104 p->tstamp = ktime_get();
105 p->saddr = inet->saddr;
106 p->sport = inet->sport;
107 p->daddr = inet->daddr;
108 p->dport = inet->dport;
109 p->length = skb->len;
110 p->snd_nxt = tp->snd_nxt;
111 p->snd_una = tp->snd_una;
112 p->snd_cwnd = tp->snd_cwnd;
113 p->snd_wnd = tp->snd_wnd;
114 p->srtt = tp->srtt >> 3;
115
116 tcp_probe.head = (tcp_probe.head + 1) % bufsize;
117 }
118 tcp_probe.lastcwnd = tp->snd_cwnd;
119 spin_unlock(&tcp_probe.lock);
120
121 wake_up(&tcp_probe.wait);
108 } 122 }
109 123
110 jprobe_return(); 124 jprobe_return();
111 return 0; 125 return 0;
112} 126}
113 127
114static struct jprobe tcp_probe = { 128static struct jprobe tcp_jprobe = {
115 .kp = { 129 .kp = {
116 .symbol_name = "tcp_rcv_established", 130 .symbol_name = "tcp_rcv_established",
117 }, 131 },
118 .entry = JPROBE_ENTRY(jtcp_rcv_established), 132 .entry = JPROBE_ENTRY(jtcp_rcv_established),
119}; 133};
120 134
121
122static int tcpprobe_open(struct inode * inode, struct file * file) 135static int tcpprobe_open(struct inode * inode, struct file * file)
123{ 136{
124 kfifo_reset(tcpw.fifo); 137 /* Reset (empty) log */
125 tcpw.start = ktime_get(); 138 spin_lock_bh(&tcp_probe.lock);
139 tcp_probe.head = tcp_probe.tail = 0;
140 tcp_probe.start = ktime_get();
141 spin_unlock_bh(&tcp_probe.lock);
142
126 return 0; 143 return 0;
127} 144}
128 145
146static int tcpprobe_sprint(char *tbuf, int n)
147{
148 const struct tcp_log *p
149 = tcp_probe.log + tcp_probe.tail % bufsize;
150 struct timespec tv
151 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
152
153 return snprintf(tbuf, n,
154 "%lu.%09lu %d.%d.%d.%d:%u %d.%d.%d.%d:%u"
155 " %d %#x %#x %u %u %u %u\n",
156 (unsigned long) tv.tv_sec,
157 (unsigned long) tv.tv_nsec,
158 NIPQUAD(p->saddr), ntohs(p->sport),
159 NIPQUAD(p->daddr), ntohs(p->dport),
160 p->length, p->snd_nxt, p->snd_una,
161 p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt);
162}
163
129static ssize_t tcpprobe_read(struct file *file, char __user *buf, 164static ssize_t tcpprobe_read(struct file *file, char __user *buf,
130 size_t len, loff_t *ppos) 165 size_t len, loff_t *ppos)
131{ 166{
132 int error = 0, cnt = 0; 167 int error = 0, cnt = 0;
133 unsigned char *tbuf;
134 168
135 if (!buf || len < 0) 169 if (!buf || len < 0)
136 return -EINVAL; 170 return -EINVAL;
137 171
138 if (len == 0) 172 while (cnt < len) {
139 return 0; 173 char tbuf[128];
174 int width;
175
176 /* Wait for data in buffer */
177 error = wait_event_interruptible(tcp_probe.wait,
178 tcp_probe_used() > 0);
179 if (error)
180 break;
140 181
141 tbuf = vmalloc(len); 182 spin_lock_bh(&tcp_probe.lock);
142 if (!tbuf) 183 if (tcp_probe.head == tcp_probe.tail) {
143 return -ENOMEM; 184 /* multiple readers race? */
185 spin_unlock_bh(&tcp_probe.lock);
186 continue;
187 }
144 188
145 error = wait_event_interruptible(tcpw.wait, 189 width = tcpprobe_sprint(tbuf, sizeof(tbuf));
146 __kfifo_len(tcpw.fifo) != 0);
147 if (error)
148 goto out_free;
149 190
150 cnt = kfifo_get(tcpw.fifo, tbuf, len); 191 if (width < len)
151 error = copy_to_user(buf, tbuf, cnt); 192 tcp_probe.tail = (tcp_probe.tail + 1) % bufsize;
152 193
153out_free: 194 spin_unlock_bh(&tcp_probe.lock);
154 vfree(tbuf); 195
196 /* if record greater than space available
197 return partial buffer (so far) */
198 if (width >= len)
199 break;
200
201 error = copy_to_user(buf + cnt, tbuf, width);
202 if (error)
203 break;
204 cnt += width;
205 }
155 206
156 return error ? error : cnt; 207 return cnt == 0 ? error : cnt;
157} 208}
158 209
159static const struct file_operations tcpprobe_fops = { 210static const struct file_operations tcpprobe_fops = {
@@ -166,34 +217,37 @@ static __init int tcpprobe_init(void)
166{ 217{
167 int ret = -ENOMEM; 218 int ret = -ENOMEM;
168 219
169 init_waitqueue_head(&tcpw.wait); 220 init_waitqueue_head(&tcp_probe.wait);
170 spin_lock_init(&tcpw.lock); 221 spin_lock_init(&tcp_probe.lock);
171 tcpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &tcpw.lock); 222
172 if (IS_ERR(tcpw.fifo)) 223 if (bufsize < 0)
173 return PTR_ERR(tcpw.fifo); 224 return -EINVAL;
225
226 tcp_probe.log = kcalloc(sizeof(struct tcp_log), bufsize, GFP_KERNEL);
227 if (!tcp_probe.log)
228 goto err0;
174 229
175 if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops)) 230 if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops))
176 goto err0; 231 goto err0;
177 232
178 ret = register_jprobe(&tcp_probe); 233 ret = register_jprobe(&tcp_jprobe);
179 if (ret) 234 if (ret)
180 goto err1; 235 goto err1;
181 236
182 pr_info("TCP watch registered (port=%d)\n", port); 237 pr_info("TCP probe registered (port=%d)\n", port);
183 return 0; 238 return 0;
184 err1: 239 err1:
185 proc_net_remove(procname); 240 proc_net_remove(procname);
186 err0: 241 err0:
187 kfifo_free(tcpw.fifo); 242 kfree(tcp_probe.log);
188 return ret; 243 return ret;
189} 244}
190module_init(tcpprobe_init); 245module_init(tcpprobe_init);
191 246
192static __exit void tcpprobe_exit(void) 247static __exit void tcpprobe_exit(void)
193{ 248{
194 kfifo_free(tcpw.fifo);
195 proc_net_remove(procname); 249 proc_net_remove(procname);
196 unregister_jprobe(&tcp_probe); 250 unregister_jprobe(&tcp_jprobe);
197 251 kfree(tcp_probe.log);
198} 252}
199module_exit(tcpprobe_exit); 253module_exit(tcpprobe_exit);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index facb7e29304e..28355350fb62 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -70,6 +70,7 @@
70 * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind 70 * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
71 * a single port at the same time. 71 * a single port at the same time.
72 * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support 72 * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
73 * James Chapman : Add L2TP encapsulation type.
73 * 74 *
74 * 75 *
75 * This program is free software; you can redistribute it and/or 76 * This program is free software; you can redistribute it and/or
@@ -919,104 +920,6 @@ int udp_disconnect(struct sock *sk, int flags)
919 return 0; 920 return 0;
920} 921}
921 922
922/* return:
923 * 1 if the UDP system should process it
924 * 0 if we should drop this packet
925 * -1 if it should get processed by xfrm4_rcv_encap
926 */
927static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
928{
929#ifndef CONFIG_XFRM
930 return 1;
931#else
932 struct udp_sock *up = udp_sk(sk);
933 struct udphdr *uh;
934 struct iphdr *iph;
935 int iphlen, len;
936
937 __u8 *udpdata;
938 __be32 *udpdata32;
939 __u16 encap_type = up->encap_type;
940
941 /* if we're overly short, let UDP handle it */
942 len = skb->len - sizeof(struct udphdr);
943 if (len <= 0)
944 return 1;
945
946 /* if this is not encapsulated socket, then just return now */
947 if (!encap_type)
948 return 1;
949
950 /* If this is a paged skb, make sure we pull up
951 * whatever data we need to look at. */
952 if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
953 return 1;
954
955 /* Now we can get the pointers */
956 uh = udp_hdr(skb);
957 udpdata = (__u8 *)uh + sizeof(struct udphdr);
958 udpdata32 = (__be32 *)udpdata;
959
960 switch (encap_type) {
961 default:
962 case UDP_ENCAP_ESPINUDP:
963 /* Check if this is a keepalive packet. If so, eat it. */
964 if (len == 1 && udpdata[0] == 0xff) {
965 return 0;
966 } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
967 /* ESP Packet without Non-ESP header */
968 len = sizeof(struct udphdr);
969 } else
970 /* Must be an IKE packet.. pass it through */
971 return 1;
972 break;
973 case UDP_ENCAP_ESPINUDP_NON_IKE:
974 /* Check if this is a keepalive packet. If so, eat it. */
975 if (len == 1 && udpdata[0] == 0xff) {
976 return 0;
977 } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
978 udpdata32[0] == 0 && udpdata32[1] == 0) {
979
980 /* ESP Packet with Non-IKE marker */
981 len = sizeof(struct udphdr) + 2 * sizeof(u32);
982 } else
983 /* Must be an IKE packet.. pass it through */
984 return 1;
985 break;
986 }
987
988 /* At this point we are sure that this is an ESPinUDP packet,
989 * so we need to remove 'len' bytes from the packet (the UDP
990 * header and optional ESP marker bytes) and then modify the
991 * protocol to ESP, and then call into the transform receiver.
992 */
993 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
994 return 0;
995
996 /* Now we can update and verify the packet length... */
997 iph = ip_hdr(skb);
998 iphlen = iph->ihl << 2;
999 iph->tot_len = htons(ntohs(iph->tot_len) - len);
1000 if (skb->len < iphlen + len) {
1001 /* packet is too small!?! */
1002 return 0;
1003 }
1004
1005 /* pull the data buffer up to the ESP header and set the
1006 * transport header to point to ESP. Keep UDP on the stack
1007 * for later.
1008 */
1009 __skb_pull(skb, len);
1010 skb_reset_transport_header(skb);
1011
1012 /* modify the protocol (it's ESP!) */
1013 iph->protocol = IPPROTO_ESP;
1014
1015 /* and let the caller know to send this into the ESP processor... */
1016 return -1;
1017#endif
1018}
1019
1020/* returns: 923/* returns:
1021 * -1: error 924 * -1: error
1022 * 0: success 925 * 0: success
@@ -1039,28 +942,28 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
1039 942
1040 if (up->encap_type) { 943 if (up->encap_type) {
1041 /* 944 /*
1042 * This is an encapsulation socket, so let's see if this is 945 * This is an encapsulation socket so pass the skb to
1043 * an encapsulated packet. 946 * the socket's udp_encap_rcv() hook. Otherwise, just
1044 * If it's a keepalive packet, then just eat it. 947 * fall through and pass this up the UDP socket.
1045 * If it's an encapsulateed packet, then pass it to the 948 * up->encap_rcv() returns the following value:
1046 * IPsec xfrm input and return the response 949 * =0 if skb was successfully passed to the encap
1047 * appropriately. Otherwise, just fall through and 950 * handler or was discarded by it.
1048 * pass this up the UDP socket. 951 * >0 if skb should be passed on to UDP.
952 * <0 if skb should be resubmitted as proto -N
1049 */ 953 */
1050 int ret;
1051 954
1052 ret = udp_encap_rcv(sk, skb); 955 /* if we're overly short, let UDP handle it */
1053 if (ret == 0) { 956 if (skb->len > sizeof(struct udphdr) &&
1054 /* Eat the packet .. */ 957 up->encap_rcv != NULL) {
1055 kfree_skb(skb); 958 int ret;
1056 return 0; 959
1057 } 960 ret = (*up->encap_rcv)(sk, skb);
1058 if (ret < 0) { 961 if (ret <= 0) {
1059 /* process the ESP packet */ 962 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
1060 ret = xfrm4_rcv_encap(skb, up->encap_type); 963 return -ret;
1061 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag); 964 }
1062 return -ret;
1063 } 965 }
966
1064 /* FALLTHROUGH -- it's a UDP Packet */ 967 /* FALLTHROUGH -- it's a UDP Packet */
1065 } 968 }
1066 969
@@ -1349,6 +1252,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1349 case 0: 1252 case 0:
1350 case UDP_ENCAP_ESPINUDP: 1253 case UDP_ENCAP_ESPINUDP:
1351 case UDP_ENCAP_ESPINUDP_NON_IKE: 1254 case UDP_ENCAP_ESPINUDP_NON_IKE:
1255 up->encap_rcv = xfrm4_udp_encap_rcv;
1256 /* FALLTHROUGH */
1257 case UDP_ENCAP_L2TPINUDP:
1352 up->encap_type = val; 1258 up->encap_type = val;
1353 break; 1259 break;
1354 default: 1260 default:
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index fa1902dc81b8..2fa108245413 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -16,13 +16,6 @@
16#include <net/ip.h> 16#include <net/ip.h>
17#include <net/xfrm.h> 17#include <net/xfrm.h>
18 18
19int xfrm4_rcv(struct sk_buff *skb)
20{
21 return xfrm4_rcv_encap(skb, 0);
22}
23
24EXPORT_SYMBOL(xfrm4_rcv);
25
26static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) 19static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
27{ 20{
28 switch (nexthdr) { 21 switch (nexthdr) {
@@ -53,7 +46,7 @@ drop:
53} 46}
54#endif 47#endif
55 48
56int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) 49static int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
57{ 50{
58 __be32 spi, seq; 51 __be32 spi, seq;
59 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH]; 52 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
@@ -167,3 +160,108 @@ drop:
167 kfree_skb(skb); 160 kfree_skb(skb);
168 return 0; 161 return 0;
169} 162}
163
164/* If it's a keepalive packet, then just eat it.
165 * If it's an encapsulated packet, then pass it to the
166 * IPsec xfrm input.
167 * Returns 0 if skb passed to xfrm or was dropped.
168 * Returns >0 if skb should be passed to UDP.
169 * Returns <0 if skb should be resubmitted (-ret is protocol)
170 */
171int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
172{
173 struct udp_sock *up = udp_sk(sk);
174 struct udphdr *uh;
175 struct iphdr *iph;
176 int iphlen, len;
177 int ret;
178
179 __u8 *udpdata;
180 __be32 *udpdata32;
181 __u16 encap_type = up->encap_type;
182
183 /* if this is not encapsulated socket, then just return now */
184 if (!encap_type)
185 return 1;
186
187 /* If this is a paged skb, make sure we pull up
188 * whatever data we need to look at. */
189 len = skb->len - sizeof(struct udphdr);
190 if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
191 return 1;
192
193 /* Now we can get the pointers */
194 uh = udp_hdr(skb);
195 udpdata = (__u8 *)uh + sizeof(struct udphdr);
196 udpdata32 = (__be32 *)udpdata;
197
198 switch (encap_type) {
199 default:
200 case UDP_ENCAP_ESPINUDP:
201 /* Check if this is a keepalive packet. If so, eat it. */
202 if (len == 1 && udpdata[0] == 0xff) {
203 goto drop;
204 } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
205 /* ESP Packet without Non-ESP header */
206 len = sizeof(struct udphdr);
207 } else
208 /* Must be an IKE packet.. pass it through */
209 return 1;
210 break;
211 case UDP_ENCAP_ESPINUDP_NON_IKE:
212 /* Check if this is a keepalive packet. If so, eat it. */
213 if (len == 1 && udpdata[0] == 0xff) {
214 goto drop;
215 } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
216 udpdata32[0] == 0 && udpdata32[1] == 0) {
217
218 /* ESP Packet with Non-IKE marker */
219 len = sizeof(struct udphdr) + 2 * sizeof(u32);
220 } else
221 /* Must be an IKE packet.. pass it through */
222 return 1;
223 break;
224 }
225
226 /* At this point we are sure that this is an ESPinUDP packet,
227 * so we need to remove 'len' bytes from the packet (the UDP
228 * header and optional ESP marker bytes) and then modify the
229 * protocol to ESP, and then call into the transform receiver.
230 */
231 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
232 goto drop;
233
234 /* Now we can update and verify the packet length... */
235 iph = ip_hdr(skb);
236 iphlen = iph->ihl << 2;
237 iph->tot_len = htons(ntohs(iph->tot_len) - len);
238 if (skb->len < iphlen + len) {
239 /* packet is too small!?! */
240 goto drop;
241 }
242
243 /* pull the data buffer up to the ESP header and set the
244 * transport header to point to ESP. Keep UDP on the stack
245 * for later.
246 */
247 __skb_pull(skb, len);
248 skb_reset_transport_header(skb);
249
250 /* modify the protocol (it's ESP!) */
251 iph->protocol = IPPROTO_ESP;
252
253 /* process ESP */
254 ret = xfrm4_rcv_encap(skb, encap_type);
255 return ret;
256
257drop:
258 kfree_skb(skb);
259 return 0;
260}
261
262int xfrm4_rcv(struct sk_buff *skb)
263{
264 return xfrm4_rcv_encap(skb, 0);
265}
266
267EXPORT_SYMBOL(xfrm4_rcv);
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 568510304553..9275c79119b6 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -109,3 +109,4 @@ static void __exit ipip_fini(void)
109module_init(ipip_init); 109module_init(ipip_init);
110module_exit(ipip_fini); 110module_exit(ipip_fini);
111MODULE_LICENSE("GPL"); 111MODULE_LICENSE("GPL");
112MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP);
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 8e5d54f23b49..eb0b8085949b 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -109,7 +109,7 @@ config INET6_IPCOMP
109 If unsure, say Y. 109 If unsure, say Y.
110 110
111config IPV6_MIP6 111config IPV6_MIP6
112 bool "IPv6: Mobility (EXPERIMENTAL)" 112 tristate "IPv6: Mobility (EXPERIMENTAL)"
113 depends on IPV6 && EXPERIMENTAL 113 depends on IPV6 && EXPERIMENTAL
114 select XFRM 114 select XFRM
115 ---help--- 115 ---help---
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index bb33309044c9..87c23a73d284 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -14,7 +14,6 @@ ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
14 xfrm6_output.o 14 xfrm6_output.o
15ipv6-$(CONFIG_NETFILTER) += netfilter.o 15ipv6-$(CONFIG_NETFILTER) += netfilter.o
16ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o 16ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o
17ipv6-$(CONFIG_IPV6_MIP6) += mip6.o
18ipv6-$(CONFIG_PROC_FS) += proc.o 17ipv6-$(CONFIG_PROC_FS) += proc.o
19 18
20ipv6-objs += $(ipv6-y) 19ipv6-objs += $(ipv6-y)
@@ -28,6 +27,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_TRANSPORT) += xfrm6_mode_transport.o
28obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o 27obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o
29obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o 28obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
30obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o 29obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
30obj-$(CONFIG_IPV6_MIP6) += mip6.o
31obj-$(CONFIG_NETFILTER) += netfilter/ 31obj-$(CONFIG_NETFILTER) += netfilter/
32 32
33obj-$(CONFIG_IPV6_SIT) += sit.o 33obj-$(CONFIG_IPV6_SIT) += sit.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 79b79f3de24c..24424c3b7dc0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1034,7 +1034,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
1034 } 1034 }
1035 1035
1036 /* Rule 4: Prefer home address */ 1036 /* Rule 4: Prefer home address */
1037#ifdef CONFIG_IPV6_MIP6 1037#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
1038 if (hiscore.rule < 4) { 1038 if (hiscore.rule < 4) {
1039 if (ifa_result->flags & IFA_F_HOMEADDRESS) 1039 if (ifa_result->flags & IFA_F_HOMEADDRESS)
1040 hiscore.attrs |= IPV6_SADDR_SCORE_HOA; 1040 hiscore.attrs |= IPV6_SADDR_SCORE_HOA;
@@ -2785,7 +2785,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
2785 return 0; 2785 return 0;
2786} 2786}
2787 2787
2788static struct seq_operations if6_seq_ops = { 2788static const struct seq_operations if6_seq_ops = {
2789 .start = if6_seq_start, 2789 .start = if6_seq_start,
2790 .next = if6_seq_next, 2790 .next = if6_seq_next,
2791 .show = if6_seq_show, 2791 .show = if6_seq_show,
@@ -2835,7 +2835,7 @@ void if6_proc_exit(void)
2835} 2835}
2836#endif /* CONFIG_PROC_FS */ 2836#endif /* CONFIG_PROC_FS */
2837 2837
2838#ifdef CONFIG_IPV6_MIP6 2838#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
2839/* Check if address is a home address configured on any interface. */ 2839/* Check if address is a home address configured on any interface. */
2840int ipv6_chk_home_addr(struct in6_addr *addr) 2840int ipv6_chk_home_addr(struct in6_addr *addr)
2841{ 2841{
@@ -4243,7 +4243,6 @@ errout:
4243void __exit addrconf_cleanup(void) 4243void __exit addrconf_cleanup(void)
4244{ 4244{
4245 struct net_device *dev; 4245 struct net_device *dev;
4246 struct inet6_dev *idev;
4247 struct inet6_ifaddr *ifa; 4246 struct inet6_ifaddr *ifa;
4248 int i; 4247 int i;
4249 4248
@@ -4261,7 +4260,7 @@ void __exit addrconf_cleanup(void)
4261 */ 4260 */
4262 4261
4263 for_each_netdev(dev) { 4262 for_each_netdev(dev) {
4264 if ((idev = __in6_dev_get(dev)) == NULL) 4263 if (__in6_dev_get(dev) == NULL)
4265 continue; 4264 continue;
4266 addrconf_ifdown(dev, 1); 4265 addrconf_ifdown(dev, 1);
4267 } 4266 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 6dd377253cf7..eed09373a45d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -58,9 +58,6 @@
58#ifdef CONFIG_IPV6_TUNNEL 58#ifdef CONFIG_IPV6_TUNNEL
59#include <net/ip6_tunnel.h> 59#include <net/ip6_tunnel.h>
60#endif 60#endif
61#ifdef CONFIG_IPV6_MIP6
62#include <net/mip6.h>
63#endif
64 61
65#include <asm/uaccess.h> 62#include <asm/uaccess.h>
66#include <asm/system.h> 63#include <asm/system.h>
@@ -853,9 +850,6 @@ static int __init inet6_init(void)
853 ipv6_frag_init(); 850 ipv6_frag_init();
854 ipv6_nodata_init(); 851 ipv6_nodata_init();
855 ipv6_destopt_init(); 852 ipv6_destopt_init();
856#ifdef CONFIG_IPV6_MIP6
857 mip6_init();
858#endif
859 853
860 /* Init v6 transport protocols. */ 854 /* Init v6 transport protocols. */
861 udpv6_init(); 855 udpv6_init();
@@ -921,9 +915,7 @@ static void __exit inet6_exit(void)
921 915
922 /* Cleanup code parts. */ 916 /* Cleanup code parts. */
923 ipv6_packet_cleanup(); 917 ipv6_packet_cleanup();
924#ifdef CONFIG_IPV6_MIP6 918
925 mip6_fini();
926#endif
927 addrconf_cleanup(); 919 addrconf_cleanup();
928 ip6_flowlabel_cleanup(); 920 ip6_flowlabel_cleanup();
929 ip6_route_cleanup(); 921 ip6_route_cleanup();
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 128f94c79c64..53f46ab6af70 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -74,7 +74,7 @@ bad:
74 return 0; 74 return 0;
75} 75}
76 76
77#ifdef CONFIG_IPV6_MIP6 77#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
78/** 78/**
79 * ipv6_rearrange_destopt - rearrange IPv6 destination options header 79 * ipv6_rearrange_destopt - rearrange IPv6 destination options header
80 * @iph: IPv6 header 80 * @iph: IPv6 header
@@ -132,6 +132,8 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
132bad: 132bad:
133 return; 133 return;
134} 134}
135#else
136static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
135#endif 137#endif
136 138
137/** 139/**
@@ -189,10 +191,8 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
189 while (exthdr.raw < end) { 191 while (exthdr.raw < end) {
190 switch (nexthdr) { 192 switch (nexthdr) {
191 case NEXTHDR_DEST: 193 case NEXTHDR_DEST:
192#ifdef CONFIG_IPV6_MIP6
193 if (dir == XFRM_POLICY_OUT) 194 if (dir == XFRM_POLICY_OUT)
194 ipv6_rearrange_destopt(iph, exthdr.opth); 195 ipv6_rearrange_destopt(iph, exthdr.opth);
195#endif
196 case NEXTHDR_HOP: 196 case NEXTHDR_HOP:
197 if (!zero_out_mutable_opts(exthdr.opth)) { 197 if (!zero_out_mutable_opts(exthdr.opth)) {
198 LIMIT_NETDEBUG( 198 LIMIT_NETDEBUG(
@@ -228,7 +228,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
228 u8 nexthdr; 228 u8 nexthdr;
229 char tmp_base[8]; 229 char tmp_base[8];
230 struct { 230 struct {
231#ifdef CONFIG_IPV6_MIP6 231#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
232 struct in6_addr saddr; 232 struct in6_addr saddr;
233#endif 233#endif
234 struct in6_addr daddr; 234 struct in6_addr daddr;
@@ -255,7 +255,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
255 err = -ENOMEM; 255 err = -ENOMEM;
256 goto error; 256 goto error;
257 } 257 }
258#ifdef CONFIG_IPV6_MIP6 258#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
259 memcpy(tmp_ext, &top_iph->saddr, extlen); 259 memcpy(tmp_ext, &top_iph->saddr, extlen);
260#else 260#else
261 memcpy(tmp_ext, &top_iph->daddr, extlen); 261 memcpy(tmp_ext, &top_iph->daddr, extlen);
@@ -294,7 +294,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
294 294
295 memcpy(top_iph, tmp_base, sizeof(tmp_base)); 295 memcpy(top_iph, tmp_base, sizeof(tmp_base));
296 if (tmp_ext) { 296 if (tmp_ext) {
297#ifdef CONFIG_IPV6_MIP6 297#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
298 memcpy(&top_iph->saddr, tmp_ext, extlen); 298 memcpy(&top_iph->saddr, tmp_ext, extlen);
299#else 299#else
300 memcpy(&top_iph->daddr, tmp_ext, extlen); 300 memcpy(&top_iph->daddr, tmp_ext, extlen);
@@ -554,3 +554,4 @@ module_init(ah6_init);
554module_exit(ah6_fini); 554module_exit(ah6_fini);
555 555
556MODULE_LICENSE("GPL"); 556MODULE_LICENSE("GPL");
557MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 9b81264eb78f..b8c533fbdb63 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -539,7 +539,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v)
539 return 0; 539 return 0;
540} 540}
541 541
542static struct seq_operations ac6_seq_ops = { 542static const struct seq_operations ac6_seq_ops = {
543 .start = ac6_seq_start, 543 .start = ac6_seq_start,
544 .next = ac6_seq_next, 544 .next = ac6_seq_next,
545 .stop = ac6_seq_stop, 545 .stop = ac6_seq_stop,
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b1fe7ac5dc90..fe0f49024a0a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -657,11 +657,10 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
657 rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg); 657 rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg);
658 658
659 switch (rthdr->type) { 659 switch (rthdr->type) {
660 case IPV6_SRCRT_TYPE_0: 660#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
661#ifdef CONFIG_IPV6_MIP6
662 case IPV6_SRCRT_TYPE_2: 661 case IPV6_SRCRT_TYPE_2:
663#endif
664 break; 662 break;
663#endif
665 default: 664 default:
666 err = -EINVAL; 665 err = -EINVAL;
667 goto exit_f; 666 goto exit_f;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 7107bb7e2e62..2db31ce3c7e6 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -421,3 +421,4 @@ module_init(esp6_init);
421module_exit(esp6_fini); 421module_exit(esp6_fini);
422 422
423MODULE_LICENSE("GPL"); 423MODULE_LICENSE("GPL");
424MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 14be0b9b77a5..c82d4d49f71f 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -42,7 +42,7 @@
42#include <net/ndisc.h> 42#include <net/ndisc.h>
43#include <net/ip6_route.h> 43#include <net/ip6_route.h>
44#include <net/addrconf.h> 44#include <net/addrconf.h>
45#ifdef CONFIG_IPV6_MIP6 45#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
46#include <net/xfrm.h> 46#include <net/xfrm.h>
47#endif 47#endif
48 48
@@ -90,6 +90,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
90 bad: 90 bad:
91 return -1; 91 return -1;
92} 92}
93EXPORT_SYMBOL_GPL(ipv6_find_tlv);
93 94
94/* 95/*
95 * Parsing tlv encoded headers. 96 * Parsing tlv encoded headers.
@@ -196,7 +197,7 @@ bad:
196 Destination options header. 197 Destination options header.
197 *****************************/ 198 *****************************/
198 199
199#ifdef CONFIG_IPV6_MIP6 200#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
200static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) 201static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
201{ 202{
202 struct sk_buff *skb = *skbp; 203 struct sk_buff *skb = *skbp;
@@ -270,7 +271,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
270#endif 271#endif
271 272
272static struct tlvtype_proc tlvprocdestopt_lst[] = { 273static struct tlvtype_proc tlvprocdestopt_lst[] = {
273#ifdef CONFIG_IPV6_MIP6 274#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
274 { 275 {
275 .type = IPV6_TLV_HAO, 276 .type = IPV6_TLV_HAO,
276 .func = ipv6_dest_hao, 277 .func = ipv6_dest_hao,
@@ -283,7 +284,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp)
283{ 284{
284 struct sk_buff *skb = *skbp; 285 struct sk_buff *skb = *skbp;
285 struct inet6_skb_parm *opt = IP6CB(skb); 286 struct inet6_skb_parm *opt = IP6CB(skb);
286#ifdef CONFIG_IPV6_MIP6 287#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
287 __u16 dstbuf; 288 __u16 dstbuf;
288#endif 289#endif
289 struct dst_entry *dst; 290 struct dst_entry *dst;
@@ -298,7 +299,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp)
298 } 299 }
299 300
300 opt->lastopt = opt->dst1 = skb_network_header_len(skb); 301 opt->lastopt = opt->dst1 = skb_network_header_len(skb);
301#ifdef CONFIG_IPV6_MIP6 302#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
302 dstbuf = opt->dst1; 303 dstbuf = opt->dst1;
303#endif 304#endif
304 305
@@ -308,7 +309,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp)
308 skb = *skbp; 309 skb = *skbp;
309 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; 310 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
310 opt = IP6CB(skb); 311 opt = IP6CB(skb);
311#ifdef CONFIG_IPV6_MIP6 312#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
312 opt->nhoff = dstbuf; 313 opt->nhoff = dstbuf;
313#else 314#else
314 opt->nhoff = opt->dst1; 315 opt->nhoff = opt->dst1;
@@ -371,22 +372,13 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp)
371 struct rt0_hdr *rthdr; 372 struct rt0_hdr *rthdr;
372 int accept_source_route = ipv6_devconf.accept_source_route; 373 int accept_source_route = ipv6_devconf.accept_source_route;
373 374
374 if (accept_source_route < 0 || 375 idev = in6_dev_get(skb->dev);
375 ((idev = in6_dev_get(skb->dev)) == NULL)) { 376 if (idev) {
376 kfree_skb(skb); 377 if (accept_source_route > idev->cnf.accept_source_route)
377 return -1; 378 accept_source_route = idev->cnf.accept_source_route;
378 }
379 if (idev->cnf.accept_source_route < 0) {
380 in6_dev_put(idev); 379 in6_dev_put(idev);
381 kfree_skb(skb);
382 return -1;
383 } 380 }
384 381
385 if (accept_source_route > idev->cnf.accept_source_route)
386 accept_source_route = idev->cnf.accept_source_route;
387
388 in6_dev_put(idev);
389
390 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || 382 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
391 !pskb_may_pull(skb, (skb_transport_offset(skb) + 383 !pskb_may_pull(skb, (skb_transport_offset(skb) +
392 ((skb_transport_header(skb)[1] + 1) << 3)))) { 384 ((skb_transport_header(skb)[1] + 1) << 3)))) {
@@ -398,24 +390,6 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp)
398 390
399 hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); 391 hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
400 392
401 switch (hdr->type) {
402#ifdef CONFIG_IPV6_MIP6
403 case IPV6_SRCRT_TYPE_2:
404 break;
405#endif
406 case IPV6_SRCRT_TYPE_0:
407 if (accept_source_route > 0)
408 break;
409 kfree_skb(skb);
410 return -1;
411 default:
412 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
413 IPSTATS_MIB_INHDRERRORS);
414 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
415 (&hdr->type) - skb_network_header(skb));
416 return -1;
417 }
418
419 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || 393 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
420 skb->pkt_type != PACKET_HOST) { 394 skb->pkt_type != PACKET_HOST) {
421 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 395 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
@@ -427,7 +401,7 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp)
427looped_back: 401looped_back:
428 if (hdr->segments_left == 0) { 402 if (hdr->segments_left == 0) {
429 switch (hdr->type) { 403 switch (hdr->type) {
430#ifdef CONFIG_IPV6_MIP6 404#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
431 case IPV6_SRCRT_TYPE_2: 405 case IPV6_SRCRT_TYPE_2:
432 /* Silently discard type 2 header unless it was 406 /* Silently discard type 2 header unless it was
433 * processed by own 407 * processed by own
@@ -453,18 +427,10 @@ looped_back:
453 } 427 }
454 428
455 switch (hdr->type) { 429 switch (hdr->type) {
456 case IPV6_SRCRT_TYPE_0: 430#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
457 if (hdr->hdrlen & 0x01) {
458 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
459 IPSTATS_MIB_INHDRERRORS);
460 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
461 ((&hdr->hdrlen) -
462 skb_network_header(skb)));
463 return -1;
464 }
465 break;
466#ifdef CONFIG_IPV6_MIP6
467 case IPV6_SRCRT_TYPE_2: 431 case IPV6_SRCRT_TYPE_2:
432 if (accept_source_route < 0)
433 goto unknown_rh;
468 /* Silently discard invalid RTH type 2 */ 434 /* Silently discard invalid RTH type 2 */
469 if (hdr->hdrlen != 2 || hdr->segments_left != 1) { 435 if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
470 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 436 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
@@ -474,6 +440,8 @@ looped_back:
474 } 440 }
475 break; 441 break;
476#endif 442#endif
443 default:
444 goto unknown_rh;
477 } 445 }
478 446
479 /* 447 /*
@@ -520,7 +488,7 @@ looped_back:
520 addr += i - 1; 488 addr += i - 1;
521 489
522 switch (hdr->type) { 490 switch (hdr->type) {
523#ifdef CONFIG_IPV6_MIP6 491#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
524 case IPV6_SRCRT_TYPE_2: 492 case IPV6_SRCRT_TYPE_2:
525 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, 493 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
526 (xfrm_address_t *)&ipv6_hdr(skb)->saddr, 494 (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
@@ -577,6 +545,12 @@ looped_back:
577 skb_push(skb, skb->data - skb_network_header(skb)); 545 skb_push(skb, skb->data - skb_network_header(skb));
578 dst_input(skb); 546 dst_input(skb);
579 return -1; 547 return -1;
548
549unknown_rh:
550 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
551 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
552 (&hdr->type) - skb_network_header(skb));
553 return -1;
580} 554}
581 555
582static struct inet6_protocol rthdr_protocol = { 556static struct inet6_protocol rthdr_protocol = {
@@ -590,72 +564,6 @@ void __init ipv6_rthdr_init(void)
590 printk(KERN_ERR "ipv6_rthdr_init: Could not register protocol\n"); 564 printk(KERN_ERR "ipv6_rthdr_init: Could not register protocol\n");
591}; 565};
592 566
593/*
594 This function inverts received rthdr.
595 NOTE: specs allow to make it automatically only if
596 packet authenticated.
597
598 I will not discuss it here (though, I am really pissed off at
599 this stupid requirement making rthdr idea useless)
600
601 Actually, it creates severe problems for us.
602 Embryonic requests has no associated sockets,
603 so that user have no control over it and
604 cannot not only to set reply options, but
605 even to know, that someone wants to connect
606 without success. :-(
607
608 For now we need to test the engine, so that I created
609 temporary (or permanent) backdoor.
610 If listening socket set IPV6_RTHDR to 2, then we invert header.
611 --ANK (980729)
612 */
613
614struct ipv6_txoptions *
615ipv6_invert_rthdr(struct sock *sk, struct ipv6_rt_hdr *hdr)
616{
617 /* Received rthdr:
618
619 [ H1 -> H2 -> ... H_prev ] daddr=ME
620
621 Inverted result:
622 [ H_prev -> ... -> H1 ] daddr =sender
623
624 Note, that IP output engine will rewrite this rthdr
625 by rotating it left by one addr.
626 */
627
628 int n, i;
629 struct rt0_hdr *rthdr = (struct rt0_hdr*)hdr;
630 struct rt0_hdr *irthdr;
631 struct ipv6_txoptions *opt;
632 int hdrlen = ipv6_optlen(hdr);
633
634 if (hdr->segments_left ||
635 hdr->type != IPV6_SRCRT_TYPE_0 ||
636 hdr->hdrlen & 0x01)
637 return NULL;
638
639 n = hdr->hdrlen >> 1;
640 opt = sock_kmalloc(sk, sizeof(*opt) + hdrlen, GFP_ATOMIC);
641 if (opt == NULL)
642 return NULL;
643 memset(opt, 0, sizeof(*opt));
644 opt->tot_len = sizeof(*opt) + hdrlen;
645 opt->srcrt = (void*)(opt+1);
646 opt->opt_nflen = hdrlen;
647
648 memcpy(opt->srcrt, hdr, sizeof(*hdr));
649 irthdr = (struct rt0_hdr*)opt->srcrt;
650 irthdr->reserved = 0;
651 opt->srcrt->segments_left = n;
652 for (i=0; i<n; i++)
653 memcpy(irthdr->addr+i, rthdr->addr+(n-1-i), 16);
654 return opt;
655}
656
657EXPORT_SYMBOL_GPL(ipv6_invert_rthdr);
658
659/********************************** 567/**********************************
660 Hop-by-hop options. 568 Hop-by-hop options.
661 **********************************/ 569 **********************************/
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index e9bcce9e7bdf..4765a29f98a8 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -272,7 +272,7 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
272 return 0; 272 return 0;
273} 273}
274 274
275#ifdef CONFIG_IPV6_MIP6 275#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
276static void mip6_addr_swap(struct sk_buff *skb) 276static void mip6_addr_swap(struct sk_buff *skb)
277{ 277{
278 struct ipv6hdr *iph = ipv6_hdr(skb); 278 struct ipv6hdr *iph = ipv6_hdr(skb);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index c206a152ed9d..413a4ebb195c 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -648,7 +648,7 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
648 return 0; 648 return 0;
649} 649}
650 650
651static struct seq_operations ip6fl_seq_ops = { 651static const struct seq_operations ip6fl_seq_ops = {
652 .start = ip6fl_seq_start, 652 .start = ip6fl_seq_start,
653 .next = ip6fl_seq_next, 653 .next = ip6fl_seq_next,
654 .stop = ip6fl_seq_stop, 654 .stop = ip6fl_seq_stop,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4704b5fc3085..50d86e94d9ed 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -521,6 +521,10 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
521 to->tc_index = from->tc_index; 521 to->tc_index = from->tc_index;
522#endif 522#endif
523 nf_copy(to, from); 523 nf_copy(to, from);
524#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
525 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
526 to->nf_trace = from->nf_trace;
527#endif
524 skb_copy_secmark(to, from); 528 skb_copy_secmark(to, from);
525} 529}
526 530
@@ -543,7 +547,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
543 found_rhdr = 1; 547 found_rhdr = 1;
544 break; 548 break;
545 case NEXTHDR_DEST: 549 case NEXTHDR_DEST:
546#ifdef CONFIG_IPV6_MIP6 550#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
547 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) 551 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
548 break; 552 break;
549#endif 553#endif
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a0902fbdb4e1..281aee42d3f0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -883,8 +883,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
883 */ 883 */
884 max_headroom += LL_RESERVED_SPACE(tdev); 884 max_headroom += LL_RESERVED_SPACE(tdev);
885 885
886 if (skb_headroom(skb) < max_headroom || 886 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
887 skb_cloned(skb) || skb_shared(skb)) { 887 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
888 struct sk_buff *new_skb; 888 struct sk_buff *new_skb;
889 889
890 if (!(new_skb = skb_realloc_headroom(skb, max_headroom))) 890 if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 1ee50b5782e1..473f165310ea 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -500,4 +500,4 @@ MODULE_LICENSE("GPL");
500MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) for IPv6 - RFC3173"); 500MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) for IPv6 - RFC3173");
501MODULE_AUTHOR("Mitsuru KANDA <mk@linux-ipv6.org>"); 501MODULE_AUTHOR("Mitsuru KANDA <mk@linux-ipv6.org>");
502 502
503 503MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_COMP);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index aa3d07c52a8f..d6846393182d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -123,7 +123,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
123 struct ipv6hdr *ipv6h; 123 struct ipv6hdr *ipv6h;
124 struct inet6_protocol *ops; 124 struct inet6_protocol *ops;
125 125
126 if (!(features & NETIF_F_HW_CSUM)) 126 if (!(features & NETIF_F_V6_CSUM))
127 features &= ~NETIF_F_SG; 127 features &= ~NETIF_F_SG;
128 128
129 if (unlikely(skb_shinfo(skb)->gso_type & 129 if (unlikely(skb_shinfo(skb)->gso_type &
@@ -336,16 +336,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
336 break; 336 break;
337 337
338 case IPV6_RECVRTHDR: 338 case IPV6_RECVRTHDR:
339 if (val < 0 || val > 2) 339 np->rxopt.bits.srcrt = valbool;
340 goto e_inval;
341 np->rxopt.bits.srcrt = val;
342 retv = 0; 340 retv = 0;
343 break; 341 break;
344 342
345 case IPV6_2292RTHDR: 343 case IPV6_2292RTHDR:
346 if (val < 0 || val > 2) 344 np->rxopt.bits.osrcrt = valbool;
347 goto e_inval;
348 np->rxopt.bits.osrcrt = val;
349 retv = 0; 345 retv = 0;
350 break; 346 break;
351 347
@@ -416,11 +412,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
416 if (optname == IPV6_RTHDR && opt && opt->srcrt) { 412 if (optname == IPV6_RTHDR && opt && opt->srcrt) {
417 struct ipv6_rt_hdr *rthdr = opt->srcrt; 413 struct ipv6_rt_hdr *rthdr = opt->srcrt;
418 switch (rthdr->type) { 414 switch (rthdr->type) {
419 case IPV6_SRCRT_TYPE_0: 415#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
420#ifdef CONFIG_IPV6_MIP6
421 case IPV6_SRCRT_TYPE_2: 416 case IPV6_SRCRT_TYPE_2:
422#endif
423 break; 417 break;
418#endif
424 default: 419 default:
425 goto sticky_done; 420 goto sticky_done;
426 } 421 }
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 3e308fb41b49..ae9881832a7e 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2423,7 +2423,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2423 return 0; 2423 return 0;
2424} 2424}
2425 2425
2426static struct seq_operations igmp6_mc_seq_ops = { 2426static const struct seq_operations igmp6_mc_seq_ops = {
2427 .start = igmp6_mc_seq_start, 2427 .start = igmp6_mc_seq_start,
2428 .next = igmp6_mc_seq_next, 2428 .next = igmp6_mc_seq_next,
2429 .stop = igmp6_mc_seq_stop, 2429 .stop = igmp6_mc_seq_stop,
@@ -2597,7 +2597,7 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
2597 return 0; 2597 return 0;
2598} 2598}
2599 2599
2600static struct seq_operations igmp6_mcf_seq_ops = { 2600static const struct seq_operations igmp6_mcf_seq_ops = {
2601 .start = igmp6_mcf_seq_start, 2601 .start = igmp6_mcf_seq_start,
2602 .next = igmp6_mcf_seq_next, 2602 .next = igmp6_mcf_seq_next,
2603 .stop = igmp6_mcf_seq_stop, 2603 .stop = igmp6_mcf_seq_stop,
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 13b7160fb892..8a1399ce38ce 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -30,6 +30,7 @@
30#include <net/sock.h> 30#include <net/sock.h>
31#include <net/ipv6.h> 31#include <net/ipv6.h>
32#include <net/ip6_checksum.h> 32#include <net/ip6_checksum.h>
33#include <net/rawv6.h>
33#include <net/xfrm.h> 34#include <net/xfrm.h>
34#include <net/mip6.h> 35#include <net/mip6.h>
35 36
@@ -86,7 +87,7 @@ static int mip6_mh_len(int type)
86 return len; 87 return len;
87} 88}
88 89
89int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) 90static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
90{ 91{
91 struct ip6_mh *mh; 92 struct ip6_mh *mh;
92 93
@@ -471,7 +472,7 @@ static struct xfrm_type mip6_rthdr_type =
471 .remote_addr = mip6_xfrm_addr, 472 .remote_addr = mip6_xfrm_addr,
472}; 473};
473 474
474int __init mip6_init(void) 475static int __init mip6_init(void)
475{ 476{
476 printk(KERN_INFO "Mobile IPv6\n"); 477 printk(KERN_INFO "Mobile IPv6\n");
477 478
@@ -483,18 +484,35 @@ int __init mip6_init(void)
483 printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __FUNCTION__); 484 printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __FUNCTION__);
484 goto mip6_rthdr_xfrm_fail; 485 goto mip6_rthdr_xfrm_fail;
485 } 486 }
487 if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
488 printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __FUNCTION__);
489 goto mip6_rawv6_mh_fail;
490 }
491
492
486 return 0; 493 return 0;
487 494
495 mip6_rawv6_mh_fail:
496 xfrm_unregister_type(&mip6_rthdr_type, AF_INET6);
488 mip6_rthdr_xfrm_fail: 497 mip6_rthdr_xfrm_fail:
489 xfrm_unregister_type(&mip6_destopt_type, AF_INET6); 498 xfrm_unregister_type(&mip6_destopt_type, AF_INET6);
490 mip6_destopt_xfrm_fail: 499 mip6_destopt_xfrm_fail:
491 return -EAGAIN; 500 return -EAGAIN;
492} 501}
493 502
494void __exit mip6_fini(void) 503static void __exit mip6_fini(void)
495{ 504{
505 if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
506 printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __FUNCTION__);
496 if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0) 507 if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
497 printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __FUNCTION__); 508 printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __FUNCTION__);
498 if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0) 509 if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
499 printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __FUNCTION__); 510 printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __FUNCTION__);
500} 511}
512
513module_init(mip6_init);
514module_exit(mip6_fini);
515
516MODULE_LICENSE("GPL");
517MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_DSTOPTS);
518MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ROUTING);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9aa624026688..254c769b750a 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -96,13 +96,13 @@ ip6t_ext_hdr(u8 nexthdr)
96} 96}
97 97
98/* Returns whether matches rule or not. */ 98/* Returns whether matches rule or not. */
99static inline int 99static inline bool
100ip6_packet_match(const struct sk_buff *skb, 100ip6_packet_match(const struct sk_buff *skb,
101 const char *indev, 101 const char *indev,
102 const char *outdev, 102 const char *outdev,
103 const struct ip6t_ip6 *ip6info, 103 const struct ip6t_ip6 *ip6info,
104 unsigned int *protoff, 104 unsigned int *protoff,
105 int *fragoff, int *hotdrop) 105 int *fragoff, bool *hotdrop)
106{ 106{
107 size_t i; 107 size_t i;
108 unsigned long ret; 108 unsigned long ret;
@@ -122,7 +122,7 @@ ip6_packet_match(const struct sk_buff *skb,
122 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, 122 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
123 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, 123 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
124 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/ 124 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
125 return 0; 125 return false;
126 } 126 }
127 127
128 /* Look for ifname matches; this should unroll nicely. */ 128 /* Look for ifname matches; this should unroll nicely. */
@@ -136,7 +136,7 @@ ip6_packet_match(const struct sk_buff *skb,
136 dprintf("VIA in mismatch (%s vs %s).%s\n", 136 dprintf("VIA in mismatch (%s vs %s).%s\n",
137 indev, ip6info->iniface, 137 indev, ip6info->iniface,
138 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":""); 138 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
139 return 0; 139 return false;
140 } 140 }
141 141
142 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) { 142 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
@@ -149,7 +149,7 @@ ip6_packet_match(const struct sk_buff *skb,
149 dprintf("VIA out mismatch (%s vs %s).%s\n", 149 dprintf("VIA out mismatch (%s vs %s).%s\n",
150 outdev, ip6info->outiface, 150 outdev, ip6info->outiface,
151 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":""); 151 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
152 return 0; 152 return false;
153 } 153 }
154 154
155/* ... might want to do something with class and flowlabel here ... */ 155/* ... might want to do something with class and flowlabel here ... */
@@ -162,8 +162,8 @@ ip6_packet_match(const struct sk_buff *skb,
162 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off); 162 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
163 if (protohdr < 0) { 163 if (protohdr < 0) {
164 if (_frag_off == 0) 164 if (_frag_off == 0)
165 *hotdrop = 1; 165 *hotdrop = true;
166 return 0; 166 return false;
167 } 167 }
168 *fragoff = _frag_off; 168 *fragoff = _frag_off;
169 169
@@ -174,34 +174,34 @@ ip6_packet_match(const struct sk_buff *skb,
174 174
175 if (ip6info->proto == protohdr) { 175 if (ip6info->proto == protohdr) {
176 if(ip6info->invflags & IP6T_INV_PROTO) { 176 if(ip6info->invflags & IP6T_INV_PROTO) {
177 return 0; 177 return false;
178 } 178 }
179 return 1; 179 return true;
180 } 180 }
181 181
182 /* We need match for the '-p all', too! */ 182 /* We need match for the '-p all', too! */
183 if ((ip6info->proto != 0) && 183 if ((ip6info->proto != 0) &&
184 !(ip6info->invflags & IP6T_INV_PROTO)) 184 !(ip6info->invflags & IP6T_INV_PROTO))
185 return 0; 185 return false;
186 } 186 }
187 return 1; 187 return true;
188} 188}
189 189
190/* should be ip6 safe */ 190/* should be ip6 safe */
191static inline int 191static inline bool
192ip6_checkentry(const struct ip6t_ip6 *ipv6) 192ip6_checkentry(const struct ip6t_ip6 *ipv6)
193{ 193{
194 if (ipv6->flags & ~IP6T_F_MASK) { 194 if (ipv6->flags & ~IP6T_F_MASK) {
195 duprintf("Unknown flag bits set: %08X\n", 195 duprintf("Unknown flag bits set: %08X\n",
196 ipv6->flags & ~IP6T_F_MASK); 196 ipv6->flags & ~IP6T_F_MASK);
197 return 0; 197 return false;
198 } 198 }
199 if (ipv6->invflags & ~IP6T_INV_MASK) { 199 if (ipv6->invflags & ~IP6T_INV_MASK) {
200 duprintf("Unknown invflag bits set: %08X\n", 200 duprintf("Unknown invflag bits set: %08X\n",
201 ipv6->invflags & ~IP6T_INV_MASK); 201 ipv6->invflags & ~IP6T_INV_MASK);
202 return 0; 202 return false;
203 } 203 }
204 return 1; 204 return true;
205} 205}
206 206
207static unsigned int 207static unsigned int
@@ -219,20 +219,20 @@ ip6t_error(struct sk_buff **pskb,
219} 219}
220 220
221static inline 221static inline
222int do_match(struct ip6t_entry_match *m, 222bool do_match(struct ip6t_entry_match *m,
223 const struct sk_buff *skb, 223 const struct sk_buff *skb,
224 const struct net_device *in, 224 const struct net_device *in,
225 const struct net_device *out, 225 const struct net_device *out,
226 int offset, 226 int offset,
227 unsigned int protoff, 227 unsigned int protoff,
228 int *hotdrop) 228 bool *hotdrop)
229{ 229{
230 /* Stop iteration if it doesn't match */ 230 /* Stop iteration if it doesn't match */
231 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data, 231 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
232 offset, protoff, hotdrop)) 232 offset, protoff, hotdrop))
233 return 1; 233 return true;
234 else 234 else
235 return 0; 235 return false;
236} 236}
237 237
238static inline struct ip6t_entry * 238static inline struct ip6t_entry *
@@ -241,6 +241,113 @@ get_entry(void *base, unsigned int offset)
241 return (struct ip6t_entry *)(base + offset); 241 return (struct ip6t_entry *)(base + offset);
242} 242}
243 243
244/* All zeroes == unconditional rule. */
245static inline int
246unconditional(const struct ip6t_ip6 *ipv6)
247{
248 unsigned int i;
249
250 for (i = 0; i < sizeof(*ipv6); i++)
251 if (((char *)ipv6)[i])
252 break;
253
254 return (i == sizeof(*ipv6));
255}
256
257#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
258 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
259/* This cries for unification! */
260static const char *hooknames[] = {
261 [NF_IP6_PRE_ROUTING] = "PREROUTING",
262 [NF_IP6_LOCAL_IN] = "INPUT",
263 [NF_IP6_FORWARD] = "FORWARD",
264 [NF_IP6_LOCAL_OUT] = "OUTPUT",
265 [NF_IP6_POST_ROUTING] = "POSTROUTING",
266};
267
268enum nf_ip_trace_comments {
269 NF_IP6_TRACE_COMMENT_RULE,
270 NF_IP6_TRACE_COMMENT_RETURN,
271 NF_IP6_TRACE_COMMENT_POLICY,
272};
273
274static const char *comments[] = {
275 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
276 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
277 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
278};
279
280static struct nf_loginfo trace_loginfo = {
281 .type = NF_LOG_TYPE_LOG,
282 .u = {
283 .log = {
284 .level = 4,
285 .logflags = NF_LOG_MASK,
286 },
287 },
288};
289
290static inline int
291get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
292 char *hookname, char **chainname,
293 char **comment, unsigned int *rulenum)
294{
295 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
296
297 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
298 /* Head of user chain: ERROR target with chainname */
299 *chainname = t->target.data;
300 (*rulenum) = 0;
301 } else if (s == e) {
302 (*rulenum)++;
303
304 if (s->target_offset == sizeof(struct ip6t_entry)
305 && strcmp(t->target.u.kernel.target->name,
306 IP6T_STANDARD_TARGET) == 0
307 && t->verdict < 0
308 && unconditional(&s->ipv6)) {
309 /* Tail of chains: STANDARD target (return/policy) */
310 *comment = *chainname == hookname
311 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
312 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
313 }
314 return 1;
315 } else
316 (*rulenum)++;
317
318 return 0;
319}
320
321static void trace_packet(struct sk_buff *skb,
322 unsigned int hook,
323 const struct net_device *in,
324 const struct net_device *out,
325 char *tablename,
326 struct xt_table_info *private,
327 struct ip6t_entry *e)
328{
329 void *table_base;
330 struct ip6t_entry *root;
331 char *hookname, *chainname, *comment;
332 unsigned int rulenum = 0;
333
334 table_base = (void *)private->entries[smp_processor_id()];
335 root = get_entry(table_base, private->hook_entry[hook]);
336
337 hookname = chainname = (char *)hooknames[hook];
338 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
339
340 IP6T_ENTRY_ITERATE(root,
341 private->size - private->hook_entry[hook],
342 get_chainname_rulenum,
343 e, hookname, &chainname, &comment, &rulenum);
344
345 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
346 "TRACE: %s:%s:%s:%u ",
347 tablename, chainname, comment, rulenum);
348}
349#endif
350
244/* Returns one of the generic firewall policies, like NF_ACCEPT. */ 351/* Returns one of the generic firewall policies, like NF_ACCEPT. */
245unsigned int 352unsigned int
246ip6t_do_table(struct sk_buff **pskb, 353ip6t_do_table(struct sk_buff **pskb,
@@ -252,7 +359,7 @@ ip6t_do_table(struct sk_buff **pskb,
252 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 359 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
253 int offset = 0; 360 int offset = 0;
254 unsigned int protoff = 0; 361 unsigned int protoff = 0;
255 int hotdrop = 0; 362 bool hotdrop = false;
256 /* Initializing verdict to NF_DROP keeps gcc happy. */ 363 /* Initializing verdict to NF_DROP keeps gcc happy. */
257 unsigned int verdict = NF_DROP; 364 unsigned int verdict = NF_DROP;
258 const char *indev, *outdev; 365 const char *indev, *outdev;
@@ -298,6 +405,14 @@ ip6t_do_table(struct sk_buff **pskb,
298 405
299 t = ip6t_get_target(e); 406 t = ip6t_get_target(e);
300 IP_NF_ASSERT(t->u.kernel.target); 407 IP_NF_ASSERT(t->u.kernel.target);
408
409#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
410 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
411 /* The packet is traced: log it */
412 if (unlikely((*pskb)->nf_trace))
413 trace_packet(*pskb, hook, in, out,
414 table->name, private, e);
415#endif
301 /* Standard target? */ 416 /* Standard target? */
302 if (!t->u.kernel.target->target) { 417 if (!t->u.kernel.target->target) {
303 int v; 418 int v;
@@ -377,19 +492,6 @@ ip6t_do_table(struct sk_buff **pskb,
377#endif 492#endif
378} 493}
379 494
380/* All zeroes == unconditional rule. */
381static inline int
382unconditional(const struct ip6t_ip6 *ipv6)
383{
384 unsigned int i;
385
386 for (i = 0; i < sizeof(*ipv6); i++)
387 if (((char *)ipv6)[i])
388 break;
389
390 return (i == sizeof(*ipv6));
391}
392
393/* Figures out from what hook each rule can be called: returns 0 if 495/* Figures out from what hook each rule can be called: returns 0 if
394 there are loops. Puts hook bitmask in comefrom. */ 496 there are loops. Puts hook bitmask in comefrom. */
395static int 497static int
@@ -1282,16 +1384,16 @@ void ip6t_unregister_table(struct xt_table *table)
1282} 1384}
1283 1385
1284/* Returns 1 if the type and code is matched by the range, 0 otherwise */ 1386/* Returns 1 if the type and code is matched by the range, 0 otherwise */
1285static inline int 1387static inline bool
1286icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, 1388icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1287 u_int8_t type, u_int8_t code, 1389 u_int8_t type, u_int8_t code,
1288 int invert) 1390 bool invert)
1289{ 1391{
1290 return (type == test_type && code >= min_code && code <= max_code) 1392 return (type == test_type && code >= min_code && code <= max_code)
1291 ^ invert; 1393 ^ invert;
1292} 1394}
1293 1395
1294static int 1396static bool
1295icmp6_match(const struct sk_buff *skb, 1397icmp6_match(const struct sk_buff *skb,
1296 const struct net_device *in, 1398 const struct net_device *in,
1297 const struct net_device *out, 1399 const struct net_device *out,
@@ -1299,22 +1401,22 @@ icmp6_match(const struct sk_buff *skb,
1299 const void *matchinfo, 1401 const void *matchinfo,
1300 int offset, 1402 int offset,
1301 unsigned int protoff, 1403 unsigned int protoff,
1302 int *hotdrop) 1404 bool *hotdrop)
1303{ 1405{
1304 struct icmp6hdr _icmp, *ic; 1406 struct icmp6hdr _icmp, *ic;
1305 const struct ip6t_icmp *icmpinfo = matchinfo; 1407 const struct ip6t_icmp *icmpinfo = matchinfo;
1306 1408
1307 /* Must not be a fragment. */ 1409 /* Must not be a fragment. */
1308 if (offset) 1410 if (offset)
1309 return 0; 1411 return false;
1310 1412
1311 ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp); 1413 ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp);
1312 if (ic == NULL) { 1414 if (ic == NULL) {
1313 /* We've been asked to examine this packet, and we 1415 /* We've been asked to examine this packet, and we
1314 can't. Hence, no choice but to drop. */ 1416 can't. Hence, no choice but to drop. */
1315 duprintf("Dropping evil ICMP tinygram.\n"); 1417 duprintf("Dropping evil ICMP tinygram.\n");
1316 *hotdrop = 1; 1418 *hotdrop = true;
1317 return 0; 1419 return false;
1318 } 1420 }
1319 1421
1320 return icmp6_type_code_match(icmpinfo->type, 1422 return icmp6_type_code_match(icmpinfo->type,
@@ -1325,7 +1427,7 @@ icmp6_match(const struct sk_buff *skb,
1325} 1427}
1326 1428
1327/* Called when user tries to insert an entry of this type. */ 1429/* Called when user tries to insert an entry of this type. */
1328static int 1430static bool
1329icmp6_checkentry(const char *tablename, 1431icmp6_checkentry(const char *tablename,
1330 const void *entry, 1432 const void *entry,
1331 const struct xt_match *match, 1433 const struct xt_match *match,
@@ -1339,13 +1441,13 @@ icmp6_checkentry(const char *tablename,
1339} 1441}
1340 1442
1341/* The built-in targets: standard (NULL) and error. */ 1443/* The built-in targets: standard (NULL) and error. */
1342static struct xt_target ip6t_standard_target = { 1444static struct xt_target ip6t_standard_target __read_mostly = {
1343 .name = IP6T_STANDARD_TARGET, 1445 .name = IP6T_STANDARD_TARGET,
1344 .targetsize = sizeof(int), 1446 .targetsize = sizeof(int),
1345 .family = AF_INET6, 1447 .family = AF_INET6,
1346}; 1448};
1347 1449
1348static struct xt_target ip6t_error_target = { 1450static struct xt_target ip6t_error_target __read_mostly = {
1349 .name = IP6T_ERROR_TARGET, 1451 .name = IP6T_ERROR_TARGET,
1350 .target = ip6t_error, 1452 .target = ip6t_error,
1351 .targetsize = IP6T_FUNCTION_MAXNAMELEN, 1453 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
@@ -1362,7 +1464,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
1362 .get = do_ip6t_get_ctl, 1464 .get = do_ip6t_get_ctl,
1363}; 1465};
1364 1466
1365static struct xt_match icmp6_matchstruct = { 1467static struct xt_match icmp6_matchstruct __read_mostly = {
1366 .name = "icmp6", 1468 .name = "icmp6",
1367 .match = &icmp6_match, 1469 .match = &icmp6_match,
1368 .matchsize = sizeof(struct ip6t_icmp), 1470 .matchsize = sizeof(struct ip6t_icmp),
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
index 4115a576ba25..ad4d94310b87 100644
--- a/net/ipv6/netfilter/ip6t_HL.c
+++ b/net/ipv6/netfilter/ip6t_HL.c
@@ -58,28 +58,28 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb,
58 return XT_CONTINUE; 58 return XT_CONTINUE;
59} 59}
60 60
61static int ip6t_hl_checkentry(const char *tablename, 61static bool ip6t_hl_checkentry(const char *tablename,
62 const void *entry, 62 const void *entry,
63 const struct xt_target *target, 63 const struct xt_target *target,
64 void *targinfo, 64 void *targinfo,
65 unsigned int hook_mask) 65 unsigned int hook_mask)
66{ 66{
67 struct ip6t_HL_info *info = targinfo; 67 const struct ip6t_HL_info *info = targinfo;
68 68
69 if (info->mode > IP6T_HL_MAXMODE) { 69 if (info->mode > IP6T_HL_MAXMODE) {
70 printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n", 70 printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n",
71 info->mode); 71 info->mode);
72 return 0; 72 return false;
73 } 73 }
74 if ((info->mode != IP6T_HL_SET) && (info->hop_limit == 0)) { 74 if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
75 printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't " 75 printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't "
76 "make sense with value 0\n"); 76 "make sense with value 0\n");
77 return 0; 77 return false;
78 } 78 }
79 return 1; 79 return true;
80} 80}
81 81
82static struct xt_target ip6t_HL = { 82static struct xt_target ip6t_HL __read_mostly = {
83 .name = "HL", 83 .name = "HL",
84 .family = AF_INET6, 84 .family = AF_INET6,
85 .target = ip6t_hl_target, 85 .target = ip6t_hl_target,
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 5bb9cd349350..b05327ebd332 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -32,12 +32,6 @@ struct in_device;
32#include <net/route.h> 32#include <net/route.h>
33#include <linux/netfilter_ipv6/ip6t_LOG.h> 33#include <linux/netfilter_ipv6/ip6t_LOG.h>
34 34
35#if 0
36#define DEBUGP printk
37#else
38#define DEBUGP(format, args...)
39#endif
40
41/* Use lock to serialize, so printks don't overlap */ 35/* Use lock to serialize, so printks don't overlap */
42static DEFINE_SPINLOCK(log_lock); 36static DEFINE_SPINLOCK(log_lock);
43 37
@@ -48,7 +42,8 @@ static void dump_packet(const struct nf_loginfo *info,
48{ 42{
49 u_int8_t currenthdr; 43 u_int8_t currenthdr;
50 int fragment; 44 int fragment;
51 struct ipv6hdr _ip6h, *ih; 45 struct ipv6hdr _ip6h;
46 const struct ipv6hdr *ih;
52 unsigned int ptr; 47 unsigned int ptr;
53 unsigned int hdrlen = 0; 48 unsigned int hdrlen = 0;
54 unsigned int logflags; 49 unsigned int logflags;
@@ -78,7 +73,8 @@ static void dump_packet(const struct nf_loginfo *info,
78 ptr = ip6hoff + sizeof(struct ipv6hdr); 73 ptr = ip6hoff + sizeof(struct ipv6hdr);
79 currenthdr = ih->nexthdr; 74 currenthdr = ih->nexthdr;
80 while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) { 75 while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
81 struct ipv6_opt_hdr _hdr, *hp; 76 struct ipv6_opt_hdr _hdr;
77 const struct ipv6_opt_hdr *hp;
82 78
83 hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); 79 hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
84 if (hp == NULL) { 80 if (hp == NULL) {
@@ -92,7 +88,8 @@ static void dump_packet(const struct nf_loginfo *info,
92 88
93 switch (currenthdr) { 89 switch (currenthdr) {
94 case IPPROTO_FRAGMENT: { 90 case IPPROTO_FRAGMENT: {
95 struct frag_hdr _fhdr, *fh; 91 struct frag_hdr _fhdr;
92 const struct frag_hdr *fh;
96 93
97 printk("FRAG:"); 94 printk("FRAG:");
98 fh = skb_header_pointer(skb, ptr, sizeof(_fhdr), 95 fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
@@ -131,7 +128,8 @@ static void dump_packet(const struct nf_loginfo *info,
131 /* Max Length */ 128 /* Max Length */
132 case IPPROTO_AH: 129 case IPPROTO_AH:
133 if (logflags & IP6T_LOG_IPOPT) { 130 if (logflags & IP6T_LOG_IPOPT) {
134 struct ip_auth_hdr _ahdr, *ah; 131 struct ip_auth_hdr _ahdr;
132 const struct ip_auth_hdr *ah;
135 133
136 /* Max length: 3 "AH " */ 134 /* Max length: 3 "AH " */
137 printk("AH "); 135 printk("AH ");
@@ -162,7 +160,8 @@ static void dump_packet(const struct nf_loginfo *info,
162 break; 160 break;
163 case IPPROTO_ESP: 161 case IPPROTO_ESP:
164 if (logflags & IP6T_LOG_IPOPT) { 162 if (logflags & IP6T_LOG_IPOPT) {
165 struct ip_esp_hdr _esph, *eh; 163 struct ip_esp_hdr _esph;
164 const struct ip_esp_hdr *eh;
166 165
167 /* Max length: 4 "ESP " */ 166 /* Max length: 4 "ESP " */
168 printk("ESP "); 167 printk("ESP ");
@@ -202,7 +201,8 @@ static void dump_packet(const struct nf_loginfo *info,
202 201
203 switch (currenthdr) { 202 switch (currenthdr) {
204 case IPPROTO_TCP: { 203 case IPPROTO_TCP: {
205 struct tcphdr _tcph, *th; 204 struct tcphdr _tcph;
205 const struct tcphdr *th;
206 206
207 /* Max length: 10 "PROTO=TCP " */ 207 /* Max length: 10 "PROTO=TCP " */
208 printk("PROTO=TCP "); 208 printk("PROTO=TCP ");
@@ -250,7 +250,8 @@ static void dump_packet(const struct nf_loginfo *info,
250 250
251 if ((logflags & IP6T_LOG_TCPOPT) 251 if ((logflags & IP6T_LOG_TCPOPT)
252 && th->doff * 4 > sizeof(struct tcphdr)) { 252 && th->doff * 4 > sizeof(struct tcphdr)) {
253 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op; 253 u_int8_t _opt[60 - sizeof(struct tcphdr)];
254 const u_int8_t *op;
254 unsigned int i; 255 unsigned int i;
255 unsigned int optsize = th->doff * 4 256 unsigned int optsize = th->doff * 4
256 - sizeof(struct tcphdr); 257 - sizeof(struct tcphdr);
@@ -273,7 +274,8 @@ static void dump_packet(const struct nf_loginfo *info,
273 } 274 }
274 case IPPROTO_UDP: 275 case IPPROTO_UDP:
275 case IPPROTO_UDPLITE: { 276 case IPPROTO_UDPLITE: {
276 struct udphdr _udph, *uh; 277 struct udphdr _udph;
278 const struct udphdr *uh;
277 279
278 if (currenthdr == IPPROTO_UDP) 280 if (currenthdr == IPPROTO_UDP)
279 /* Max length: 10 "PROTO=UDP " */ 281 /* Max length: 10 "PROTO=UDP " */
@@ -298,7 +300,8 @@ static void dump_packet(const struct nf_loginfo *info,
298 break; 300 break;
299 } 301 }
300 case IPPROTO_ICMPV6: { 302 case IPPROTO_ICMPV6: {
301 struct icmp6hdr _icmp6h, *ic; 303 struct icmp6hdr _icmp6h;
304 const struct icmp6hdr *ic;
302 305
303 /* Max length: 13 "PROTO=ICMPv6 " */ 306 /* Max length: 13 "PROTO=ICMPv6 " */
304 printk("PROTO=ICMPv6 "); 307 printk("PROTO=ICMPv6 ");
@@ -448,27 +451,27 @@ ip6t_log_target(struct sk_buff **pskb,
448} 451}
449 452
450 453
451static int ip6t_log_checkentry(const char *tablename, 454static bool ip6t_log_checkentry(const char *tablename,
452 const void *entry, 455 const void *entry,
453 const struct xt_target *target, 456 const struct xt_target *target,
454 void *targinfo, 457 void *targinfo,
455 unsigned int hook_mask) 458 unsigned int hook_mask)
456{ 459{
457 const struct ip6t_log_info *loginfo = targinfo; 460 const struct ip6t_log_info *loginfo = targinfo;
458 461
459 if (loginfo->level >= 8) { 462 if (loginfo->level >= 8) {
460 DEBUGP("LOG: level %u >= 8\n", loginfo->level); 463 pr_debug("LOG: level %u >= 8\n", loginfo->level);
461 return 0; 464 return false;
462 } 465 }
463 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { 466 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
464 DEBUGP("LOG: prefix term %i\n", 467 pr_debug("LOG: prefix term %i\n",
465 loginfo->prefix[sizeof(loginfo->prefix)-1]); 468 loginfo->prefix[sizeof(loginfo->prefix)-1]);
466 return 0; 469 return false;
467 } 470 }
468 return 1; 471 return true;
469} 472}
470 473
471static struct xt_target ip6t_log_reg = { 474static struct xt_target ip6t_log_reg __read_mostly = {
472 .name = "LOG", 475 .name = "LOG",
473 .family = AF_INET6, 476 .family = AF_INET6,
474 .target = ip6t_log_target, 477 .target = ip6t_log_target,
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index cb3d2415a064..2f487cda3b6b 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -34,12 +34,6 @@ MODULE_AUTHOR("Yasuyuki KOZAKAI <yasuyuki.kozakai@toshiba.co.jp>");
34MODULE_DESCRIPTION("IP6 tables REJECT target module"); 34MODULE_DESCRIPTION("IP6 tables REJECT target module");
35MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36 36
37#if 0
38#define DEBUGP printk
39#else
40#define DEBUGP(format, args...)
41#endif
42
43/* Send RST reply */ 37/* Send RST reply */
44static void send_reset(struct sk_buff *oldskb) 38static void send_reset(struct sk_buff *oldskb)
45{ 39{
@@ -54,7 +48,7 @@ static void send_reset(struct sk_buff *oldskb)
54 48
55 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || 49 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
56 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { 50 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
57 DEBUGP("ip6t_REJECT: addr is not unicast.\n"); 51 pr_debug("ip6t_REJECT: addr is not unicast.\n");
58 return; 52 return;
59 } 53 }
60 54
@@ -62,16 +56,17 @@ static void send_reset(struct sk_buff *oldskb)
62 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto); 56 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
63 57
64 if ((tcphoff < 0) || (tcphoff > oldskb->len)) { 58 if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
65 DEBUGP("ip6t_REJECT: Can't get TCP header.\n"); 59 pr_debug("ip6t_REJECT: Can't get TCP header.\n");
66 return; 60 return;
67 } 61 }
68 62
69 otcplen = oldskb->len - tcphoff; 63 otcplen = oldskb->len - tcphoff;
70 64
71 /* IP header checks: fragment, too short. */ 65 /* IP header checks: fragment, too short. */
72 if ((proto != IPPROTO_TCP) || (otcplen < sizeof(struct tcphdr))) { 66 if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
73 DEBUGP("ip6t_REJECT: proto(%d) != IPPROTO_TCP, or too short. otcplen = %d\n", 67 pr_debug("ip6t_REJECT: proto(%d) != IPPROTO_TCP, "
74 proto, otcplen); 68 "or too short. otcplen = %d\n",
69 proto, otcplen);
75 return; 70 return;
76 } 71 }
77 72
@@ -80,14 +75,14 @@ static void send_reset(struct sk_buff *oldskb)
80 75
81 /* No RST for RST. */ 76 /* No RST for RST. */
82 if (otcph.rst) { 77 if (otcph.rst) {
83 DEBUGP("ip6t_REJECT: RST is set\n"); 78 pr_debug("ip6t_REJECT: RST is set\n");
84 return; 79 return;
85 } 80 }
86 81
87 /* Check checksum. */ 82 /* Check checksum. */
88 if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP, 83 if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
89 skb_checksum(oldskb, tcphoff, otcplen, 0))) { 84 skb_checksum(oldskb, tcphoff, otcplen, 0))) {
90 DEBUGP("ip6t_REJECT: TCP checksum is invalid\n"); 85 pr_debug("ip6t_REJECT: TCP checksum is invalid\n");
91 return; 86 return;
92 } 87 }
93 88
@@ -159,7 +154,7 @@ static void send_reset(struct sk_buff *oldskb)
159 tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, 154 tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
160 &ipv6_hdr(nskb)->daddr, 155 &ipv6_hdr(nskb)->daddr,
161 sizeof(struct tcphdr), IPPROTO_TCP, 156 sizeof(struct tcphdr), IPPROTO_TCP,
162 csum_partial((char *)tcph, 157 csum_partial(tcph,
163 sizeof(struct tcphdr), 0)); 158 sizeof(struct tcphdr), 0));
164 159
165 nf_ct_attach(nskb, oldskb); 160 nf_ct_attach(nskb, oldskb);
@@ -186,7 +181,7 @@ static unsigned int reject6_target(struct sk_buff **pskb,
186{ 181{
187 const struct ip6t_reject_info *reject = targinfo; 182 const struct ip6t_reject_info *reject = targinfo;
188 183
189 DEBUGP(KERN_DEBUG "%s: medium point\n", __FUNCTION__); 184 pr_debug("%s: medium point\n", __FUNCTION__);
190 /* WARNING: This code causes reentry within ip6tables. 185 /* WARNING: This code causes reentry within ip6tables.
191 This means that the ip6tables jump stack is now crap. We 186 This means that the ip6tables jump stack is now crap. We
192 must return an absolute verdict. --RR */ 187 must return an absolute verdict. --RR */
@@ -221,30 +216,30 @@ static unsigned int reject6_target(struct sk_buff **pskb,
221 return NF_DROP; 216 return NF_DROP;
222} 217}
223 218
224static int check(const char *tablename, 219static bool check(const char *tablename,
225 const void *entry, 220 const void *entry,
226 const struct xt_target *target, 221 const struct xt_target *target,
227 void *targinfo, 222 void *targinfo,
228 unsigned int hook_mask) 223 unsigned int hook_mask)
229{ 224{
230 const struct ip6t_reject_info *rejinfo = targinfo; 225 const struct ip6t_reject_info *rejinfo = targinfo;
231 const struct ip6t_entry *e = entry; 226 const struct ip6t_entry *e = entry;
232 227
233 if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { 228 if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
234 printk("ip6t_REJECT: ECHOREPLY is not supported.\n"); 229 printk("ip6t_REJECT: ECHOREPLY is not supported.\n");
235 return 0; 230 return false;
236 } else if (rejinfo->with == IP6T_TCP_RESET) { 231 } else if (rejinfo->with == IP6T_TCP_RESET) {
237 /* Must specify that it's a TCP packet */ 232 /* Must specify that it's a TCP packet */
238 if (e->ipv6.proto != IPPROTO_TCP 233 if (e->ipv6.proto != IPPROTO_TCP
239 || (e->ipv6.invflags & XT_INV_PROTO)) { 234 || (e->ipv6.invflags & XT_INV_PROTO)) {
240 DEBUGP("ip6t_REJECT: TCP_RESET illegal for non-tcp\n"); 235 printk("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
241 return 0; 236 return false;
242 } 237 }
243 } 238 }
244 return 1; 239 return true;
245} 240}
246 241
247static struct xt_target ip6t_reject_reg = { 242static struct xt_target ip6t_reject_reg __read_mostly = {
248 .name = "REJECT", 243 .name = "REJECT",
249 .family = AF_INET6, 244 .family = AF_INET6,
250 .target = reject6_target, 245 .target = reject6_target,
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index d3c154371b41..2a25fe25e0e0 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -23,25 +23,20 @@ MODULE_LICENSE("GPL");
23MODULE_DESCRIPTION("IPv6 AH match"); 23MODULE_DESCRIPTION("IPv6 AH match");
24MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); 24MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
25 25
26#if 0
27#define DEBUGP printk
28#else
29#define DEBUGP(format, args...)
30#endif
31
32/* Returns 1 if the spi is matched by the range, 0 otherwise */ 26/* Returns 1 if the spi is matched by the range, 0 otherwise */
33static inline int 27static inline bool
34spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) 28spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
35{ 29{
36 int r=0; 30 bool r;
37 DEBUGP("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', 31
38 min,spi,max); 32 pr_debug("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",
33 invert ? '!' : ' ', min, spi, max);
39 r = (spi >= min && spi <= max) ^ invert; 34 r = (spi >= min && spi <= max) ^ invert;
40 DEBUGP(" result %s\n",r? "PASS\n" : "FAILED\n"); 35 pr_debug(" result %s\n", r ? "PASS" : "FAILED");
41 return r; 36 return r;
42} 37}
43 38
44static int 39static bool
45match(const struct sk_buff *skb, 40match(const struct sk_buff *skb,
46 const struct net_device *in, 41 const struct net_device *in,
47 const struct net_device *out, 42 const struct net_device *out,
@@ -49,9 +44,10 @@ match(const struct sk_buff *skb,
49 const void *matchinfo, 44 const void *matchinfo,
50 int offset, 45 int offset,
51 unsigned int protoff, 46 unsigned int protoff,
52 int *hotdrop) 47 bool *hotdrop)
53{ 48{
54 struct ip_auth_hdr *ah, _ah; 49 struct ip_auth_hdr _ah;
50 const struct ip_auth_hdr *ah;
55 const struct ip6t_ah *ahinfo = matchinfo; 51 const struct ip6t_ah *ahinfo = matchinfo;
56 unsigned int ptr; 52 unsigned int ptr;
57 unsigned int hdrlen = 0; 53 unsigned int hdrlen = 0;
@@ -60,40 +56,40 @@ match(const struct sk_buff *skb,
60 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL); 56 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL);
61 if (err < 0) { 57 if (err < 0) {
62 if (err != -ENOENT) 58 if (err != -ENOENT)
63 *hotdrop = 1; 59 *hotdrop = true;
64 return 0; 60 return false;
65 } 61 }
66 62
67 ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah); 63 ah = skb_header_pointer(skb, ptr, sizeof(_ah), &_ah);
68 if (ah == NULL) { 64 if (ah == NULL) {
69 *hotdrop = 1; 65 *hotdrop = true;
70 return 0; 66 return false;
71 } 67 }
72 68
73 hdrlen = (ah->hdrlen + 2) << 2; 69 hdrlen = (ah->hdrlen + 2) << 2;
74 70
75 DEBUGP("IPv6 AH LEN %u %u ", hdrlen, ah->hdrlen); 71 pr_debug("IPv6 AH LEN %u %u ", hdrlen, ah->hdrlen);
76 DEBUGP("RES %04X ", ah->reserved); 72 pr_debug("RES %04X ", ah->reserved);
77 DEBUGP("SPI %u %08X\n", ntohl(ah->spi), ntohl(ah->spi)); 73 pr_debug("SPI %u %08X\n", ntohl(ah->spi), ntohl(ah->spi));
78 74
79 DEBUGP("IPv6 AH spi %02X ", 75 pr_debug("IPv6 AH spi %02X ",
80 (spi_match(ahinfo->spis[0], ahinfo->spis[1], 76 spi_match(ahinfo->spis[0], ahinfo->spis[1],
81 ntohl(ah->spi), 77 ntohl(ah->spi),
82 !!(ahinfo->invflags & IP6T_AH_INV_SPI)))); 78 !!(ahinfo->invflags & IP6T_AH_INV_SPI)));
83 DEBUGP("len %02X %04X %02X ", 79 pr_debug("len %02X %04X %02X ",
84 ahinfo->hdrlen, hdrlen, 80 ahinfo->hdrlen, hdrlen,
85 (!ahinfo->hdrlen || 81 (!ahinfo->hdrlen ||
86 (ahinfo->hdrlen == hdrlen) ^ 82 (ahinfo->hdrlen == hdrlen) ^
87 !!(ahinfo->invflags & IP6T_AH_INV_LEN))); 83 !!(ahinfo->invflags & IP6T_AH_INV_LEN)));
88 DEBUGP("res %02X %04X %02X\n", 84 pr_debug("res %02X %04X %02X\n",
89 ahinfo->hdrres, ah->reserved, 85 ahinfo->hdrres, ah->reserved,
90 !(ahinfo->hdrres && ah->reserved)); 86 !(ahinfo->hdrres && ah->reserved));
91 87
92 return (ah != NULL) 88 return (ah != NULL)
93 && 89 &&
94 (spi_match(ahinfo->spis[0], ahinfo->spis[1], 90 spi_match(ahinfo->spis[0], ahinfo->spis[1],
95 ntohl(ah->spi), 91 ntohl(ah->spi),
96 !!(ahinfo->invflags & IP6T_AH_INV_SPI))) 92 !!(ahinfo->invflags & IP6T_AH_INV_SPI))
97 && 93 &&
98 (!ahinfo->hdrlen || 94 (!ahinfo->hdrlen ||
99 (ahinfo->hdrlen == hdrlen) ^ 95 (ahinfo->hdrlen == hdrlen) ^
@@ -103,7 +99,7 @@ match(const struct sk_buff *skb,
103} 99}
104 100
105/* Called when user tries to insert an entry of this type. */ 101/* Called when user tries to insert an entry of this type. */
106static int 102static bool
107checkentry(const char *tablename, 103checkentry(const char *tablename,
108 const void *entry, 104 const void *entry,
109 const struct xt_match *match, 105 const struct xt_match *match,
@@ -113,13 +109,13 @@ checkentry(const char *tablename,
113 const struct ip6t_ah *ahinfo = matchinfo; 109 const struct ip6t_ah *ahinfo = matchinfo;
114 110
115 if (ahinfo->invflags & ~IP6T_AH_INV_MASK) { 111 if (ahinfo->invflags & ~IP6T_AH_INV_MASK) {
116 DEBUGP("ip6t_ah: unknown flags %X\n", ahinfo->invflags); 112 pr_debug("ip6t_ah: unknown flags %X\n", ahinfo->invflags);
117 return 0; 113 return false;
118 } 114 }
119 return 1; 115 return true;
120} 116}
121 117
122static struct xt_match ah_match = { 118static struct xt_match ah_match __read_mostly = {
123 .name = "ah", 119 .name = "ah",
124 .family = AF_INET6, 120 .family = AF_INET6,
125 .match = match, 121 .match = match,
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 0f3dd932f0a6..34ba150bfe5d 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -19,7 +19,7 @@ MODULE_DESCRIPTION("IPv6 EUI64 address checking match");
19MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
20MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); 20MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
21 21
22static int 22static bool
23match(const struct sk_buff *skb, 23match(const struct sk_buff *skb,
24 const struct net_device *in, 24 const struct net_device *in,
25 const struct net_device *out, 25 const struct net_device *out,
@@ -27,16 +27,16 @@ match(const struct sk_buff *skb,
27 const void *matchinfo, 27 const void *matchinfo,
28 int offset, 28 int offset,
29 unsigned int protoff, 29 unsigned int protoff,
30 int *hotdrop) 30 bool *hotdrop)
31{ 31{
32 unsigned char eui64[8]; 32 unsigned char eui64[8];
33 int i = 0; 33 int i = 0;
34 34
35 if (!(skb_mac_header(skb) >= skb->head && 35 if (!(skb_mac_header(skb) >= skb->head &&
36 (skb_mac_header(skb) + ETH_HLEN) <= skb->data) && 36 skb_mac_header(skb) + ETH_HLEN <= skb->data) &&
37 offset != 0) { 37 offset != 0) {
38 *hotdrop = 1; 38 *hotdrop = true;
39 return 0; 39 return false;
40 } 40 }
41 41
42 memset(eui64, 0, sizeof(eui64)); 42 memset(eui64, 0, sizeof(eui64));
@@ -50,19 +50,19 @@ match(const struct sk_buff *skb,
50 eui64[0] |= 0x02; 50 eui64[0] |= 0x02;
51 51
52 i = 0; 52 i = 0;
53 while ((ipv6_hdr(skb)->saddr.s6_addr[8 + i] == eui64[i]) 53 while (ipv6_hdr(skb)->saddr.s6_addr[8 + i] == eui64[i]
54 && (i < 8)) 54 && i < 8)
55 i++; 55 i++;
56 56
57 if (i == 8) 57 if (i == 8)
58 return 1; 58 return true;
59 } 59 }
60 } 60 }
61 61
62 return 0; 62 return false;
63} 63}
64 64
65static struct xt_match eui64_match = { 65static struct xt_match eui64_match __read_mostly = {
66 .name = "eui64", 66 .name = "eui64",
67 .family = AF_INET6, 67 .family = AF_INET6,
68 .match = match, 68 .match = match,
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index 5a5da71321b6..968aeba02073 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -22,25 +22,19 @@ MODULE_LICENSE("GPL");
22MODULE_DESCRIPTION("IPv6 FRAG match"); 22MODULE_DESCRIPTION("IPv6 FRAG match");
23MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); 23MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
24 24
25#if 0
26#define DEBUGP printk
27#else
28#define DEBUGP(format, args...)
29#endif
30
31/* Returns 1 if the id is matched by the range, 0 otherwise */ 25/* Returns 1 if the id is matched by the range, 0 otherwise */
32static inline int 26static inline bool
33id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) 27id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
34{ 28{
35 int r = 0; 29 bool r;
36 DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ', 30 pr_debug("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
37 min, id, max); 31 min, id, max);
38 r = (id >= min && id <= max) ^ invert; 32 r = (id >= min && id <= max) ^ invert;
39 DEBUGP(" result %s\n", r ? "PASS" : "FAILED"); 33 pr_debug(" result %s\n", r ? "PASS" : "FAILED");
40 return r; 34 return r;
41} 35}
42 36
43static int 37static bool
44match(const struct sk_buff *skb, 38match(const struct sk_buff *skb,
45 const struct net_device *in, 39 const struct net_device *in,
46 const struct net_device *out, 40 const struct net_device *out,
@@ -48,9 +42,10 @@ match(const struct sk_buff *skb,
48 const void *matchinfo, 42 const void *matchinfo,
49 int offset, 43 int offset,
50 unsigned int protoff, 44 unsigned int protoff,
51 int *hotdrop) 45 bool *hotdrop)
52{ 46{
53 struct frag_hdr _frag, *fh; 47 struct frag_hdr _frag;
48 const struct frag_hdr *fh;
54 const struct ip6t_frag *fraginfo = matchinfo; 49 const struct ip6t_frag *fraginfo = matchinfo;
55 unsigned int ptr; 50 unsigned int ptr;
56 int err; 51 int err;
@@ -58,53 +53,53 @@ match(const struct sk_buff *skb,
58 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL); 53 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL);
59 if (err < 0) { 54 if (err < 0) {
60 if (err != -ENOENT) 55 if (err != -ENOENT)
61 *hotdrop = 1; 56 *hotdrop = true;
62 return 0; 57 return false;
63 } 58 }
64 59
65 fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); 60 fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
66 if (fh == NULL) { 61 if (fh == NULL) {
67 *hotdrop = 1; 62 *hotdrop = true;
68 return 0; 63 return false;
69 } 64 }
70 65
71 DEBUGP("INFO %04X ", fh->frag_off); 66 pr_debug("INFO %04X ", fh->frag_off);
72 DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); 67 pr_debug("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
73 DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6); 68 pr_debug("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
74 DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF)); 69 pr_debug("MF %04X ", fh->frag_off & htons(IP6_MF));
75 DEBUGP("ID %u %08X\n", ntohl(fh->identification), 70 pr_debug("ID %u %08X\n", ntohl(fh->identification),
76 ntohl(fh->identification)); 71 ntohl(fh->identification));
77 72
78 DEBUGP("IPv6 FRAG id %02X ", 73 pr_debug("IPv6 FRAG id %02X ",
79 (id_match(fraginfo->ids[0], fraginfo->ids[1], 74 id_match(fraginfo->ids[0], fraginfo->ids[1],
80 ntohl(fh->identification), 75 ntohl(fh->identification),
81 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))); 76 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)));
82 DEBUGP("res %02X %02X%04X %02X ", 77 pr_debug("res %02X %02X%04X %02X ",
83 (fraginfo->flags & IP6T_FRAG_RES), fh->reserved, 78 fraginfo->flags & IP6T_FRAG_RES, fh->reserved,
84 ntohs(fh->frag_off) & 0x6, 79 ntohs(fh->frag_off) & 0x6,
85 !((fraginfo->flags & IP6T_FRAG_RES) 80 !((fraginfo->flags & IP6T_FRAG_RES)
86 && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); 81 && (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
87 DEBUGP("first %02X %02X %02X ", 82 pr_debug("first %02X %02X %02X ",
88 (fraginfo->flags & IP6T_FRAG_FST), 83 fraginfo->flags & IP6T_FRAG_FST,
89 ntohs(fh->frag_off) & ~0x7, 84 ntohs(fh->frag_off) & ~0x7,
90 !((fraginfo->flags & IP6T_FRAG_FST) 85 !((fraginfo->flags & IP6T_FRAG_FST)
91 && (ntohs(fh->frag_off) & ~0x7))); 86 && (ntohs(fh->frag_off) & ~0x7)));
92 DEBUGP("mf %02X %02X %02X ", 87 pr_debug("mf %02X %02X %02X ",
93 (fraginfo->flags & IP6T_FRAG_MF), 88 fraginfo->flags & IP6T_FRAG_MF,
94 ntohs(fh->frag_off) & IP6_MF, 89 ntohs(fh->frag_off) & IP6_MF,
95 !((fraginfo->flags & IP6T_FRAG_MF) 90 !((fraginfo->flags & IP6T_FRAG_MF)
96 && !((ntohs(fh->frag_off) & IP6_MF)))); 91 && !((ntohs(fh->frag_off) & IP6_MF))));
97 DEBUGP("last %02X %02X %02X\n", 92 pr_debug("last %02X %02X %02X\n",
98 (fraginfo->flags & IP6T_FRAG_NMF), 93 fraginfo->flags & IP6T_FRAG_NMF,
99 ntohs(fh->frag_off) & IP6_MF, 94 ntohs(fh->frag_off) & IP6_MF,
100 !((fraginfo->flags & IP6T_FRAG_NMF) 95 !((fraginfo->flags & IP6T_FRAG_NMF)
101 && (ntohs(fh->frag_off) & IP6_MF))); 96 && (ntohs(fh->frag_off) & IP6_MF)));
102 97
103 return (fh != NULL) 98 return (fh != NULL)
104 && 99 &&
105 (id_match(fraginfo->ids[0], fraginfo->ids[1], 100 id_match(fraginfo->ids[0], fraginfo->ids[1],
106 ntohl(fh->identification), 101 ntohl(fh->identification),
107 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))) 102 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))
108 && 103 &&
109 !((fraginfo->flags & IP6T_FRAG_RES) 104 !((fraginfo->flags & IP6T_FRAG_RES)
110 && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) 105 && (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
@@ -120,7 +115,7 @@ match(const struct sk_buff *skb,
120} 115}
121 116
122/* Called when user tries to insert an entry of this type. */ 117/* Called when user tries to insert an entry of this type. */
123static int 118static bool
124checkentry(const char *tablename, 119checkentry(const char *tablename,
125 const void *ip, 120 const void *ip,
126 const struct xt_match *match, 121 const struct xt_match *match,
@@ -130,13 +125,13 @@ checkentry(const char *tablename,
130 const struct ip6t_frag *fraginfo = matchinfo; 125 const struct ip6t_frag *fraginfo = matchinfo;
131 126
132 if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { 127 if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
133 DEBUGP("ip6t_frag: unknown flags %X\n", fraginfo->invflags); 128 pr_debug("ip6t_frag: unknown flags %X\n", fraginfo->invflags);
134 return 0; 129 return false;
135 } 130 }
136 return 1; 131 return true;
137} 132}
138 133
139static struct xt_match frag_match = { 134static struct xt_match frag_match __read_mostly = {
140 .name = "frag", 135 .name = "frag",
141 .family = AF_INET6, 136 .family = AF_INET6,
142 .match = match, 137 .match = match,
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index d2373c7cd354..e6ca6018b1ea 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -25,12 +25,6 @@ MODULE_DESCRIPTION("IPv6 opts match");
25MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); 25MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
26MODULE_ALIAS("ip6t_dst"); 26MODULE_ALIAS("ip6t_dst");
27 27
28#if 0
29#define DEBUGP printk
30#else
31#define DEBUGP(format, args...)
32#endif
33
34/* 28/*
35 * (Type & 0xC0) >> 6 29 * (Type & 0xC0) >> 6
36 * 0 -> ignorable 30 * 0 -> ignorable
@@ -47,7 +41,7 @@ MODULE_ALIAS("ip6t_dst");
47 * 5 -> RTALERT 2 x x 41 * 5 -> RTALERT 2 x x
48 */ 42 */
49 43
50static int 44static bool
51match(const struct sk_buff *skb, 45match(const struct sk_buff *skb,
52 const struct net_device *in, 46 const struct net_device *in,
53 const struct net_device *out, 47 const struct net_device *out,
@@ -55,45 +49,48 @@ match(const struct sk_buff *skb,
55 const void *matchinfo, 49 const void *matchinfo,
56 int offset, 50 int offset,
57 unsigned int protoff, 51 unsigned int protoff,
58 int *hotdrop) 52 bool *hotdrop)
59{ 53{
60 struct ipv6_opt_hdr _optsh, *oh; 54 struct ipv6_opt_hdr _optsh;
55 const struct ipv6_opt_hdr *oh;
61 const struct ip6t_opts *optinfo = matchinfo; 56 const struct ip6t_opts *optinfo = matchinfo;
62 unsigned int temp; 57 unsigned int temp;
63 unsigned int ptr; 58 unsigned int ptr;
64 unsigned int hdrlen = 0; 59 unsigned int hdrlen = 0;
65 unsigned int ret = 0; 60 bool ret = false;
66 u8 _opttype, *tp = NULL; 61 u8 _opttype;
67 u8 _optlen, *lp = NULL; 62 u8 _optlen;
63 const u_int8_t *tp = NULL;
64 const u_int8_t *lp = NULL;
68 unsigned int optlen; 65 unsigned int optlen;
69 int err; 66 int err;
70 67
71 err = ipv6_find_hdr(skb, &ptr, match->data, NULL); 68 err = ipv6_find_hdr(skb, &ptr, match->data, NULL);
72 if (err < 0) { 69 if (err < 0) {
73 if (err != -ENOENT) 70 if (err != -ENOENT)
74 *hotdrop = 1; 71 *hotdrop = true;
75 return 0; 72 return false;
76 } 73 }
77 74
78 oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); 75 oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
79 if (oh == NULL) { 76 if (oh == NULL) {
80 *hotdrop = 1; 77 *hotdrop = true;
81 return 0; 78 return false;
82 } 79 }
83 80
84 hdrlen = ipv6_optlen(oh); 81 hdrlen = ipv6_optlen(oh);
85 if (skb->len - ptr < hdrlen) { 82 if (skb->len - ptr < hdrlen) {
86 /* Packet smaller than it's length field */ 83 /* Packet smaller than it's length field */
87 return 0; 84 return false;
88 } 85 }
89 86
90 DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); 87 pr_debug("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
91 88
92 DEBUGP("len %02X %04X %02X ", 89 pr_debug("len %02X %04X %02X ",
93 optinfo->hdrlen, hdrlen, 90 optinfo->hdrlen, hdrlen,
94 (!(optinfo->flags & IP6T_OPTS_LEN) || 91 (!(optinfo->flags & IP6T_OPTS_LEN) ||
95 ((optinfo->hdrlen == hdrlen) ^ 92 ((optinfo->hdrlen == hdrlen) ^
96 !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); 93 !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
97 94
98 ret = (oh != NULL) && 95 ret = (oh != NULL) &&
99 (!(optinfo->flags & IP6T_OPTS_LEN) || 96 (!(optinfo->flags & IP6T_OPTS_LEN) ||
@@ -105,10 +102,10 @@ match(const struct sk_buff *skb,
105 if (!(optinfo->flags & IP6T_OPTS_OPTS)) { 102 if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
106 return ret; 103 return ret;
107 } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { 104 } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
108 DEBUGP("Not strict - not implemented"); 105 pr_debug("Not strict - not implemented");
109 } else { 106 } else {
110 DEBUGP("Strict "); 107 pr_debug("Strict ");
111 DEBUGP("#%d ", optinfo->optsnr); 108 pr_debug("#%d ", optinfo->optsnr);
112 for (temp = 0; temp < optinfo->optsnr; temp++) { 109 for (temp = 0; temp < optinfo->optsnr; temp++) {
113 /* type field exists ? */ 110 /* type field exists ? */
114 if (hdrlen < 1) 111 if (hdrlen < 1)
@@ -120,12 +117,11 @@ match(const struct sk_buff *skb,
120 117
121 /* Type check */ 118 /* Type check */
122 if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) { 119 if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
123 DEBUGP("Tbad %02X %02X\n", 120 pr_debug("Tbad %02X %02X\n", *tp,
124 *tp, 121 (optinfo->opts[temp] & 0xFF00) >> 8);
125 (optinfo->opts[temp] & 0xFF00) >> 8); 122 return false;
126 return 0;
127 } else { 123 } else {
128 DEBUGP("Tok "); 124 pr_debug("Tok ");
129 } 125 }
130 /* Length check */ 126 /* Length check */
131 if (*tp) { 127 if (*tp) {
@@ -142,23 +138,23 @@ match(const struct sk_buff *skb,
142 spec_len = optinfo->opts[temp] & 0x00FF; 138 spec_len = optinfo->opts[temp] & 0x00FF;
143 139
144 if (spec_len != 0x00FF && spec_len != *lp) { 140 if (spec_len != 0x00FF && spec_len != *lp) {
145 DEBUGP("Lbad %02X %04X\n", *lp, 141 pr_debug("Lbad %02X %04X\n", *lp,
146 spec_len); 142 spec_len);
147 return 0; 143 return false;
148 } 144 }
149 DEBUGP("Lok "); 145 pr_debug("Lok ");
150 optlen = *lp + 2; 146 optlen = *lp + 2;
151 } else { 147 } else {
152 DEBUGP("Pad1\n"); 148 pr_debug("Pad1\n");
153 optlen = 1; 149 optlen = 1;
154 } 150 }
155 151
156 /* Step to the next */ 152 /* Step to the next */
157 DEBUGP("len%04X \n", optlen); 153 pr_debug("len%04X \n", optlen);
158 154
159 if ((ptr > skb->len - optlen || hdrlen < optlen) && 155 if ((ptr > skb->len - optlen || hdrlen < optlen) &&
160 (temp < optinfo->optsnr - 1)) { 156 temp < optinfo->optsnr - 1) {
161 DEBUGP("new pointer is too large! \n"); 157 pr_debug("new pointer is too large! \n");
162 break; 158 break;
163 } 159 }
164 ptr += optlen; 160 ptr += optlen;
@@ -167,14 +163,14 @@ match(const struct sk_buff *skb,
167 if (temp == optinfo->optsnr) 163 if (temp == optinfo->optsnr)
168 return ret; 164 return ret;
169 else 165 else
170 return 0; 166 return false;
171 } 167 }
172 168
173 return 0; 169 return false;
174} 170}
175 171
176/* Called when user tries to insert an entry of this type. */ 172/* Called when user tries to insert an entry of this type. */
177static int 173static bool
178checkentry(const char *tablename, 174checkentry(const char *tablename,
179 const void *entry, 175 const void *entry,
180 const struct xt_match *match, 176 const struct xt_match *match,
@@ -184,13 +180,13 @@ checkentry(const char *tablename,
184 const struct ip6t_opts *optsinfo = matchinfo; 180 const struct ip6t_opts *optsinfo = matchinfo;
185 181
186 if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { 182 if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
187 DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags); 183 pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
188 return 0; 184 return false;
189 } 185 }
190 return 1; 186 return true;
191} 187}
192 188
193static struct xt_match opts_match[] = { 189static struct xt_match opts_match[] __read_mostly = {
194 { 190 {
195 .name = "hbh", 191 .name = "hbh",
196 .family = AF_INET6, 192 .family = AF_INET6,
diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c
index d606c0e6d6fd..ca29ec00dc18 100644
--- a/net/ipv6/netfilter/ip6t_hl.c
+++ b/net/ipv6/netfilter/ip6t_hl.c
@@ -19,37 +19,37 @@ MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
19MODULE_DESCRIPTION("IP tables Hop Limit matching module"); 19MODULE_DESCRIPTION("IP tables Hop Limit matching module");
20MODULE_LICENSE("GPL"); 20MODULE_LICENSE("GPL");
21 21
22static int match(const struct sk_buff *skb, 22static bool match(const struct sk_buff *skb,
23 const struct net_device *in, const struct net_device *out, 23 const struct net_device *in, const struct net_device *out,
24 const struct xt_match *match, const void *matchinfo, 24 const struct xt_match *match, const void *matchinfo,
25 int offset, unsigned int protoff, int *hotdrop) 25 int offset, unsigned int protoff, bool *hotdrop)
26{ 26{
27 const struct ip6t_hl_info *info = matchinfo; 27 const struct ip6t_hl_info *info = matchinfo;
28 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 28 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
29 29
30 switch (info->mode) { 30 switch (info->mode) {
31 case IP6T_HL_EQ: 31 case IP6T_HL_EQ:
32 return (ip6h->hop_limit == info->hop_limit); 32 return ip6h->hop_limit == info->hop_limit;
33 break; 33 break;
34 case IP6T_HL_NE: 34 case IP6T_HL_NE:
35 return (!(ip6h->hop_limit == info->hop_limit)); 35 return ip6h->hop_limit != info->hop_limit;
36 break; 36 break;
37 case IP6T_HL_LT: 37 case IP6T_HL_LT:
38 return (ip6h->hop_limit < info->hop_limit); 38 return ip6h->hop_limit < info->hop_limit;
39 break; 39 break;
40 case IP6T_HL_GT: 40 case IP6T_HL_GT:
41 return (ip6h->hop_limit > info->hop_limit); 41 return ip6h->hop_limit > info->hop_limit;
42 break; 42 break;
43 default: 43 default:
44 printk(KERN_WARNING "ip6t_hl: unknown mode %d\n", 44 printk(KERN_WARNING "ip6t_hl: unknown mode %d\n",
45 info->mode); 45 info->mode);
46 return 0; 46 return false;
47 } 47 }
48 48
49 return 0; 49 return false;
50} 50}
51 51
52static struct xt_match hl_match = { 52static struct xt_match hl_match __read_mostly = {
53 .name = "hl", 53 .name = "hl",
54 .family = AF_INET6, 54 .family = AF_INET6,
55 .match = match, 55 .match = match,
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index fd6a0869099b..2c65c2f9a4ab 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -26,7 +26,7 @@ MODULE_LICENSE("GPL");
26MODULE_DESCRIPTION("IPv6 headers match"); 26MODULE_DESCRIPTION("IPv6 headers match");
27MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); 27MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
28 28
29static int 29static bool
30ipv6header_match(const struct sk_buff *skb, 30ipv6header_match(const struct sk_buff *skb,
31 const struct net_device *in, 31 const struct net_device *in,
32 const struct net_device *out, 32 const struct net_device *out,
@@ -34,7 +34,7 @@ ipv6header_match(const struct sk_buff *skb,
34 const void *matchinfo, 34 const void *matchinfo,
35 int offset, 35 int offset,
36 unsigned int protoff, 36 unsigned int protoff,
37 int *hotdrop) 37 bool *hotdrop)
38{ 38{
39 const struct ip6t_ipv6header_info *info = matchinfo; 39 const struct ip6t_ipv6header_info *info = matchinfo;
40 unsigned int temp; 40 unsigned int temp;
@@ -58,7 +58,7 @@ ipv6header_match(const struct sk_buff *skb,
58 58
59 /* Is there enough space for the next ext header? */ 59 /* Is there enough space for the next ext header? */
60 if (len < (int)sizeof(struct ipv6_opt_hdr)) 60 if (len < (int)sizeof(struct ipv6_opt_hdr))
61 return 0; 61 return false;
62 /* No more exthdr -> evaluate */ 62 /* No more exthdr -> evaluate */
63 if (nexthdr == NEXTHDR_NONE) { 63 if (nexthdr == NEXTHDR_NONE) {
64 temp |= MASK_NONE; 64 temp |= MASK_NONE;
@@ -74,9 +74,9 @@ ipv6header_match(const struct sk_buff *skb,
74 BUG_ON(hp == NULL); 74 BUG_ON(hp == NULL);
75 75
76 /* Calculate the header length */ 76 /* Calculate the header length */
77 if (nexthdr == NEXTHDR_FRAGMENT) { 77 if (nexthdr == NEXTHDR_FRAGMENT)
78 hdrlen = 8; 78 hdrlen = 8;
79 } else if (nexthdr == NEXTHDR_AUTH) 79 else if (nexthdr == NEXTHDR_AUTH)
80 hdrlen = (hp->hdrlen + 2) << 2; 80 hdrlen = (hp->hdrlen + 2) << 2;
81 else 81 else
82 hdrlen = ipv6_optlen(hp); 82 hdrlen = ipv6_optlen(hp);
@@ -99,7 +99,7 @@ ipv6header_match(const struct sk_buff *skb,
99 temp |= MASK_DSTOPTS; 99 temp |= MASK_DSTOPTS;
100 break; 100 break;
101 default: 101 default:
102 return 0; 102 return false;
103 break; 103 break;
104 } 104 }
105 105
@@ -110,7 +110,7 @@ ipv6header_match(const struct sk_buff *skb,
110 break; 110 break;
111 } 111 }
112 112
113 if ((nexthdr != NEXTHDR_NONE) && (nexthdr != NEXTHDR_ESP)) 113 if (nexthdr != NEXTHDR_NONE && nexthdr != NEXTHDR_ESP)
114 temp |= MASK_PROTO; 114 temp |= MASK_PROTO;
115 115
116 if (info->modeflag) 116 if (info->modeflag)
@@ -124,7 +124,7 @@ ipv6header_match(const struct sk_buff *skb,
124 } 124 }
125} 125}
126 126
127static int 127static bool
128ipv6header_checkentry(const char *tablename, 128ipv6header_checkentry(const char *tablename,
129 const void *ip, 129 const void *ip,
130 const struct xt_match *match, 130 const struct xt_match *match,
@@ -136,12 +136,12 @@ ipv6header_checkentry(const char *tablename,
136 /* invflags is 0 or 0xff in hard mode */ 136 /* invflags is 0 or 0xff in hard mode */
137 if ((!info->modeflag) && info->invflags != 0x00 && 137 if ((!info->modeflag) && info->invflags != 0x00 &&
138 info->invflags != 0xFF) 138 info->invflags != 0xFF)
139 return 0; 139 return false;
140 140
141 return 1; 141 return true;
142} 142}
143 143
144static struct xt_match ip6t_ipv6header_match = { 144static struct xt_match ip6t_ipv6header_match __read_mostly = {
145 .name = "ipv6header", 145 .name = "ipv6header",
146 .family = AF_INET6, 146 .family = AF_INET6,
147 .match = &ipv6header_match, 147 .match = &ipv6header_match,
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c
index c2a909893a64..0fa714092dc9 100644
--- a/net/ipv6/netfilter/ip6t_mh.c
+++ b/net/ipv6/netfilter/ip6t_mh.c
@@ -31,16 +31,13 @@ MODULE_LICENSE("GPL");
31#endif 31#endif
32 32
33/* Returns 1 if the type is matched by the range, 0 otherwise */ 33/* Returns 1 if the type is matched by the range, 0 otherwise */
34static inline int 34static inline bool
35type_match(u_int8_t min, u_int8_t max, u_int8_t type, int invert) 35type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert)
36{ 36{
37 int ret; 37 return (type >= min && type <= max) ^ invert;
38
39 ret = (type >= min && type <= max) ^ invert;
40 return ret;
41} 38}
42 39
43static int 40static bool
44match(const struct sk_buff *skb, 41match(const struct sk_buff *skb,
45 const struct net_device *in, 42 const struct net_device *in,
46 const struct net_device *out, 43 const struct net_device *out,
@@ -48,29 +45,30 @@ match(const struct sk_buff *skb,
48 const void *matchinfo, 45 const void *matchinfo,
49 int offset, 46 int offset,
50 unsigned int protoff, 47 unsigned int protoff,
51 int *hotdrop) 48 bool *hotdrop)
52{ 49{
53 struct ip6_mh _mh, *mh; 50 struct ip6_mh _mh;
51 const struct ip6_mh *mh;
54 const struct ip6t_mh *mhinfo = matchinfo; 52 const struct ip6t_mh *mhinfo = matchinfo;
55 53
56 /* Must not be a fragment. */ 54 /* Must not be a fragment. */
57 if (offset) 55 if (offset)
58 return 0; 56 return false;
59 57
60 mh = skb_header_pointer(skb, protoff, sizeof(_mh), &_mh); 58 mh = skb_header_pointer(skb, protoff, sizeof(_mh), &_mh);
61 if (mh == NULL) { 59 if (mh == NULL) {
62 /* We've been asked to examine this packet, and we 60 /* We've been asked to examine this packet, and we
63 can't. Hence, no choice but to drop. */ 61 can't. Hence, no choice but to drop. */
64 duprintf("Dropping evil MH tinygram.\n"); 62 duprintf("Dropping evil MH tinygram.\n");
65 *hotdrop = 1; 63 *hotdrop = true;
66 return 0; 64 return false;
67 } 65 }
68 66
69 if (mh->ip6mh_proto != IPPROTO_NONE) { 67 if (mh->ip6mh_proto != IPPROTO_NONE) {
70 duprintf("Dropping invalid MH Payload Proto: %u\n", 68 duprintf("Dropping invalid MH Payload Proto: %u\n",
71 mh->ip6mh_proto); 69 mh->ip6mh_proto);
72 *hotdrop = 1; 70 *hotdrop = true;
73 return 0; 71 return false;
74 } 72 }
75 73
76 return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type, 74 return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type,
@@ -78,7 +76,7 @@ match(const struct sk_buff *skb,
78} 76}
79 77
80/* Called when user tries to insert an entry of this type. */ 78/* Called when user tries to insert an entry of this type. */
81static int 79static bool
82mh_checkentry(const char *tablename, 80mh_checkentry(const char *tablename,
83 const void *entry, 81 const void *entry,
84 const struct xt_match *match, 82 const struct xt_match *match,
@@ -91,7 +89,7 @@ mh_checkentry(const char *tablename,
91 return !(mhinfo->invflags & ~IP6T_MH_INV_MASK); 89 return !(mhinfo->invflags & ~IP6T_MH_INV_MASK);
92} 90}
93 91
94static struct xt_match mh_match = { 92static struct xt_match mh_match __read_mostly = {
95 .name = "mh", 93 .name = "mh",
96 .family = AF_INET6, 94 .family = AF_INET6,
97 .checkentry = mh_checkentry, 95 .checkentry = mh_checkentry,
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c
index 43738bba00b5..6036613aef36 100644
--- a/net/ipv6/netfilter/ip6t_owner.c
+++ b/net/ipv6/netfilter/ip6t_owner.c
@@ -23,7 +23,7 @@ MODULE_DESCRIPTION("IP6 tables owner matching module");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
25 25
26static int 26static bool
27match(const struct sk_buff *skb, 27match(const struct sk_buff *skb,
28 const struct net_device *in, 28 const struct net_device *in,
29 const struct net_device *out, 29 const struct net_device *out,
@@ -31,29 +31,27 @@ match(const struct sk_buff *skb,
31 const void *matchinfo, 31 const void *matchinfo,
32 int offset, 32 int offset,
33 unsigned int protoff, 33 unsigned int protoff,
34 int *hotdrop) 34 bool *hotdrop)
35{ 35{
36 const struct ip6t_owner_info *info = matchinfo; 36 const struct ip6t_owner_info *info = matchinfo;
37 37
38 if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) 38 if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
39 return 0; 39 return false;
40 40
41 if (info->match & IP6T_OWNER_UID) { 41 if (info->match & IP6T_OWNER_UID)
42 if ((skb->sk->sk_socket->file->f_uid != info->uid) ^ 42 if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
43 !!(info->invert & IP6T_OWNER_UID)) 43 !!(info->invert & IP6T_OWNER_UID))
44 return 0; 44 return false;
45 }
46 45
47 if (info->match & IP6T_OWNER_GID) { 46 if (info->match & IP6T_OWNER_GID)
48 if ((skb->sk->sk_socket->file->f_gid != info->gid) ^ 47 if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
49 !!(info->invert & IP6T_OWNER_GID)) 48 !!(info->invert & IP6T_OWNER_GID))
50 return 0; 49 return false;
51 }
52 50
53 return 1; 51 return true;
54} 52}
55 53
56static int 54static bool
57checkentry(const char *tablename, 55checkentry(const char *tablename,
58 const void *ip, 56 const void *ip,
59 const struct xt_match *match, 57 const struct xt_match *match,
@@ -65,12 +63,12 @@ checkentry(const char *tablename,
65 if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) { 63 if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) {
66 printk("ipt_owner: pid and sid matching " 64 printk("ipt_owner: pid and sid matching "
67 "not supported anymore\n"); 65 "not supported anymore\n");
68 return 0; 66 return false;
69 } 67 }
70 return 1; 68 return true;
71} 69}
72 70
73static struct xt_match owner_match = { 71static struct xt_match owner_match __read_mostly = {
74 .name = "owner", 72 .name = "owner",
75 .family = AF_INET6, 73 .family = AF_INET6,
76 .match = match, 74 .match = match,
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 81ab00d8c182..357cea703bd9 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -24,25 +24,19 @@ MODULE_LICENSE("GPL");
24MODULE_DESCRIPTION("IPv6 RT match"); 24MODULE_DESCRIPTION("IPv6 RT match");
25MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); 25MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
26 26
27#if 0
28#define DEBUGP printk
29#else
30#define DEBUGP(format, args...)
31#endif
32
33/* Returns 1 if the id is matched by the range, 0 otherwise */ 27/* Returns 1 if the id is matched by the range, 0 otherwise */
34static inline int 28static inline bool
35segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) 29segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
36{ 30{
37 int r = 0; 31 bool r;
38 DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x", 32 pr_debug("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",
39 invert ? '!' : ' ', min, id, max); 33 invert ? '!' : ' ', min, id, max);
40 r = (id >= min && id <= max) ^ invert; 34 r = (id >= min && id <= max) ^ invert;
41 DEBUGP(" result %s\n", r ? "PASS" : "FAILED"); 35 pr_debug(" result %s\n", r ? "PASS" : "FAILED");
42 return r; 36 return r;
43} 37}
44 38
45static int 39static bool
46match(const struct sk_buff *skb, 40match(const struct sk_buff *skb,
47 const struct net_device *in, 41 const struct net_device *in,
48 const struct net_device *out, 42 const struct net_device *out,
@@ -50,59 +44,61 @@ match(const struct sk_buff *skb,
50 const void *matchinfo, 44 const void *matchinfo,
51 int offset, 45 int offset,
52 unsigned int protoff, 46 unsigned int protoff,
53 int *hotdrop) 47 bool *hotdrop)
54{ 48{
55 struct ipv6_rt_hdr _route, *rh; 49 struct ipv6_rt_hdr _route;
50 const struct ipv6_rt_hdr *rh;
56 const struct ip6t_rt *rtinfo = matchinfo; 51 const struct ip6t_rt *rtinfo = matchinfo;
57 unsigned int temp; 52 unsigned int temp;
58 unsigned int ptr; 53 unsigned int ptr;
59 unsigned int hdrlen = 0; 54 unsigned int hdrlen = 0;
60 unsigned int ret = 0; 55 bool ret = false;
61 struct in6_addr *ap, _addr; 56 struct in6_addr _addr;
57 const struct in6_addr *ap;
62 int err; 58 int err;
63 59
64 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL); 60 err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL);
65 if (err < 0) { 61 if (err < 0) {
66 if (err != -ENOENT) 62 if (err != -ENOENT)
67 *hotdrop = 1; 63 *hotdrop = true;
68 return 0; 64 return false;
69 } 65 }
70 66
71 rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); 67 rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
72 if (rh == NULL) { 68 if (rh == NULL) {
73 *hotdrop = 1; 69 *hotdrop = true;
74 return 0; 70 return false;
75 } 71 }
76 72
77 hdrlen = ipv6_optlen(rh); 73 hdrlen = ipv6_optlen(rh);
78 if (skb->len - ptr < hdrlen) { 74 if (skb->len - ptr < hdrlen) {
79 /* Pcket smaller than its length field */ 75 /* Pcket smaller than its length field */
80 return 0; 76 return false;
81 } 77 }
82 78
83 DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen); 79 pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
84 DEBUGP("TYPE %04X ", rh->type); 80 pr_debug("TYPE %04X ", rh->type);
85 DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left); 81 pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
86 82
87 DEBUGP("IPv6 RT segsleft %02X ", 83 pr_debug("IPv6 RT segsleft %02X ",
88 (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], 84 segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
89 rh->segments_left, 85 rh->segments_left,
90 !!(rtinfo->invflags & IP6T_RT_INV_SGS)))); 86 !!(rtinfo->invflags & IP6T_RT_INV_SGS)));
91 DEBUGP("type %02X %02X %02X ", 87 pr_debug("type %02X %02X %02X ",
92 rtinfo->rt_type, rh->type, 88 rtinfo->rt_type, rh->type,
93 (!(rtinfo->flags & IP6T_RT_TYP) || 89 (!(rtinfo->flags & IP6T_RT_TYP) ||
94 ((rtinfo->rt_type == rh->type) ^ 90 ((rtinfo->rt_type == rh->type) ^
95 !!(rtinfo->invflags & IP6T_RT_INV_TYP)))); 91 !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
96 DEBUGP("len %02X %04X %02X ", 92 pr_debug("len %02X %04X %02X ",
97 rtinfo->hdrlen, hdrlen, 93 rtinfo->hdrlen, hdrlen,
98 (!(rtinfo->flags & IP6T_RT_LEN) || 94 !(rtinfo->flags & IP6T_RT_LEN) ||
99 ((rtinfo->hdrlen == hdrlen) ^ 95 ((rtinfo->hdrlen == hdrlen) ^
100 !!(rtinfo->invflags & IP6T_RT_INV_LEN)))); 96 !!(rtinfo->invflags & IP6T_RT_INV_LEN)));
101 DEBUGP("res %02X %02X %02X ", 97 pr_debug("res %02X %02X %02X ",
102 (rtinfo->flags & IP6T_RT_RES), 98 rtinfo->flags & IP6T_RT_RES,
103 ((struct rt0_hdr *)rh)->reserved, 99 ((const struct rt0_hdr *)rh)->reserved,
104 !((rtinfo->flags & IP6T_RT_RES) && 100 !((rtinfo->flags & IP6T_RT_RES) &&
105 (((struct rt0_hdr *)rh)->reserved))); 101 (((const struct rt0_hdr *)rh)->reserved)));
106 102
107 ret = (rh != NULL) 103 ret = (rh != NULL)
108 && 104 &&
@@ -129,18 +125,18 @@ match(const struct sk_buff *skb,
129 ret = (*rp == 0); 125 ret = (*rp == 0);
130 } 126 }
131 127
132 DEBUGP("#%d ", rtinfo->addrnr); 128 pr_debug("#%d ", rtinfo->addrnr);
133 if (!(rtinfo->flags & IP6T_RT_FST)) { 129 if (!(rtinfo->flags & IP6T_RT_FST)) {
134 return ret; 130 return ret;
135 } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) { 131 } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
136 DEBUGP("Not strict "); 132 pr_debug("Not strict ");
137 if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { 133 if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
138 DEBUGP("There isn't enough space\n"); 134 pr_debug("There isn't enough space\n");
139 return 0; 135 return false;
140 } else { 136 } else {
141 unsigned int i = 0; 137 unsigned int i = 0;
142 138
143 DEBUGP("#%d ", rtinfo->addrnr); 139 pr_debug("#%d ", rtinfo->addrnr);
144 for (temp = 0; 140 for (temp = 0;
145 temp < (unsigned int)((hdrlen - 8) / 16); 141 temp < (unsigned int)((hdrlen - 8) / 16);
146 temp++) { 142 temp++) {
@@ -154,25 +150,25 @@ match(const struct sk_buff *skb,
154 BUG_ON(ap == NULL); 150 BUG_ON(ap == NULL);
155 151
156 if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { 152 if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
157 DEBUGP("i=%d temp=%d;\n", i, temp); 153 pr_debug("i=%d temp=%d;\n", i, temp);
158 i++; 154 i++;
159 } 155 }
160 if (i == rtinfo->addrnr) 156 if (i == rtinfo->addrnr)
161 break; 157 break;
162 } 158 }
163 DEBUGP("i=%d #%d\n", i, rtinfo->addrnr); 159 pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
164 if (i == rtinfo->addrnr) 160 if (i == rtinfo->addrnr)
165 return ret; 161 return ret;
166 else 162 else
167 return 0; 163 return false;
168 } 164 }
169 } else { 165 } else {
170 DEBUGP("Strict "); 166 pr_debug("Strict ");
171 if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { 167 if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
172 DEBUGP("There isn't enough space\n"); 168 pr_debug("There isn't enough space\n");
173 return 0; 169 return false;
174 } else { 170 } else {
175 DEBUGP("#%d ", rtinfo->addrnr); 171 pr_debug("#%d ", rtinfo->addrnr);
176 for (temp = 0; temp < rtinfo->addrnr; temp++) { 172 for (temp = 0; temp < rtinfo->addrnr; temp++) {
177 ap = skb_header_pointer(skb, 173 ap = skb_header_pointer(skb,
178 ptr 174 ptr
@@ -185,20 +181,20 @@ match(const struct sk_buff *skb,
185 if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp])) 181 if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
186 break; 182 break;
187 } 183 }
188 DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr); 184 pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
189 if ((temp == rtinfo->addrnr) && 185 if (temp == rtinfo->addrnr &&
190 (temp == (unsigned int)((hdrlen - 8) / 16))) 186 temp == (unsigned int)((hdrlen - 8) / 16))
191 return ret; 187 return ret;
192 else 188 else
193 return 0; 189 return false;
194 } 190 }
195 } 191 }
196 192
197 return 0; 193 return false;
198} 194}
199 195
200/* Called when user tries to insert an entry of this type. */ 196/* Called when user tries to insert an entry of this type. */
201static int 197static bool
202checkentry(const char *tablename, 198checkentry(const char *tablename,
203 const void *entry, 199 const void *entry,
204 const struct xt_match *match, 200 const struct xt_match *match,
@@ -208,21 +204,21 @@ checkentry(const char *tablename,
208 const struct ip6t_rt *rtinfo = matchinfo; 204 const struct ip6t_rt *rtinfo = matchinfo;
209 205
210 if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { 206 if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
211 DEBUGP("ip6t_rt: unknown flags %X\n", rtinfo->invflags); 207 pr_debug("ip6t_rt: unknown flags %X\n", rtinfo->invflags);
212 return 0; 208 return false;
213 } 209 }
214 if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) && 210 if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
215 (!(rtinfo->flags & IP6T_RT_TYP) || 211 (!(rtinfo->flags & IP6T_RT_TYP) ||
216 (rtinfo->rt_type != 0) || 212 (rtinfo->rt_type != 0) ||
217 (rtinfo->invflags & IP6T_RT_INV_TYP))) { 213 (rtinfo->invflags & IP6T_RT_INV_TYP))) {
218 DEBUGP("`--rt-type 0' required before `--rt-0-*'"); 214 pr_debug("`--rt-type 0' required before `--rt-0-*'");
219 return 0; 215 return false;
220 } 216 }
221 217
222 return 1; 218 return true;
223} 219}
224 220
225static struct xt_match rt_match = { 221static struct xt_match rt_match __read_mostly = {
226 .name = "rt", 222 .name = "rt",
227 .family = AF_INET6, 223 .family = AF_INET6,
228 .match = match, 224 .match = match,
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index f2d26495f413..f0a9efa67fb5 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -21,12 +21,6 @@ MODULE_DESCRIPTION("ip6tables mangle table");
21 (1 << NF_IP6_LOCAL_OUT) | \ 21 (1 << NF_IP6_LOCAL_OUT) | \
22 (1 << NF_IP6_POST_ROUTING)) 22 (1 << NF_IP6_POST_ROUTING))
23 23
24#if 0
25#define DEBUGP(x, args...) printk(KERN_DEBUG x, ## args)
26#else
27#define DEBUGP(x, args...)
28#endif
29
30static struct 24static struct
31{ 25{
32 struct ip6t_replace repl; 26 struct ip6t_replace repl;
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 0acda45d455d..ec290e4ebdd8 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -8,12 +8,6 @@
8 8
9#define RAW_VALID_HOOKS ((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_OUT)) 9#define RAW_VALID_HOOKS ((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_OUT))
10 10
11#if 0
12#define DEBUGP(x, args...) printk(KERN_DEBUG x, ## args)
13#else
14#define DEBUGP(x, args...)
15#endif
16
17static struct 11static struct
18{ 12{
19 struct ip6t_replace repl; 13 struct ip6t_replace repl;
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 1b1797f1f33d..89e20ab494b8 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -26,12 +26,6 @@
26#include <net/netfilter/nf_conntrack_l3proto.h> 26#include <net/netfilter/nf_conntrack_l3proto.h>
27#include <net/netfilter/nf_conntrack_core.h> 27#include <net/netfilter/nf_conntrack_core.h>
28 28
29#if 0
30#define DEBUGP printk
31#else
32#define DEBUGP(format, args...)
33#endif
34
35static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 29static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
36 struct nf_conntrack_tuple *tuple) 30 struct nf_conntrack_tuple *tuple)
37{ 31{
@@ -136,7 +130,7 @@ ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
136 * except of IPv6 & ext headers. but it's tracked anyway. - YK 130 * except of IPv6 & ext headers. but it's tracked anyway. - YK
137 */ 131 */
138 if ((protoff < 0) || (protoff > (*pskb)->len)) { 132 if ((protoff < 0) || (protoff > (*pskb)->len)) {
139 DEBUGP("ip6_conntrack_core: can't find proto in pkt\n"); 133 pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
140 NF_CT_STAT_INC_ATOMIC(error); 134 NF_CT_STAT_INC_ATOMIC(error);
141 NF_CT_STAT_INC_ATOMIC(invalid); 135 NF_CT_STAT_INC_ATOMIC(invalid);
142 return -NF_ACCEPT; 136 return -NF_ACCEPT;
@@ -147,11 +141,6 @@ ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
147 return NF_ACCEPT; 141 return NF_ACCEPT;
148} 142}
149 143
150static u_int32_t ipv6_get_features(const struct nf_conntrack_tuple *tuple)
151{
152 return NF_CT_F_BASIC;
153}
154
155static unsigned int ipv6_confirm(unsigned int hooknum, 144static unsigned int ipv6_confirm(unsigned int hooknum,
156 struct sk_buff **pskb, 145 struct sk_buff **pskb,
157 const struct net_device *in, 146 const struct net_device *in,
@@ -183,7 +172,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
183 protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum, 172 protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
184 (*pskb)->len - extoff); 173 (*pskb)->len - extoff);
185 if (protoff > (*pskb)->len || pnum == NEXTHDR_FRAGMENT) { 174 if (protoff > (*pskb)->len || pnum == NEXTHDR_FRAGMENT) {
186 DEBUGP("proto header not found\n"); 175 pr_debug("proto header not found\n");
187 return NF_ACCEPT; 176 return NF_ACCEPT;
188 } 177 }
189 178
@@ -397,7 +386,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = {
397 .ctl_table_path = nf_net_netfilter_sysctl_path, 386 .ctl_table_path = nf_net_netfilter_sysctl_path,
398 .ctl_table = nf_ct_ipv6_sysctl_table, 387 .ctl_table = nf_ct_ipv6_sysctl_table,
399#endif 388#endif
400 .get_features = ipv6_get_features,
401 .me = THIS_MODULE, 389 .me = THIS_MODULE,
402}; 390};
403 391
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 8814b95b2326..9defc7e14554 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -27,12 +27,6 @@
27 27
28static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ; 28static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
29 29
30#if 0
31#define DEBUGP printk
32#else
33#define DEBUGP(format, args...)
34#endif
35
36static int icmpv6_pkt_to_tuple(const struct sk_buff *skb, 30static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
37 unsigned int dataoff, 31 unsigned int dataoff,
38 struct nf_conntrack_tuple *tuple) 32 struct nf_conntrack_tuple *tuple)
@@ -125,8 +119,8 @@ static int icmpv6_new(struct nf_conn *conntrack,
125 119
126 if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) { 120 if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
127 /* Can't create a new ICMPv6 `conn' with this. */ 121 /* Can't create a new ICMPv6 `conn' with this. */
128 DEBUGP("icmpv6: can't create new conn with type %u\n", 122 pr_debug("icmpv6: can't create new conn with type %u\n",
129 type + 128); 123 type + 128);
130 NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple); 124 NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
131 return 0; 125 return 0;
132 } 126 }
@@ -152,14 +146,15 @@ icmpv6_error_message(struct sk_buff *skb,
152 146
153 hp = skb_header_pointer(skb, icmp6off, sizeof(_hdr), &_hdr); 147 hp = skb_header_pointer(skb, icmp6off, sizeof(_hdr), &_hdr);
154 if (hp == NULL) { 148 if (hp == NULL) {
155 DEBUGP("icmpv6_error: Can't get ICMPv6 hdr.\n"); 149 pr_debug("icmpv6_error: Can't get ICMPv6 hdr.\n");
156 return -NF_ACCEPT; 150 return -NF_ACCEPT;
157 } 151 }
158 152
159 inip6off = icmp6off + sizeof(_hdr); 153 inip6off = icmp6off + sizeof(_hdr);
160 if (skb_copy_bits(skb, inip6off+offsetof(struct ipv6hdr, nexthdr), 154 if (skb_copy_bits(skb, inip6off+offsetof(struct ipv6hdr, nexthdr),
161 &inprotonum, sizeof(inprotonum)) != 0) { 155 &inprotonum, sizeof(inprotonum)) != 0) {
162 DEBUGP("icmpv6_error: Can't get nexthdr in inner IPv6 header.\n"); 156 pr_debug("icmpv6_error: Can't get nexthdr in inner IPv6 "
157 "header.\n");
163 return -NF_ACCEPT; 158 return -NF_ACCEPT;
164 } 159 }
165 inprotoff = nf_ct_ipv6_skip_exthdr(skb, 160 inprotoff = nf_ct_ipv6_skip_exthdr(skb,
@@ -169,7 +164,8 @@ icmpv6_error_message(struct sk_buff *skb,
169 - sizeof(struct ipv6hdr)); 164 - sizeof(struct ipv6hdr));
170 165
171 if ((inprotoff > skb->len) || (inprotonum == NEXTHDR_FRAGMENT)) { 166 if ((inprotoff > skb->len) || (inprotonum == NEXTHDR_FRAGMENT)) {
172 DEBUGP("icmpv6_error: Can't get protocol header in ICMPv6 payload.\n"); 167 pr_debug("icmpv6_error: Can't get protocol header in ICMPv6 "
168 "payload.\n");
173 return -NF_ACCEPT; 169 return -NF_ACCEPT;
174 } 170 }
175 171
@@ -179,7 +175,7 @@ icmpv6_error_message(struct sk_buff *skb,
179 /* Are they talking about one of our connections? */ 175 /* Are they talking about one of our connections? */
180 if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum, 176 if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum,
181 &origtuple, &nf_conntrack_l3proto_ipv6, inproto)) { 177 &origtuple, &nf_conntrack_l3proto_ipv6, inproto)) {
182 DEBUGP("icmpv6_error: Can't get tuple\n"); 178 pr_debug("icmpv6_error: Can't get tuple\n");
183 return -NF_ACCEPT; 179 return -NF_ACCEPT;
184 } 180 }
185 181
@@ -187,15 +183,15 @@ icmpv6_error_message(struct sk_buff *skb,
187 been preserved inside the ICMP. */ 183 been preserved inside the ICMP. */
188 if (!nf_ct_invert_tuple(&intuple, &origtuple, 184 if (!nf_ct_invert_tuple(&intuple, &origtuple,
189 &nf_conntrack_l3proto_ipv6, inproto)) { 185 &nf_conntrack_l3proto_ipv6, inproto)) {
190 DEBUGP("icmpv6_error: Can't invert tuple\n"); 186 pr_debug("icmpv6_error: Can't invert tuple\n");
191 return -NF_ACCEPT; 187 return -NF_ACCEPT;
192 } 188 }
193 189
194 *ctinfo = IP_CT_RELATED; 190 *ctinfo = IP_CT_RELATED;
195 191
196 h = nf_conntrack_find_get(&intuple, NULL); 192 h = nf_conntrack_find_get(&intuple);
197 if (!h) { 193 if (!h) {
198 DEBUGP("icmpv6_error: no match\n"); 194 pr_debug("icmpv6_error: no match\n");
199 return -NF_ACCEPT; 195 return -NF_ACCEPT;
200 } else { 196 } else {
201 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) 197 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 347ab7608231..25442a8c1ba8 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -44,12 +44,6 @@
44#include <linux/kernel.h> 44#include <linux/kernel.h>
45#include <linux/module.h> 45#include <linux/module.h>
46 46
47#if 0
48#define DEBUGP printk
49#else
50#define DEBUGP(format, args...)
51#endif
52
53#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */ 47#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
54#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */ 48#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
55#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT 49#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
@@ -343,7 +337,7 @@ nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src, str
343 struct nf_ct_frag6_queue *fq; 337 struct nf_ct_frag6_queue *fq;
344 338
345 if ((fq = frag_alloc_queue()) == NULL) { 339 if ((fq = frag_alloc_queue()) == NULL) {
346 DEBUGP("Can't alloc new queue\n"); 340 pr_debug("Can't alloc new queue\n");
347 goto oom; 341 goto oom;
348 } 342 }
349 343
@@ -393,7 +387,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
393 int offset, end; 387 int offset, end;
394 388
395 if (fq->last_in & COMPLETE) { 389 if (fq->last_in & COMPLETE) {
396 DEBUGP("Allready completed\n"); 390 pr_debug("Allready completed\n");
397 goto err; 391 goto err;
398 } 392 }
399 393
@@ -402,7 +396,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
402 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); 396 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
403 397
404 if ((unsigned int)end > IPV6_MAXPLEN) { 398 if ((unsigned int)end > IPV6_MAXPLEN) {
405 DEBUGP("offset is too large.\n"); 399 pr_debug("offset is too large.\n");
406 return -1; 400 return -1;
407 } 401 }
408 402
@@ -420,7 +414,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
420 */ 414 */
421 if (end < fq->len || 415 if (end < fq->len ||
422 ((fq->last_in & LAST_IN) && end != fq->len)) { 416 ((fq->last_in & LAST_IN) && end != fq->len)) {
423 DEBUGP("already received last fragment\n"); 417 pr_debug("already received last fragment\n");
424 goto err; 418 goto err;
425 } 419 }
426 fq->last_in |= LAST_IN; 420 fq->last_in |= LAST_IN;
@@ -433,13 +427,13 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
433 /* RFC2460 says always send parameter problem in 427 /* RFC2460 says always send parameter problem in
434 * this case. -DaveM 428 * this case. -DaveM
435 */ 429 */
436 DEBUGP("the end of this fragment is not rounded to 8 bytes.\n"); 430 pr_debug("end of fragment not rounded to 8 bytes.\n");
437 return -1; 431 return -1;
438 } 432 }
439 if (end > fq->len) { 433 if (end > fq->len) {
440 /* Some bits beyond end -> corruption. */ 434 /* Some bits beyond end -> corruption. */
441 if (fq->last_in & LAST_IN) { 435 if (fq->last_in & LAST_IN) {
442 DEBUGP("last packet already reached.\n"); 436 pr_debug("last packet already reached.\n");
443 goto err; 437 goto err;
444 } 438 }
445 fq->len = end; 439 fq->len = end;
@@ -451,11 +445,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
451 445
452 /* Point into the IP datagram 'data' part. */ 446 /* Point into the IP datagram 'data' part. */
453 if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) { 447 if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
454 DEBUGP("queue: message is too short.\n"); 448 pr_debug("queue: message is too short.\n");
455 goto err; 449 goto err;
456 } 450 }
457 if (pskb_trim_rcsum(skb, end - offset)) { 451 if (pskb_trim_rcsum(skb, end - offset)) {
458 DEBUGP("Can't trim\n"); 452 pr_debug("Can't trim\n");
459 goto err; 453 goto err;
460 } 454 }
461 455
@@ -480,11 +474,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
480 if (i > 0) { 474 if (i > 0) {
481 offset += i; 475 offset += i;
482 if (end <= offset) { 476 if (end <= offset) {
483 DEBUGP("overlap\n"); 477 pr_debug("overlap\n");
484 goto err; 478 goto err;
485 } 479 }
486 if (!pskb_pull(skb, i)) { 480 if (!pskb_pull(skb, i)) {
487 DEBUGP("Can't pull\n"); 481 pr_debug("Can't pull\n");
488 goto err; 482 goto err;
489 } 483 }
490 if (skb->ip_summed != CHECKSUM_UNNECESSARY) 484 if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -503,7 +497,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
503 /* Eat head of the next overlapped fragment 497 /* Eat head of the next overlapped fragment
504 * and leave the loop. The next ones cannot overlap. 498 * and leave the loop. The next ones cannot overlap.
505 */ 499 */
506 DEBUGP("Eat head of the overlapped parts.: %d", i); 500 pr_debug("Eat head of the overlapped parts.: %d", i);
507 if (!pskb_pull(next, i)) 501 if (!pskb_pull(next, i))
508 goto err; 502 goto err;
509 503
@@ -586,13 +580,13 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
586 sizeof(struct ipv6hdr) + fq->len - 580 sizeof(struct ipv6hdr) + fq->len -
587 sizeof(struct frag_hdr)); 581 sizeof(struct frag_hdr));
588 if (payload_len > IPV6_MAXPLEN) { 582 if (payload_len > IPV6_MAXPLEN) {
589 DEBUGP("payload len is too large.\n"); 583 pr_debug("payload len is too large.\n");
590 goto out_oversize; 584 goto out_oversize;
591 } 585 }
592 586
593 /* Head of list must not be cloned. */ 587 /* Head of list must not be cloned. */
594 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) { 588 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
595 DEBUGP("skb is cloned but can't expand head"); 589 pr_debug("skb is cloned but can't expand head");
596 goto out_oom; 590 goto out_oom;
597 } 591 }
598 592
@@ -604,7 +598,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
604 int i, plen = 0; 598 int i, plen = 0;
605 599
606 if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) { 600 if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
607 DEBUGP("Can't alloc skb\n"); 601 pr_debug("Can't alloc skb\n");
608 goto out_oom; 602 goto out_oom;
609 } 603 }
610 clone->next = head->next; 604 clone->next = head->next;
@@ -719,11 +713,11 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
719 return -1; 713 return -1;
720 } 714 }
721 if (len < (int)sizeof(struct ipv6_opt_hdr)) { 715 if (len < (int)sizeof(struct ipv6_opt_hdr)) {
722 DEBUGP("too short\n"); 716 pr_debug("too short\n");
723 return -1; 717 return -1;
724 } 718 }
725 if (nexthdr == NEXTHDR_NONE) { 719 if (nexthdr == NEXTHDR_NONE) {
726 DEBUGP("next header is none\n"); 720 pr_debug("next header is none\n");
727 return -1; 721 return -1;
728 } 722 }
729 if (skb_copy_bits(skb, start, &hdr, sizeof(hdr))) 723 if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
@@ -764,7 +758,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
764 758
765 /* Jumbo payload inhibits frag. header */ 759 /* Jumbo payload inhibits frag. header */
766 if (ipv6_hdr(skb)->payload_len == 0) { 760 if (ipv6_hdr(skb)->payload_len == 0) {
767 DEBUGP("payload len = 0\n"); 761 pr_debug("payload len = 0\n");
768 return skb; 762 return skb;
769 } 763 }
770 764
@@ -773,14 +767,14 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
773 767
774 clone = skb_clone(skb, GFP_ATOMIC); 768 clone = skb_clone(skb, GFP_ATOMIC);
775 if (clone == NULL) { 769 if (clone == NULL) {
776 DEBUGP("Can't clone skb\n"); 770 pr_debug("Can't clone skb\n");
777 return skb; 771 return skb;
778 } 772 }
779 773
780 NFCT_FRAG6_CB(clone)->orig = skb; 774 NFCT_FRAG6_CB(clone)->orig = skb;
781 775
782 if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) { 776 if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
783 DEBUGP("message is too short.\n"); 777 pr_debug("message is too short.\n");
784 goto ret_orig; 778 goto ret_orig;
785 } 779 }
786 780
@@ -789,7 +783,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
789 fhdr = (struct frag_hdr *)skb_transport_header(clone); 783 fhdr = (struct frag_hdr *)skb_transport_header(clone);
790 784
791 if (!(fhdr->frag_off & htons(0xFFF9))) { 785 if (!(fhdr->frag_off & htons(0xFFF9))) {
792 DEBUGP("Invalid fragment offset\n"); 786 pr_debug("Invalid fragment offset\n");
793 /* It is not a fragmented frame */ 787 /* It is not a fragmented frame */
794 goto ret_orig; 788 goto ret_orig;
795 } 789 }
@@ -799,7 +793,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
799 793
800 fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); 794 fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
801 if (fq == NULL) { 795 if (fq == NULL) {
802 DEBUGP("Can't find and can't create new queue\n"); 796 pr_debug("Can't find and can't create new queue\n");
803 goto ret_orig; 797 goto ret_orig;
804 } 798 }
805 799
@@ -807,7 +801,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
807 801
808 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { 802 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
809 spin_unlock(&fq->lock); 803 spin_unlock(&fq->lock);
810 DEBUGP("Can't insert skb to queue\n"); 804 pr_debug("Can't insert skb to queue\n");
811 fq_put(fq, NULL); 805 fq_put(fq, NULL);
812 goto ret_orig; 806 goto ret_orig;
813 } 807 }
@@ -815,7 +809,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
815 if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) { 809 if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) {
816 ret_skb = nf_ct_frag6_reasm(fq, dev); 810 ret_skb = nf_ct_frag6_reasm(fq, dev);
817 if (ret_skb == NULL) 811 if (ret_skb == NULL)
818 DEBUGP("Can't reassemble fragmented packets\n"); 812 pr_debug("Can't reassemble fragmented packets\n");
819 } 813 }
820 spin_unlock(&fq->lock); 814 spin_unlock(&fq->lock);
821 815
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a58459a76684..e27383d855de 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -49,7 +49,7 @@
49#include <net/udp.h> 49#include <net/udp.h>
50#include <net/inet_common.h> 50#include <net/inet_common.h>
51#include <net/tcp_states.h> 51#include <net/tcp_states.h>
52#ifdef CONFIG_IPV6_MIP6 52#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
53#include <net/mip6.h> 53#include <net/mip6.h>
54#endif 54#endif
55 55
@@ -137,6 +137,28 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
137 return 0; 137 return 0;
138} 138}
139 139
140#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
141static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
142
143int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
144 struct sk_buff *skb))
145{
146 rcu_assign_pointer(mh_filter, filter);
147 return 0;
148}
149EXPORT_SYMBOL(rawv6_mh_filter_register);
150
151int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
152 struct sk_buff *skb))
153{
154 rcu_assign_pointer(mh_filter, NULL);
155 synchronize_rcu();
156 return 0;
157}
158EXPORT_SYMBOL(rawv6_mh_filter_unregister);
159
160#endif
161
140/* 162/*
141 * demultiplex raw sockets. 163 * demultiplex raw sockets.
142 * (should consider queueing the skb in the sock receive_queue 164 * (should consider queueing the skb in the sock receive_queue
@@ -178,16 +200,22 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
178 case IPPROTO_ICMPV6: 200 case IPPROTO_ICMPV6:
179 filtered = icmpv6_filter(sk, skb); 201 filtered = icmpv6_filter(sk, skb);
180 break; 202 break;
181#ifdef CONFIG_IPV6_MIP6 203
204#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
182 case IPPROTO_MH: 205 case IPPROTO_MH:
206 {
183 /* XXX: To validate MH only once for each packet, 207 /* XXX: To validate MH only once for each packet,
184 * this is placed here. It should be after checking 208 * this is placed here. It should be after checking
185 * xfrm policy, however it doesn't. The checking xfrm 209 * xfrm policy, however it doesn't. The checking xfrm
186 * policy is placed in rawv6_rcv() because it is 210 * policy is placed in rawv6_rcv() because it is
187 * required for each socket. 211 * required for each socket.
188 */ 212 */
189 filtered = mip6_mh_filter(sk, skb); 213 int (*filter)(struct sock *sock, struct sk_buff *skb);
214
215 filter = rcu_dereference(mh_filter);
216 filtered = filter ? filter(sk, skb) : 0;
190 break; 217 break;
218 }
191#endif 219#endif
192 default: 220 default:
193 filtered = 0; 221 filtered = 0;
@@ -611,9 +639,7 @@ static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
611 struct iovec *iov; 639 struct iovec *iov;
612 u8 __user *type = NULL; 640 u8 __user *type = NULL;
613 u8 __user *code = NULL; 641 u8 __user *code = NULL;
614#ifdef CONFIG_IPV6_MIP6
615 u8 len = 0; 642 u8 len = 0;
616#endif
617 int probed = 0; 643 int probed = 0;
618 int i; 644 int i;
619 645
@@ -646,7 +672,6 @@ static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
646 probed = 1; 672 probed = 1;
647 } 673 }
648 break; 674 break;
649#ifdef CONFIG_IPV6_MIP6
650 case IPPROTO_MH: 675 case IPPROTO_MH:
651 if (iov->iov_base && iov->iov_len < 1) 676 if (iov->iov_base && iov->iov_len < 1)
652 break; 677 break;
@@ -660,7 +685,6 @@ static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
660 len += iov->iov_len; 685 len += iov->iov_len;
661 686
662 break; 687 break;
663#endif
664 default: 688 default:
665 probed = 1; 689 probed = 1;
666 break; 690 break;
@@ -1256,7 +1280,7 @@ static int raw6_seq_show(struct seq_file *seq, void *v)
1256 return 0; 1280 return 0;
1257} 1281}
1258 1282
1259static struct seq_operations raw6_seq_ops = { 1283static const struct seq_operations raw6_seq_ops = {
1260 .start = raw6_seq_start, 1284 .start = raw6_seq_start,
1261 .next = raw6_seq_next, 1285 .next = raw6_seq_next,
1262 .stop = raw6_seq_stop, 1286 .stop = raw6_seq_stop,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1efa95a99f45..eb20bb690abd 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -532,7 +532,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
532 */ 532 */
533 max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr); 533 max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);
534 534
535 if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) { 535 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
536 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
536 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 537 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
537 if (!new_skb) { 538 if (!new_skb) {
538 ip_rt_put(rt); 539 ip_rt_put(rt);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 193d9d60bb7a..d67fb1ef751e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -484,17 +484,6 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
484 484
485 if (dst == NULL) { 485 if (dst == NULL) {
486 opt = np->opt; 486 opt = np->opt;
487 if (opt == NULL &&
488 np->rxopt.bits.osrcrt == 2 &&
489 treq->pktopts) {
490 struct sk_buff *pktopts = treq->pktopts;
491 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
492 if (rxopt->srcrt)
493 opt = ipv6_invert_rthdr(sk,
494 (struct ipv6_rt_hdr *)(skb_network_header(pktopts) +
495 rxopt->srcrt));
496 }
497
498 if (opt && opt->srcrt) { 487 if (opt && opt->srcrt) {
499 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 488 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
500 ipv6_addr_copy(&final, &fl.fl6_dst); 489 ipv6_addr_copy(&final, &fl.fl6_dst);
@@ -1391,15 +1380,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1391 if (sk_acceptq_is_full(sk)) 1380 if (sk_acceptq_is_full(sk))
1392 goto out_overflow; 1381 goto out_overflow;
1393 1382
1394 if (np->rxopt.bits.osrcrt == 2 &&
1395 opt == NULL && treq->pktopts) {
1396 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
1397 if (rxopt->srcrt)
1398 opt = ipv6_invert_rthdr(sk,
1399 (struct ipv6_rt_hdr *)(skb_network_header(treq->pktopts) +
1400 rxopt->srcrt));
1401 }
1402
1403 if (dst == NULL) { 1383 if (dst == NULL) {
1404 struct in6_addr *final_p = NULL, final; 1384 struct in6_addr *final_p = NULL, final;
1405 struct flowi fl; 1385 struct flowi fl;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 1faa2ea80afc..3ec0c4770ee3 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -18,7 +18,7 @@
18#include <net/ip.h> 18#include <net/ip.h>
19#include <net/ipv6.h> 19#include <net/ipv6.h>
20#include <net/ip6_route.h> 20#include <net/ip6_route.h>
21#ifdef CONFIG_IPV6_MIP6 21#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
22#include <net/mip6.h> 22#include <net/mip6.h>
23#endif 23#endif
24 24
@@ -318,7 +318,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl)
318 fl->proto = nexthdr; 318 fl->proto = nexthdr;
319 return; 319 return;
320 320
321#ifdef CONFIG_IPV6_MIP6 321#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
322 case IPPROTO_MH: 322 case IPPROTO_MH:
323 if (pskb_may_pull(skb, nh + offset + 3 - skb->data)) { 323 if (pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
324 struct ip6_mh *mh; 324 struct ip6_mh *mh;
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index baa461b9f74e..cdadb4847469 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -65,7 +65,7 @@ __xfrm6_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n)
65 goto end; 65 goto end;
66 66
67 /* Rule 2: select MIPv6 RO or inbound trigger */ 67 /* Rule 2: select MIPv6 RO or inbound trigger */
68#ifdef CONFIG_IPV6_MIP6 68#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
69 for (i = 0; i < n; i++) { 69 for (i = 0; i < n; i++) {
70 if (src[i] && 70 if (src[i] &&
71 (src[i]->props.mode == XFRM_MODE_ROUTEOPTIMIZATION || 71 (src[i]->props.mode == XFRM_MODE_ROUTEOPTIMIZATION ||
@@ -130,7 +130,7 @@ __xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n)
130 goto end; 130 goto end;
131 131
132 /* Rule 2: select MIPv6 RO or inbound trigger */ 132 /* Rule 2: select MIPv6 RO or inbound trigger */
133#ifdef CONFIG_IPV6_MIP6 133#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
134 for (i = 0; i < n; i++) { 134 for (i = 0; i < n; i++) {
135 if (src[i] && 135 if (src[i] &&
136 (src[i]->mode == XFRM_MODE_ROUTEOPTIMIZATION || 136 (src[i]->mode == XFRM_MODE_ROUTEOPTIMIZATION ||
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 5502cc948dfb..6f87dd568ded 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -379,3 +379,4 @@ static void __exit xfrm6_tunnel_fini(void)
379module_init(xfrm6_tunnel_init); 379module_init(xfrm6_tunnel_init);
380module_exit(xfrm6_tunnel_fini); 380module_exit(xfrm6_tunnel_fini);
381MODULE_LICENSE("GPL"); 381MODULE_LICENSE("GPL");
382MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index db32ac8e79bd..4226e71ae1e3 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -286,21 +286,21 @@ out:
286 return 0; 286 return 0;
287} 287}
288 288
289static struct seq_operations ipx_seq_interface_ops = { 289static const struct seq_operations ipx_seq_interface_ops = {
290 .start = ipx_seq_interface_start, 290 .start = ipx_seq_interface_start,
291 .next = ipx_seq_interface_next, 291 .next = ipx_seq_interface_next,
292 .stop = ipx_seq_interface_stop, 292 .stop = ipx_seq_interface_stop,
293 .show = ipx_seq_interface_show, 293 .show = ipx_seq_interface_show,
294}; 294};
295 295
296static struct seq_operations ipx_seq_route_ops = { 296static const struct seq_operations ipx_seq_route_ops = {
297 .start = ipx_seq_route_start, 297 .start = ipx_seq_route_start,
298 .next = ipx_seq_route_next, 298 .next = ipx_seq_route_next,
299 .stop = ipx_seq_route_stop, 299 .stop = ipx_seq_route_stop,
300 .show = ipx_seq_route_show, 300 .show = ipx_seq_route_show,
301}; 301};
302 302
303static struct seq_operations ipx_seq_socket_ops = { 303static const struct seq_operations ipx_seq_socket_ops = {
304 .start = ipx_seq_socket_start, 304 .start = ipx_seq_socket_start,
305 .next = ipx_seq_socket_next, 305 .next = ipx_seq_socket_next,
306 .stop = ipx_seq_interface_stop, 306 .stop = ipx_seq_interface_stop,
diff --git a/net/irda/Makefile b/net/irda/Makefile
index d1366c2a39cb..187f6c563a4b 100644
--- a/net/irda/Makefile
+++ b/net/irda/Makefile
@@ -10,6 +10,6 @@ obj-$(CONFIG_IRCOMM) += ircomm/
10irda-y := iriap.o iriap_event.o irlmp.o irlmp_event.o irlmp_frame.o \ 10irda-y := iriap.o iriap_event.o irlmp.o irlmp_event.o irlmp_frame.o \
11 irlap.o irlap_event.o irlap_frame.o timer.o qos.o irqueue.o \ 11 irlap.o irlap_event.o irlap_frame.o timer.o qos.o irqueue.o \
12 irttp.o irda_device.o irias_object.o wrapper.o af_irda.o \ 12 irttp.o irda_device.o irias_object.o wrapper.o af_irda.o \
13 discovery.o parameters.o irmod.o 13 discovery.o parameters.o irnetlink.o irmod.o
14irda-$(CONFIG_PROC_FS) += irproc.o 14irda-$(CONFIG_PROC_FS) += irproc.o
15irda-$(CONFIG_SYSCTL) += irsysctl.o 15irda-$(CONFIG_SYSCTL) += irsysctl.o
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index f09734128674..af0cea721d2a 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -395,7 +395,7 @@ static int discovery_seq_show(struct seq_file *seq, void *v)
395 return 0; 395 return 0;
396} 396}
397 397
398static struct seq_operations discovery_seq_ops = { 398static const struct seq_operations discovery_seq_ops = {
399 .start = discovery_seq_start, 399 .start = discovery_seq_start,
400 .next = discovery_seq_next, 400 .next = discovery_seq_next,
401 .stop = discovery_seq_stop, 401 .stop = discovery_seq_stop,
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 4749f8f55391..2d63fa8e1556 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -562,7 +562,7 @@ static int ircomm_seq_show(struct seq_file *seq, void *v)
562 return 0; 562 return 0;
563} 563}
564 564
565static struct seq_operations ircomm_seq_ops = { 565static const struct seq_operations ircomm_seq_ops = {
566 .start = ircomm_seq_start, 566 .start = ircomm_seq_start,
567 .next = ircomm_seq_next, 567 .next = ircomm_seq_next,
568 .stop = ircomm_seq_stop, 568 .stop = ircomm_seq_stop,
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 915d9384f36a..774eb707940c 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -1066,7 +1066,7 @@ static int irias_seq_show(struct seq_file *seq, void *v)
1066 return 0; 1066 return 0;
1067} 1067}
1068 1068
1069static struct seq_operations irias_seq_ops = { 1069static const struct seq_operations irias_seq_ops = {
1070 .start = irias_seq_start, 1070 .start = irias_seq_start,
1071 .next = irias_seq_next, 1071 .next = irias_seq_next,
1072 .stop = irias_seq_stop, 1072 .stop = irias_seq_stop,
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index ed69773b0f8e..f5778ef3ccc7 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1217,7 +1217,7 @@ static int irlan_seq_show(struct seq_file *seq, void *v)
1217 return 0; 1217 return 0;
1218} 1218}
1219 1219
1220static struct seq_operations irlan_seq_ops = { 1220static const struct seq_operations irlan_seq_ops = {
1221 .start = irlan_seq_start, 1221 .start = irlan_seq_start,
1222 .next = irlan_seq_next, 1222 .next = irlan_seq_next,
1223 .stop = irlan_seq_stop, 1223 .stop = irlan_seq_stop,
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index d93ebd11431e..2fc9f518f89d 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -1210,7 +1210,7 @@ static int irlap_seq_show(struct seq_file *seq, void *v)
1210 return 0; 1210 return 0;
1211} 1211}
1212 1212
1213static struct seq_operations irlap_seq_ops = { 1213static const struct seq_operations irlap_seq_ops = {
1214 .start = irlap_seq_start, 1214 .start = irlap_seq_start,
1215 .next = irlap_seq_next, 1215 .next = irlap_seq_next,
1216 .stop = irlap_seq_stop, 1216 .stop = irlap_seq_stop,
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 3013c49ab975..25a3444a9234 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -101,6 +101,13 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
101 101
102 irlap_insert_info(self, skb); 102 irlap_insert_info(self, skb);
103 103
104 if (unlikely(self->mode & IRDA_MODE_MONITOR)) {
105 IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __FUNCTION__,
106 self->netdev->name);
107 dev_kfree_skb(skb);
108 return;
109 }
110
104 dev_queue_xmit(skb); 111 dev_queue_xmit(skb);
105} 112}
106 113
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 9df0461b6d18..24a5e3f23778 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1994,7 +1994,7 @@ static int irlmp_seq_show(struct seq_file *seq, void *v)
1994 return 0; 1994 return 0;
1995} 1995}
1996 1996
1997static struct seq_operations irlmp_seq_ops = { 1997static const struct seq_operations irlmp_seq_ops = {
1998 .start = irlmp_seq_start, 1998 .start = irlmp_seq_start,
1999 .next = irlmp_seq_next, 1999 .next = irlmp_seq_next,
2000 .stop = irlmp_seq_stop, 2000 .stop = irlmp_seq_stop,
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index c7fad2c5b9f3..1900937b3328 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -88,16 +88,23 @@ EXPORT_SYMBOL(irda_notify_init);
88 */ 88 */
89static int __init irda_init(void) 89static int __init irda_init(void)
90{ 90{
91 int ret = 0;
92
91 IRDA_DEBUG(0, "%s()\n", __FUNCTION__); 93 IRDA_DEBUG(0, "%s()\n", __FUNCTION__);
92 94
93 /* Lower layer of the stack */ 95 /* Lower layer of the stack */
94 irlmp_init(); 96 irlmp_init();
95 irlap_init(); 97 irlap_init();
96 98
99 /* Driver/dongle support */
100 irda_device_init();
101
97 /* Higher layers of the stack */ 102 /* Higher layers of the stack */
98 iriap_init(); 103 iriap_init();
99 irttp_init(); 104 irttp_init();
100 irsock_init(); 105 ret = irsock_init();
106 if (ret < 0)
107 goto out_err_1;
101 108
102 /* Add IrDA packet type (Start receiving packets) */ 109 /* Add IrDA packet type (Start receiving packets) */
103 dev_add_pack(&irda_packet_type); 110 dev_add_pack(&irda_packet_type);
@@ -107,13 +114,44 @@ static int __init irda_init(void)
107 irda_proc_register(); 114 irda_proc_register();
108#endif 115#endif
109#ifdef CONFIG_SYSCTL 116#ifdef CONFIG_SYSCTL
110 irda_sysctl_register(); 117 ret = irda_sysctl_register();
118 if (ret < 0)
119 goto out_err_2;
111#endif 120#endif
112 121
113 /* Driver/dongle support */ 122 ret = irda_nl_register();
114 irda_device_init(); 123 if (ret < 0)
124 goto out_err_3;
115 125
116 return 0; 126 return 0;
127
128 out_err_3:
129#ifdef CONFIG_SYSCTL
130 irda_sysctl_unregister();
131#endif
132 out_err_2:
133#ifdef CONFIG_PROC_FS
134 irda_proc_unregister();
135#endif
136
137 /* Remove IrDA packet type (stop receiving packets) */
138 dev_remove_pack(&irda_packet_type);
139
140 /* Remove higher layers */
141 irsock_cleanup();
142 out_err_1:
143 irttp_cleanup();
144 iriap_cleanup();
145
146 /* Remove lower layers */
147 irda_device_cleanup();
148 irlap_cleanup(); /* Must be done before irlmp_cleanup()! DB */
149
150 /* Remove middle layer */
151 irlmp_cleanup();
152
153
154 return ret;
117} 155}
118 156
119/* 157/*
@@ -125,6 +163,8 @@ static int __init irda_init(void)
125static void __exit irda_cleanup(void) 163static void __exit irda_cleanup(void)
126{ 164{
127 /* Remove External APIs */ 165 /* Remove External APIs */
166 irda_nl_unregister();
167
128#ifdef CONFIG_SYSCTL 168#ifdef CONFIG_SYSCTL
129 irda_sysctl_unregister(); 169 irda_sysctl_unregister();
130#endif 170#endif
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
new file mode 100644
index 000000000000..db716580e1ae
--- /dev/null
+++ b/net/irda/irnetlink.c
@@ -0,0 +1,170 @@
1/*
2 * IrDA netlink layer, for stack configuration.
3 *
4 * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz>
5 *
 6 * Partly based on the 802.11 netlink implementation
7 * (see net/wireless/nl80211.c) which is:
8 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/socket.h>
17#include <linux/irda.h>
18#include <net/sock.h>
19#include <net/irda/irda.h>
20#include <net/irda/irlap.h>
21#include <net/genetlink.h>
22
23
24
25static struct genl_family irda_nl_family = {
26 .id = GENL_ID_GENERATE,
27 .name = IRDA_NL_NAME,
28 .hdrsize = 0,
29 .version = IRDA_NL_VERSION,
30 .maxattr = IRDA_NL_CMD_MAX,
31};
32
33static struct net_device * ifname_to_netdev(struct genl_info *info)
34{
35 char * ifname;
36
37 if (!info->attrs[IRDA_NL_ATTR_IFNAME])
38 return NULL;
39
40 ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]);
41
42 IRDA_DEBUG(5, "%s(): Looking for %s\n", __FUNCTION__, ifname);
43
44 return dev_get_by_name(ifname);
45}
46
47static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info)
48{
49 struct net_device * dev;
50 struct irlap_cb * irlap;
51 u32 mode;
52
53 if (!info->attrs[IRDA_NL_ATTR_MODE])
54 return -EINVAL;
55
56 mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]);
57
58 IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __FUNCTION__, mode);
59
60 dev = ifname_to_netdev(info);
61 if (!dev)
62 return -ENODEV;
63
64 irlap = (struct irlap_cb *)dev->atalk_ptr;
65 if (!irlap) {
66 dev_put(dev);
67 return -ENODEV;
68 }
69
70 irlap->mode = mode;
71
72 dev_put(dev);
73
74 return 0;
75}
76
77static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
78{
79 struct net_device * dev;
80 struct irlap_cb * irlap;
81 struct sk_buff *msg;
82 void *hdr;
83 int ret = -ENOBUFS;
84
85 dev = ifname_to_netdev(info);
86 if (!dev)
87 return -ENODEV;
88
89 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
90 if (!msg) {
91 dev_put(dev);
92 return -ENOMEM;
93 }
94
95 irlap = (struct irlap_cb *)dev->atalk_ptr;
96 if (!irlap) {
97 ret = -ENODEV;
98 goto err_out;
99 }
100
101 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
102 &irda_nl_family, 0, IRDA_NL_CMD_GET_MODE);
103 if (IS_ERR(hdr)) {
104 ret = PTR_ERR(hdr);
105 goto err_out;
106 }
107
 108	if(nla_put_string(msg, IRDA_NL_ATTR_IFNAME,
 109		dev->name))
 110		goto err_out;
111
112 if(nla_put_u32(msg, IRDA_NL_ATTR_MODE, irlap->mode))
113 goto err_out;
114
115 genlmsg_end(msg, hdr);
116
117 return genlmsg_unicast(msg, info->snd_pid);
118
119 err_out:
120 nlmsg_free(msg);
121 dev_put(dev);
122
123 return ret;
124}
125
126static struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
127 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
128 .len = IFNAMSIZ-1 },
129 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
130};
131
132static struct genl_ops irda_nl_ops[] = {
133 {
134 .cmd = IRDA_NL_CMD_SET_MODE,
135 .doit = irda_nl_set_mode,
136 .policy = irda_nl_policy,
137 .flags = GENL_ADMIN_PERM,
138 },
139 {
140 .cmd = IRDA_NL_CMD_GET_MODE,
141 .doit = irda_nl_get_mode,
142 .policy = irda_nl_policy,
143 /* can be retrieved by unprivileged users */
144 },
145
146};
147
148int irda_nl_register(void)
149{
150 int err, i;
151
152 err = genl_register_family(&irda_nl_family);
153 if (err)
154 return err;
155
156 for (i = 0; i < ARRAY_SIZE(irda_nl_ops); i++) {
157 err = genl_register_ops(&irda_nl_family, &irda_nl_ops[i]);
158 if (err)
159 goto err_out;
160 }
161 return 0;
162 err_out:
163 genl_unregister_family(&irda_nl_family);
164 return err;
165}
166
167void irda_nl_unregister(void)
168{
169 genl_unregister_family(&irda_nl_family);
170}
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 7069e4a58257..7f50832a2cd5 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -369,6 +369,20 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
369/* Everything is happily mixed up. Waiting for next clean up - Jean II */ 369/* Everything is happily mixed up. Waiting for next clean up - Jean II */
370 370
371/* 371/*
372 * Initialization, that has to be done on new tsap
373 * instance allocation and on duplication
374 */
375static void irttp_init_tsap(struct tsap_cb *tsap)
376{
377 spin_lock_init(&tsap->lock);
378 init_timer(&tsap->todo_timer);
379
380 skb_queue_head_init(&tsap->rx_queue);
381 skb_queue_head_init(&tsap->tx_queue);
382 skb_queue_head_init(&tsap->rx_fragments);
383}
384
385/*
372 * Function irttp_open_tsap (stsap, notify) 386 * Function irttp_open_tsap (stsap, notify)
373 * 387 *
374 * Create TSAP connection endpoint, 388 * Create TSAP connection endpoint,
@@ -395,10 +409,11 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
395 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); 409 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__);
396 return NULL; 410 return NULL;
397 } 411 }
398 spin_lock_init(&self->lock); 412
413 /* Initialize internal objects */
414 irttp_init_tsap(self);
399 415
400 /* Initialise todo timer */ 416 /* Initialise todo timer */
401 init_timer(&self->todo_timer);
402 self->todo_timer.data = (unsigned long) self; 417 self->todo_timer.data = (unsigned long) self;
403 self->todo_timer.function = &irttp_todo_expired; 418 self->todo_timer.function = &irttp_todo_expired;
404 419
@@ -418,9 +433,6 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
418 self->magic = TTP_TSAP_MAGIC; 433 self->magic = TTP_TSAP_MAGIC;
419 self->connected = FALSE; 434 self->connected = FALSE;
420 435
421 skb_queue_head_init(&self->rx_queue);
422 skb_queue_head_init(&self->tx_queue);
423 skb_queue_head_init(&self->rx_fragments);
424 /* 436 /*
425 * Create LSAP at IrLMP layer 437 * Create LSAP at IrLMP layer
426 */ 438 */
@@ -1455,12 +1467,9 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
1455 1467
1456 /* Not everything should be copied */ 1468 /* Not everything should be copied */
1457 new->notify.instance = instance; 1469 new->notify.instance = instance;
1458 spin_lock_init(&new->lock);
1459 init_timer(&new->todo_timer);
1460 1470
1461 skb_queue_head_init(&new->rx_queue); 1471 /* Initialize internal objects */
1462 skb_queue_head_init(&new->tx_queue); 1472 irttp_init_tsap(new);
1463 skb_queue_head_init(&new->rx_fragments);
1464 1473
1465 /* This is locked */ 1474 /* This is locked */
1466 hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL); 1475 hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);
@@ -1866,7 +1875,7 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
1866 return 0; 1875 return 0;
1867} 1876}
1868 1877
1869static struct seq_operations irttp_seq_ops = { 1878static const struct seq_operations irttp_seq_ops = {
1870 .start = irttp_seq_start, 1879 .start = irttp_seq_start,
1871 .next = irttp_seq_next, 1880 .next = irttp_seq_next,
1872 .stop = irttp_seq_stop, 1881 .stop = irttp_seq_stop,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 3ab9d9f8b17f..49be6c902c83 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -184,14 +184,14 @@ out:
184 return 0; 184 return 0;
185} 185}
186 186
187static struct seq_operations llc_seq_socket_ops = { 187static const struct seq_operations llc_seq_socket_ops = {
188 .start = llc_seq_start, 188 .start = llc_seq_start,
189 .next = llc_seq_next, 189 .next = llc_seq_next,
190 .stop = llc_seq_stop, 190 .stop = llc_seq_stop,
191 .show = llc_seq_socket_show, 191 .show = llc_seq_socket_show,
192}; 192};
193 193
194static struct seq_operations llc_seq_core_ops = { 194static const struct seq_operations llc_seq_core_ops = {
195 .start = llc_seq_start, 195 .start = llc_seq_start,
196 .next = llc_seq_next, 196 .next = llc_seq_next,
197 .stop = llc_seq_stop, 197 .stop = llc_seq_stop,
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c
index 352f03bd8a3a..66e8a976b311 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/ieee80211_ioctl.c
@@ -838,6 +838,29 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev,
838} 838}
839 839
840 840
841static int ieee80211_ioctl_giwrate(struct net_device *dev,
842 struct iw_request_info *info,
843 struct iw_param *rate, char *extra)
844{
845 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
846 struct sta_info *sta;
847 struct ieee80211_sub_if_data *sdata;
848
849 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
850 if (sdata->type == IEEE80211_IF_TYPE_STA)
851 sta = sta_info_get(local, sdata->u.sta.bssid);
852 else
853 return -EOPNOTSUPP;
854 if (!sta)
855 return -ENODEV;
856 if (sta->txrate < local->oper_hw_mode->num_rates)
857 rate->value = local->oper_hw_mode->rates[sta->txrate].rate * 100000;
858 else
859 rate->value = 0;
860 sta_info_put(sta);
861 return 0;
862}
863
841static int ieee80211_ioctl_siwrts(struct net_device *dev, 864static int ieee80211_ioctl_siwrts(struct net_device *dev,
842 struct iw_request_info *info, 865 struct iw_request_info *info,
843 struct iw_param *rts, char *extra) 866 struct iw_param *rts, char *extra)
@@ -1779,7 +1802,7 @@ static const iw_handler ieee80211_handler[] =
1779 (iw_handler) NULL, /* -- hole -- */ 1802 (iw_handler) NULL, /* -- hole -- */
1780 (iw_handler) NULL, /* -- hole -- */ 1803 (iw_handler) NULL, /* -- hole -- */
1781 (iw_handler) NULL, /* SIOCSIWRATE */ 1804 (iw_handler) NULL, /* SIOCSIWRATE */
1782 (iw_handler) NULL, /* SIOCGIWRATE */ 1805 (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */
1783 (iw_handler) ieee80211_ioctl_siwrts, /* SIOCSIWRTS */ 1806 (iw_handler) ieee80211_ioctl_siwrts, /* SIOCSIWRTS */
1784 (iw_handler) ieee80211_ioctl_giwrts, /* SIOCGIWRTS */ 1807 (iw_handler) ieee80211_ioctl_giwrts, /* SIOCGIWRTS */
1785 (iw_handler) ieee80211_ioctl_siwfrag, /* SIOCSIWFRAG */ 1808 (iw_handler) ieee80211_ioctl_siwfrag, /* SIOCSIWFRAG */
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index 2048cfd1ca70..5ae7fc454665 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -283,14 +283,16 @@ static void rate_control_simple_rate_init(void *priv, void *priv_sta,
283 int i; 283 int i;
284 sta->txrate = 0; 284 sta->txrate = 0;
285 mode = local->oper_hw_mode; 285 mode = local->oper_hw_mode;
286 /* TODO: what is a good starting rate for STA? About middle? Maybe not 286 /* TODO: This routine should consider using RSSI from previous packets
287 * the lowest or the highest rate.. Could consider using RSSI from 287 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
288 * previous packets? Need to have IEEE 802.1X auth succeed immediately 288 * Until that method is implemented, we will use the lowest supported rate
 289 * after assoc.. */ 289 * as a workaround. */
290 for (i = 0; i < mode->num_rates; i++) { 290 for (i = 0; i < mode->num_rates; i++) {
291 if ((sta->supp_rates & BIT(i)) && 291 if ((sta->supp_rates & BIT(i)) &&
292 (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) 292 (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) {
293 sta->txrate = i; 293 sta->txrate = i;
294 break;
295 }
294 } 296 }
295} 297}
296 298
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index a567dae8e5fd..df5e8dab871d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -343,6 +343,18 @@ config NETFILTER_XT_TARGET_NOTRACK
343 If you want to compile it as a module, say M here and read 343 If you want to compile it as a module, say M here and read
344 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 344 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
345 345
346config NETFILTER_XT_TARGET_TRACE
347 tristate '"TRACE" target support'
348 depends on NETFILTER_XTABLES
349 depends on IP_NF_RAW || IP6_NF_RAW
350 help
351 The TRACE target allows you to mark packets so that the kernel
352 will log every rule which match the packets as those traverse
353 the tables, chains, rules.
354
355 If you want to compile it as a module, say M here and read
356 <file:Documentation/modules.txt>. If unsure, say `N'.
357
346config NETFILTER_XT_TARGET_SECMARK 358config NETFILTER_XT_TARGET_SECMARK
347 tristate '"SECMARK" target support' 359 tristate '"SECMARK" target support'
348 depends on NETFILTER_XTABLES && NETWORK_SECMARK 360 depends on NETFILTER_XTABLES && NETWORK_SECMARK
@@ -635,6 +647,19 @@ config NETFILTER_XT_MATCH_TCPMSS
635 647
636 To compile it as a module, choose M here. If unsure, say N. 648 To compile it as a module, choose M here. If unsure, say N.
637 649
650config NETFILTER_XT_MATCH_U32
651 tristate '"u32" match support'
652 depends on NETFILTER_XTABLES
653 ---help---
654 u32 allows you to extract quantities of up to 4 bytes from a packet,
655 AND them with specified masks, shift them by specified amounts and
656 test whether the results are in any of a set of specified ranges.
657 The specification of what to extract is general enough to skip over
658 headers with lengths stored in the packet, as in IP or TCP header
659 lengths.
660
661 Details and examples are in the kernel module source.
662
638config NETFILTER_XT_MATCH_HASHLIMIT 663config NETFILTER_XT_MATCH_HASHLIMIT
639 tristate '"hashlimit" match support' 664 tristate '"hashlimit" match support'
640 depends on NETFILTER_XTABLES && (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) 665 depends on NETFILTER_XTABLES && (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index b2b5c7566b26..58b4245a1723 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,6 @@
1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o 1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
2 2
3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o 3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o
4nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o 4nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
5 5
6obj-$(CONFIG_NETFILTER) = netfilter.o 6obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -44,6 +44,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
44obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o 44obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o 45obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o 46obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o 48obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o 49obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 50obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -72,4 +73,5 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o
72obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o 73obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
73obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o 74obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
74obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o 75obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
76obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
75obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o 77obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a84478ee2ded..381a77cf0c9e 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -203,7 +203,9 @@ int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len)
203 return 0; 203 return 0;
204 204
205 /* Not exclusive use of packet? Must copy. */ 205 /* Not exclusive use of packet? Must copy. */
206 if (skb_shared(*pskb) || skb_cloned(*pskb)) 206 if (skb_cloned(*pskb) && !skb_clone_writable(*pskb, writable_len))
207 goto copy_skb;
208 if (skb_shared(*pskb))
207 goto copy_skb; 209 goto copy_skb;
208 210
209 return pskb_may_pull(*pskb, writable_len); 211 return pskb_may_pull(*pskb, writable_len);
@@ -229,13 +231,13 @@ void nf_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
229{ 231{
230 __be32 diff[] = { ~from, to }; 232 __be32 diff[] = { ~from, to };
231 if (skb->ip_summed != CHECKSUM_PARTIAL) { 233 if (skb->ip_summed != CHECKSUM_PARTIAL) {
232 *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), 234 *sum = csum_fold(csum_partial(diff, sizeof(diff),
233 ~csum_unfold(*sum))); 235 ~csum_unfold(*sum)));
234 if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) 236 if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
235 skb->csum = ~csum_partial((char *)diff, sizeof(diff), 237 skb->csum = ~csum_partial(diff, sizeof(diff),
236 ~skb->csum); 238 ~skb->csum);
237 } else if (pseudohdr) 239 } else if (pseudohdr)
238 *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff), 240 *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
239 csum_unfold(*sum))); 241 csum_unfold(*sum)));
240} 242}
241EXPORT_SYMBOL(nf_proto_csum_replace4); 243EXPORT_SYMBOL(nf_proto_csum_replace4);
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 0568f2e86b59..e42ab230ad88 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -142,23 +142,22 @@ static int amanda_help(struct sk_buff **pskb,
142 if (port == 0 || len > 5) 142 if (port == 0 || len > 5)
143 break; 143 break;
144 144
145 exp = nf_conntrack_expect_alloc(ct); 145 exp = nf_ct_expect_alloc(ct);
146 if (exp == NULL) { 146 if (exp == NULL) {
147 ret = NF_DROP; 147 ret = NF_DROP;
148 goto out; 148 goto out;
149 } 149 }
150 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 150 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
151 nf_conntrack_expect_init(exp, family, 151 nf_ct_expect_init(exp, family, &tuple->src.u3, &tuple->dst.u3,
152 &tuple->src.u3, &tuple->dst.u3, 152 IPPROTO_TCP, NULL, &port);
153 IPPROTO_TCP, NULL, &port);
154 153
155 nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook); 154 nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook);
156 if (nf_nat_amanda && ct->status & IPS_NAT_MASK) 155 if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
157 ret = nf_nat_amanda(pskb, ctinfo, off - dataoff, 156 ret = nf_nat_amanda(pskb, ctinfo, off - dataoff,
158 len, exp); 157 len, exp);
159 else if (nf_conntrack_expect_related(exp) != 0) 158 else if (nf_ct_expect_related(exp) != 0)
160 ret = NF_DROP; 159 ret = NF_DROP;
161 nf_conntrack_expect_put(exp); 160 nf_ct_expect_put(exp);
162 } 161 }
163 162
164out: 163out:
@@ -175,9 +174,6 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = {
175 .tuple.src.l3num = AF_INET, 174 .tuple.src.l3num = AF_INET,
176 .tuple.src.u.udp.port = __constant_htons(10080), 175 .tuple.src.u.udp.port = __constant_htons(10080),
177 .tuple.dst.protonum = IPPROTO_UDP, 176 .tuple.dst.protonum = IPPROTO_UDP,
178 .mask.src.l3num = 0xFFFF,
179 .mask.src.u.udp.port = __constant_htons(0xFFFF),
180 .mask.dst.protonum = 0xFF,
181 }, 177 },
182 { 178 {
183 .name = "amanda", 179 .name = "amanda",
@@ -188,9 +184,6 @@ static struct nf_conntrack_helper amanda_helper[2] __read_mostly = {
188 .tuple.src.l3num = AF_INET6, 184 .tuple.src.l3num = AF_INET6,
189 .tuple.src.u.udp.port = __constant_htons(10080), 185 .tuple.src.u.udp.port = __constant_htons(10080),
190 .tuple.dst.protonum = IPPROTO_UDP, 186 .tuple.dst.protonum = IPPROTO_UDP,
191 .mask.src.l3num = 0xFFFF,
192 .mask.src.u.udp.port = __constant_htons(0xFFFF),
193 .mask.dst.protonum = 0xFF,
194 }, 187 },
195}; 188};
196 189
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7a15e30356f2..3d1411012a2c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -36,15 +36,10 @@
36#include <net/netfilter/nf_conntrack_expect.h> 36#include <net/netfilter/nf_conntrack_expect.h>
37#include <net/netfilter/nf_conntrack_helper.h> 37#include <net/netfilter/nf_conntrack_helper.h>
38#include <net/netfilter/nf_conntrack_core.h> 38#include <net/netfilter/nf_conntrack_core.h>
39#include <net/netfilter/nf_conntrack_extend.h>
39 40
40#define NF_CONNTRACK_VERSION "0.5.0" 41#define NF_CONNTRACK_VERSION "0.5.0"
41 42
42#if 0
43#define DEBUGP printk
44#else
45#define DEBUGP(format, args...)
46#endif
47
48DEFINE_RWLOCK(nf_conntrack_lock); 43DEFINE_RWLOCK(nf_conntrack_lock);
49EXPORT_SYMBOL_GPL(nf_conntrack_lock); 44EXPORT_SYMBOL_GPL(nf_conntrack_lock);
50 45
@@ -52,57 +47,27 @@ EXPORT_SYMBOL_GPL(nf_conntrack_lock);
52atomic_t nf_conntrack_count = ATOMIC_INIT(0); 47atomic_t nf_conntrack_count = ATOMIC_INIT(0);
53EXPORT_SYMBOL_GPL(nf_conntrack_count); 48EXPORT_SYMBOL_GPL(nf_conntrack_count);
54 49
55void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);
56EXPORT_SYMBOL_GPL(nf_conntrack_destroyed);
57
58unsigned int nf_conntrack_htable_size __read_mostly; 50unsigned int nf_conntrack_htable_size __read_mostly;
59EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); 51EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
60 52
61int nf_conntrack_max __read_mostly; 53int nf_conntrack_max __read_mostly;
62EXPORT_SYMBOL_GPL(nf_conntrack_max); 54EXPORT_SYMBOL_GPL(nf_conntrack_max);
63 55
64struct list_head *nf_conntrack_hash __read_mostly; 56struct hlist_head *nf_conntrack_hash __read_mostly;
65EXPORT_SYMBOL_GPL(nf_conntrack_hash); 57EXPORT_SYMBOL_GPL(nf_conntrack_hash);
66 58
67struct nf_conn nf_conntrack_untracked __read_mostly; 59struct nf_conn nf_conntrack_untracked __read_mostly;
68EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 60EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
69 61
70unsigned int nf_ct_log_invalid __read_mostly; 62unsigned int nf_ct_log_invalid __read_mostly;
71LIST_HEAD(unconfirmed); 63HLIST_HEAD(unconfirmed);
72static int nf_conntrack_vmalloc __read_mostly; 64static int nf_conntrack_vmalloc __read_mostly;
73 65static struct kmem_cache *nf_conntrack_cachep __read_mostly;
74static unsigned int nf_conntrack_next_id; 66static unsigned int nf_conntrack_next_id;
75 67
76DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); 68DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
77EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); 69EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
78 70
79/*
80 * This scheme offers various size of "struct nf_conn" dependent on
81 * features(helper, nat, ...)
82 */
83
84#define NF_CT_FEATURES_NAMELEN 256
85static struct {
86 /* name of slab cache. printed in /proc/slabinfo */
87 char *name;
88
89 /* size of slab cache */
90 size_t size;
91
92 /* slab cache pointer */
93 struct kmem_cache *cachep;
94
95 /* allocated slab cache + modules which uses this slab cache */
96 int use;
97
98} nf_ct_cache[NF_CT_F_NUM];
99
100/* protect members of nf_ct_cache except of "use" */
101DEFINE_RWLOCK(nf_ct_cache_lock);
102
103/* This avoids calling kmem_cache_create() with same name simultaneously */
104static DEFINE_MUTEX(nf_ct_cache_mutex);
105
106static int nf_conntrack_hash_rnd_initted; 71static int nf_conntrack_hash_rnd_initted;
107static unsigned int nf_conntrack_hash_rnd; 72static unsigned int nf_conntrack_hash_rnd;
108 73
@@ -125,122 +90,6 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
125 nf_conntrack_hash_rnd); 90 nf_conntrack_hash_rnd);
126} 91}
127 92
128int nf_conntrack_register_cache(u_int32_t features, const char *name,
129 size_t size)
130{
131 int ret = 0;
132 char *cache_name;
133 struct kmem_cache *cachep;
134
135 DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
136 features, name, size);
137
138 if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
139 DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n",
140 features);
141 return -EINVAL;
142 }
143
144 mutex_lock(&nf_ct_cache_mutex);
145
146 write_lock_bh(&nf_ct_cache_lock);
147 /* e.g: multiple helpers are loaded */
148 if (nf_ct_cache[features].use > 0) {
149 DEBUGP("nf_conntrack_register_cache: already resisterd.\n");
150 if ((!strncmp(nf_ct_cache[features].name, name,
151 NF_CT_FEATURES_NAMELEN))
152 && nf_ct_cache[features].size == size) {
153 DEBUGP("nf_conntrack_register_cache: reusing.\n");
154 nf_ct_cache[features].use++;
155 ret = 0;
156 } else
157 ret = -EBUSY;
158
159 write_unlock_bh(&nf_ct_cache_lock);
160 mutex_unlock(&nf_ct_cache_mutex);
161 return ret;
162 }
163 write_unlock_bh(&nf_ct_cache_lock);
164
165 /*
166 * The memory space for name of slab cache must be alive until
167 * cache is destroyed.
168 */
169 cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
170 if (cache_name == NULL) {
171 DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
172 ret = -ENOMEM;
173 goto out_up_mutex;
174 }
175
176 if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
177 >= NF_CT_FEATURES_NAMELEN) {
178 printk("nf_conntrack_register_cache: name too long\n");
179 ret = -EINVAL;
180 goto out_free_name;
181 }
182
183 cachep = kmem_cache_create(cache_name, size, 0, 0,
184 NULL, NULL);
185 if (!cachep) {
186 printk("nf_conntrack_register_cache: Can't create slab cache "
187 "for the features = 0x%x\n", features);
188 ret = -ENOMEM;
189 goto out_free_name;
190 }
191
192 write_lock_bh(&nf_ct_cache_lock);
193 nf_ct_cache[features].use = 1;
194 nf_ct_cache[features].size = size;
195 nf_ct_cache[features].cachep = cachep;
196 nf_ct_cache[features].name = cache_name;
197 write_unlock_bh(&nf_ct_cache_lock);
198
199 goto out_up_mutex;
200
201out_free_name:
202 kfree(cache_name);
203out_up_mutex:
204 mutex_unlock(&nf_ct_cache_mutex);
205 return ret;
206}
207EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
208
209/* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
210void nf_conntrack_unregister_cache(u_int32_t features)
211{
212 struct kmem_cache *cachep;
213 char *name;
214
215 /*
216 * This assures that kmem_cache_create() isn't called before destroying
217 * slab cache.
218 */
219 DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
220 mutex_lock(&nf_ct_cache_mutex);
221
222 write_lock_bh(&nf_ct_cache_lock);
223 if (--nf_ct_cache[features].use > 0) {
224 write_unlock_bh(&nf_ct_cache_lock);
225 mutex_unlock(&nf_ct_cache_mutex);
226 return;
227 }
228 cachep = nf_ct_cache[features].cachep;
229 name = nf_ct_cache[features].name;
230 nf_ct_cache[features].cachep = NULL;
231 nf_ct_cache[features].name = NULL;
232 nf_ct_cache[features].size = 0;
233 write_unlock_bh(&nf_ct_cache_lock);
234
235 synchronize_net();
236
237 kmem_cache_destroy(cachep);
238 kfree(name);
239
240 mutex_unlock(&nf_ct_cache_mutex);
241}
242EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache);
243
244int 93int
245nf_ct_get_tuple(const struct sk_buff *skb, 94nf_ct_get_tuple(const struct sk_buff *skb,
246 unsigned int nhoff, 95 unsigned int nhoff,
@@ -286,9 +135,9 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
286static void 135static void
287clean_from_lists(struct nf_conn *ct) 136clean_from_lists(struct nf_conn *ct)
288{ 137{
289 DEBUGP("clean_from_lists(%p)\n", ct); 138 pr_debug("clean_from_lists(%p)\n", ct);
290 list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); 139 hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
291 list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list); 140 hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
292 141
293 /* Destroy all pending expectations */ 142 /* Destroy all pending expectations */
294 nf_ct_remove_expectations(ct); 143 nf_ct_remove_expectations(ct);
@@ -299,9 +148,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
299{ 148{
300 struct nf_conn *ct = (struct nf_conn *)nfct; 149 struct nf_conn *ct = (struct nf_conn *)nfct;
301 struct nf_conntrack_l4proto *l4proto; 150 struct nf_conntrack_l4proto *l4proto;
302 typeof(nf_conntrack_destroyed) destroyed;
303 151
304 DEBUGP("destroy_conntrack(%p)\n", ct); 152 pr_debug("destroy_conntrack(%p)\n", ct);
305 NF_CT_ASSERT(atomic_read(&nfct->use) == 0); 153 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
306 NF_CT_ASSERT(!timer_pending(&ct->timeout)); 154 NF_CT_ASSERT(!timer_pending(&ct->timeout));
307 155
@@ -317,9 +165,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
317 if (l4proto && l4proto->destroy) 165 if (l4proto && l4proto->destroy)
318 l4proto->destroy(ct); 166 l4proto->destroy(ct);
319 167
320 destroyed = rcu_dereference(nf_conntrack_destroyed); 168 nf_ct_ext_destroy(ct);
321 if (destroyed)
322 destroyed(ct);
323 169
324 rcu_read_unlock(); 170 rcu_read_unlock();
325 171
@@ -332,8 +178,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
332 178
333 /* We overload first tuple to link into unconfirmed list. */ 179 /* We overload first tuple to link into unconfirmed list. */
334 if (!nf_ct_is_confirmed(ct)) { 180 if (!nf_ct_is_confirmed(ct)) {
335 BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list)); 181 BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
336 list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); 182 hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
337 } 183 }
338 184
339 NF_CT_STAT_INC(delete); 185 NF_CT_STAT_INC(delete);
@@ -342,7 +188,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
342 if (ct->master) 188 if (ct->master)
343 nf_ct_put(ct->master); 189 nf_ct_put(ct->master);
344 190
345 DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct); 191 pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
346 nf_conntrack_free(ct); 192 nf_conntrack_free(ct);
347} 193}
348 194
@@ -374,9 +220,10 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
374 const struct nf_conn *ignored_conntrack) 220 const struct nf_conn *ignored_conntrack)
375{ 221{
376 struct nf_conntrack_tuple_hash *h; 222 struct nf_conntrack_tuple_hash *h;
223 struct hlist_node *n;
377 unsigned int hash = hash_conntrack(tuple); 224 unsigned int hash = hash_conntrack(tuple);
378 225
379 list_for_each_entry(h, &nf_conntrack_hash[hash], list) { 226 hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
380 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 227 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
381 nf_ct_tuple_equal(tuple, &h->tuple)) { 228 nf_ct_tuple_equal(tuple, &h->tuple)) {
382 NF_CT_STAT_INC(found); 229 NF_CT_STAT_INC(found);
@@ -391,13 +238,12 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);
391 238
392/* Find a connection corresponding to a tuple. */ 239/* Find a connection corresponding to a tuple. */
393struct nf_conntrack_tuple_hash * 240struct nf_conntrack_tuple_hash *
394nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple, 241nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
395 const struct nf_conn *ignored_conntrack)
396{ 242{
397 struct nf_conntrack_tuple_hash *h; 243 struct nf_conntrack_tuple_hash *h;
398 244
399 read_lock_bh(&nf_conntrack_lock); 245 read_lock_bh(&nf_conntrack_lock);
400 h = __nf_conntrack_find(tuple, ignored_conntrack); 246 h = __nf_conntrack_find(tuple, NULL);
401 if (h) 247 if (h)
402 atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use); 248 atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
403 read_unlock_bh(&nf_conntrack_lock); 249 read_unlock_bh(&nf_conntrack_lock);
@@ -411,10 +257,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
411 unsigned int repl_hash) 257 unsigned int repl_hash)
412{ 258{
413 ct->id = ++nf_conntrack_next_id; 259 ct->id = ++nf_conntrack_next_id;
414 list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list, 260 hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
415 &nf_conntrack_hash[hash]); 261 &nf_conntrack_hash[hash]);
416 list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list, 262 hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
417 &nf_conntrack_hash[repl_hash]); 263 &nf_conntrack_hash[repl_hash]);
418} 264}
419 265
420void nf_conntrack_hash_insert(struct nf_conn *ct) 266void nf_conntrack_hash_insert(struct nf_conn *ct)
@@ -438,6 +284,7 @@ __nf_conntrack_confirm(struct sk_buff **pskb)
438 struct nf_conntrack_tuple_hash *h; 284 struct nf_conntrack_tuple_hash *h;
439 struct nf_conn *ct; 285 struct nf_conn *ct;
440 struct nf_conn_help *help; 286 struct nf_conn_help *help;
287 struct hlist_node *n;
441 enum ip_conntrack_info ctinfo; 288 enum ip_conntrack_info ctinfo;
442 289
443 ct = nf_ct_get(*pskb, &ctinfo); 290 ct = nf_ct_get(*pskb, &ctinfo);
@@ -460,24 +307,24 @@ __nf_conntrack_confirm(struct sk_buff **pskb)
460 /* No external references means noone else could have 307 /* No external references means noone else could have
461 confirmed us. */ 308 confirmed us. */
462 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 309 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
463 DEBUGP("Confirming conntrack %p\n", ct); 310 pr_debug("Confirming conntrack %p\n", ct);
464 311
465 write_lock_bh(&nf_conntrack_lock); 312 write_lock_bh(&nf_conntrack_lock);
466 313
467 /* See if there's one in the list already, including reverse: 314 /* See if there's one in the list already, including reverse:
468 NAT could have grabbed it without realizing, since we're 315 NAT could have grabbed it without realizing, since we're
469 not in the hash. If there is, we lost race. */ 316 not in the hash. If there is, we lost race. */
470 list_for_each_entry(h, &nf_conntrack_hash[hash], list) 317 hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
471 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 318 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
472 &h->tuple)) 319 &h->tuple))
473 goto out; 320 goto out;
474 list_for_each_entry(h, &nf_conntrack_hash[repl_hash], list) 321 hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
475 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 322 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
476 &h->tuple)) 323 &h->tuple))
477 goto out; 324 goto out;
478 325
479 /* Remove from unconfirmed list */ 326 /* Remove from unconfirmed list */
480 list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); 327 hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
481 328
482 __nf_conntrack_hash_insert(ct, hash, repl_hash); 329 __nf_conntrack_hash_insert(ct, hash, repl_hash);
483 /* Timer relative to confirmation time, not original 330 /* Timer relative to confirmation time, not original
@@ -524,24 +371,33 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
524} 371}
525EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); 372EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
526 373
374#define NF_CT_EVICTION_RANGE 8
375
527/* There's a small race here where we may free a just-assured 376/* There's a small race here where we may free a just-assured
528 connection. Too bad: we're in trouble anyway. */ 377 connection. Too bad: we're in trouble anyway. */
529static int early_drop(struct list_head *chain) 378static int early_drop(unsigned int hash)
530{ 379{
531 /* Traverse backwards: gives us oldest, which is roughly LRU */ 380 /* Use oldest entry, which is roughly LRU */
532 struct nf_conntrack_tuple_hash *h; 381 struct nf_conntrack_tuple_hash *h;
533 struct nf_conn *ct = NULL, *tmp; 382 struct nf_conn *ct = NULL, *tmp;
383 struct hlist_node *n;
384 unsigned int i, cnt = 0;
534 int dropped = 0; 385 int dropped = 0;
535 386
536 read_lock_bh(&nf_conntrack_lock); 387 read_lock_bh(&nf_conntrack_lock);
537 list_for_each_entry_reverse(h, chain, list) { 388 for (i = 0; i < nf_conntrack_htable_size; i++) {
538 tmp = nf_ct_tuplehash_to_ctrack(h); 389 hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
539 if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) { 390 tmp = nf_ct_tuplehash_to_ctrack(h);
540 ct = tmp; 391 if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
541 atomic_inc(&ct->ct_general.use); 392 ct = tmp;
542 break; 393 cnt++;
543 } 394 }
395 if (ct || cnt >= NF_CT_EVICTION_RANGE)
396 break;
397 hash = (hash + 1) % nf_conntrack_htable_size;
544 } 398 }
399 if (ct)
400 atomic_inc(&ct->ct_general.use);
545 read_unlock_bh(&nf_conntrack_lock); 401 read_unlock_bh(&nf_conntrack_lock);
546 402
547 if (!ct) 403 if (!ct)
@@ -556,14 +412,10 @@ static int early_drop(struct list_head *chain)
556 return dropped; 412 return dropped;
557} 413}
558 414
559static struct nf_conn * 415struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
560__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, 416 const struct nf_conntrack_tuple *repl)
561 const struct nf_conntrack_tuple *repl,
562 const struct nf_conntrack_l3proto *l3proto,
563 u_int32_t features)
564{ 417{
565 struct nf_conn *conntrack = NULL; 418 struct nf_conn *conntrack = NULL;
566 struct nf_conntrack_helper *helper;
567 419
568 if (unlikely(!nf_conntrack_hash_rnd_initted)) { 420 if (unlikely(!nf_conntrack_hash_rnd_initted)) {
569 get_random_bytes(&nf_conntrack_hash_rnd, 4); 421 get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -576,8 +428,7 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
576 if (nf_conntrack_max 428 if (nf_conntrack_max
577 && atomic_read(&nf_conntrack_count) > nf_conntrack_max) { 429 && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
578 unsigned int hash = hash_conntrack(orig); 430 unsigned int hash = hash_conntrack(orig);
579 /* Try dropping from this hash chain. */ 431 if (!early_drop(hash)) {
580 if (!early_drop(&nf_conntrack_hash[hash])) {
581 atomic_dec(&nf_conntrack_count); 432 atomic_dec(&nf_conntrack_count);
582 if (net_ratelimit()) 433 if (net_ratelimit())
583 printk(KERN_WARNING 434 printk(KERN_WARNING
@@ -587,72 +438,28 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
587 } 438 }
588 } 439 }
589 440
590 /* find features needed by this conntrack. */ 441 conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
591 features |= l3proto->get_features(orig);
592
593 /* FIXME: protect helper list per RCU */
594 read_lock_bh(&nf_conntrack_lock);
595 helper = __nf_ct_helper_find(repl);
596 /* NAT might want to assign a helper later */
597 if (helper || features & NF_CT_F_NAT)
598 features |= NF_CT_F_HELP;
599 read_unlock_bh(&nf_conntrack_lock);
600
601 DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);
602
603 read_lock_bh(&nf_ct_cache_lock);
604
605 if (unlikely(!nf_ct_cache[features].use)) {
606 DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
607 features);
608 goto out;
609 }
610
611 conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
612 if (conntrack == NULL) { 442 if (conntrack == NULL) {
613 DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n"); 443 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
614 goto out; 444 atomic_dec(&nf_conntrack_count);
445 return ERR_PTR(-ENOMEM);
615 } 446 }
616 447
617 memset(conntrack, 0, nf_ct_cache[features].size);
618 conntrack->features = features;
619 atomic_set(&conntrack->ct_general.use, 1); 448 atomic_set(&conntrack->ct_general.use, 1);
620 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; 449 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
621 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; 450 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
622 /* Don't set timer yet: wait for confirmation */ 451 /* Don't set timer yet: wait for confirmation */
623 setup_timer(&conntrack->timeout, death_by_timeout, 452 setup_timer(&conntrack->timeout, death_by_timeout,
624 (unsigned long)conntrack); 453 (unsigned long)conntrack);
625 read_unlock_bh(&nf_ct_cache_lock);
626 454
627 return conntrack; 455 return conntrack;
628out:
629 read_unlock_bh(&nf_ct_cache_lock);
630 atomic_dec(&nf_conntrack_count);
631 return conntrack;
632}
633
634struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
635 const struct nf_conntrack_tuple *repl)
636{
637 struct nf_conntrack_l3proto *l3proto;
638 struct nf_conn *ct;
639
640 rcu_read_lock();
641 l3proto = __nf_ct_l3proto_find(orig->src.l3num);
642 ct = __nf_conntrack_alloc(orig, repl, l3proto, 0);
643 rcu_read_unlock();
644
645 return ct;
646} 456}
647EXPORT_SYMBOL_GPL(nf_conntrack_alloc); 457EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
648 458
649void nf_conntrack_free(struct nf_conn *conntrack) 459void nf_conntrack_free(struct nf_conn *conntrack)
650{ 460{
651 u_int32_t features = conntrack->features; 461 nf_ct_ext_free(conntrack);
652 NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM); 462 kmem_cache_free(nf_conntrack_cachep, conntrack);
653 DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
654 conntrack);
655 kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
656 atomic_dec(&nf_conntrack_count); 463 atomic_dec(&nf_conntrack_count);
657} 464}
658EXPORT_SYMBOL_GPL(nf_conntrack_free); 465EXPORT_SYMBOL_GPL(nf_conntrack_free);
@@ -670,43 +477,38 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
670 struct nf_conn_help *help; 477 struct nf_conn_help *help;
671 struct nf_conntrack_tuple repl_tuple; 478 struct nf_conntrack_tuple repl_tuple;
672 struct nf_conntrack_expect *exp; 479 struct nf_conntrack_expect *exp;
673 u_int32_t features = 0;
674 480
675 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { 481 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
676 DEBUGP("Can't invert tuple.\n"); 482 pr_debug("Can't invert tuple.\n");
677 return NULL; 483 return NULL;
678 } 484 }
679 485
680 read_lock_bh(&nf_conntrack_lock); 486 conntrack = nf_conntrack_alloc(tuple, &repl_tuple);
681 exp = __nf_conntrack_expect_find(tuple);
682 if (exp && exp->helper)
683 features = NF_CT_F_HELP;
684 read_unlock_bh(&nf_conntrack_lock);
685
686 conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features);
687 if (conntrack == NULL || IS_ERR(conntrack)) { 487 if (conntrack == NULL || IS_ERR(conntrack)) {
688 DEBUGP("Can't allocate conntrack.\n"); 488 pr_debug("Can't allocate conntrack.\n");
689 return (struct nf_conntrack_tuple_hash *)conntrack; 489 return (struct nf_conntrack_tuple_hash *)conntrack;
690 } 490 }
691 491
692 if (!l4proto->new(conntrack, skb, dataoff)) { 492 if (!l4proto->new(conntrack, skb, dataoff)) {
693 nf_conntrack_free(conntrack); 493 nf_conntrack_free(conntrack);
694 DEBUGP("init conntrack: can't track with proto module\n"); 494 pr_debug("init conntrack: can't track with proto module\n");
695 return NULL; 495 return NULL;
696 } 496 }
697 497
698 write_lock_bh(&nf_conntrack_lock); 498 write_lock_bh(&nf_conntrack_lock);
699 exp = find_expectation(tuple); 499 exp = nf_ct_find_expectation(tuple);
700
701 help = nfct_help(conntrack);
702 if (exp) { 500 if (exp) {
703 DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n", 501 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
704 conntrack, exp); 502 conntrack, exp);
705 /* Welcome, Mr. Bond. We've been expecting you... */ 503 /* Welcome, Mr. Bond. We've been expecting you... */
706 __set_bit(IPS_EXPECTED_BIT, &conntrack->status); 504 __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
707 conntrack->master = exp->master; 505 conntrack->master = exp->master;
708 if (exp->helper) 506 if (exp->helper) {
709 rcu_assign_pointer(help->helper, exp->helper); 507 help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
508 if (help)
509 rcu_assign_pointer(help->helper, exp->helper);
510 }
511
710#ifdef CONFIG_NF_CONNTRACK_MARK 512#ifdef CONFIG_NF_CONNTRACK_MARK
711 conntrack->mark = exp->master->mark; 513 conntrack->mark = exp->master->mark;
712#endif 514#endif
@@ -716,23 +518,27 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
716 nf_conntrack_get(&conntrack->master->ct_general); 518 nf_conntrack_get(&conntrack->master->ct_general);
717 NF_CT_STAT_INC(expect_new); 519 NF_CT_STAT_INC(expect_new);
718 } else { 520 } else {
719 if (help) { 521 struct nf_conntrack_helper *helper;
720 /* not in hash table yet, so not strictly necessary */ 522
721 rcu_assign_pointer(help->helper, 523 helper = __nf_ct_helper_find(&repl_tuple);
722 __nf_ct_helper_find(&repl_tuple)); 524 if (helper) {
525 help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
526 if (help)
527 rcu_assign_pointer(help->helper, helper);
723 } 528 }
724 NF_CT_STAT_INC(new); 529 NF_CT_STAT_INC(new);
725 } 530 }
726 531
727 /* Overload tuple linked list to put us in unconfirmed list. */ 532 /* Overload tuple linked list to put us in unconfirmed list. */
728 list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed); 533 hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
534 &unconfirmed);
729 535
730 write_unlock_bh(&nf_conntrack_lock); 536 write_unlock_bh(&nf_conntrack_lock);
731 537
732 if (exp) { 538 if (exp) {
733 if (exp->expectfn) 539 if (exp->expectfn)
734 exp->expectfn(conntrack, exp); 540 exp->expectfn(conntrack, exp);
735 nf_conntrack_expect_put(exp); 541 nf_ct_expect_put(exp);
736 } 542 }
737 543
738 return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL]; 544 return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
@@ -756,12 +562,12 @@ resolve_normal_ct(struct sk_buff *skb,
756 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 562 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
757 dataoff, l3num, protonum, &tuple, l3proto, 563 dataoff, l3num, protonum, &tuple, l3proto,
758 l4proto)) { 564 l4proto)) {
759 DEBUGP("resolve_normal_ct: Can't get tuple\n"); 565 pr_debug("resolve_normal_ct: Can't get tuple\n");
760 return NULL; 566 return NULL;
761 } 567 }
762 568
763 /* look for tuple match */ 569 /* look for tuple match */
764 h = nf_conntrack_find_get(&tuple, NULL); 570 h = nf_conntrack_find_get(&tuple);
765 if (!h) { 571 if (!h) {
766 h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff); 572 h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
767 if (!h) 573 if (!h)
@@ -779,13 +585,14 @@ resolve_normal_ct(struct sk_buff *skb,
779 } else { 585 } else {
780 /* Once we've had two way comms, always ESTABLISHED. */ 586 /* Once we've had two way comms, always ESTABLISHED. */
781 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 587 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
782 DEBUGP("nf_conntrack_in: normal packet for %p\n", ct); 588 pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
783 *ctinfo = IP_CT_ESTABLISHED; 589 *ctinfo = IP_CT_ESTABLISHED;
784 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { 590 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
785 DEBUGP("nf_conntrack_in: related packet for %p\n", ct); 591 pr_debug("nf_conntrack_in: related packet for %p\n",
592 ct);
786 *ctinfo = IP_CT_RELATED; 593 *ctinfo = IP_CT_RELATED;
787 } else { 594 } else {
788 DEBUGP("nf_conntrack_in: new packet for %p\n", ct); 595 pr_debug("nf_conntrack_in: new packet for %p\n", ct);
789 *ctinfo = IP_CT_NEW; 596 *ctinfo = IP_CT_NEW;
790 } 597 }
791 *set_reply = 0; 598 *set_reply = 0;
@@ -817,7 +624,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
817 l3proto = __nf_ct_l3proto_find((u_int16_t)pf); 624 l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
818 625
819 if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) { 626 if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
820 DEBUGP("not prepared to track yet or error occured\n"); 627 pr_debug("not prepared to track yet or error occured\n");
821 return -ret; 628 return -ret;
822 } 629 }
823 630
@@ -853,7 +660,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
853 if (ret < 0) { 660 if (ret < 0) {
854 /* Invalid: inverse of the return code tells 661 /* Invalid: inverse of the return code tells
855 * the netfilter core what to do */ 662 * the netfilter core what to do */
856 DEBUGP("nf_conntrack_in: Can't track with proto module\n"); 663 pr_debug("nf_conntrack_in: Can't track with proto module\n");
857 nf_conntrack_put((*pskb)->nfct); 664 nf_conntrack_put((*pskb)->nfct);
858 (*pskb)->nfct = NULL; 665 (*pskb)->nfct = NULL;
859 NF_CT_STAT_INC_ATOMIC(invalid); 666 NF_CT_STAT_INC_ATOMIC(invalid);
@@ -888,23 +695,36 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
888 const struct nf_conntrack_tuple *newreply) 695 const struct nf_conntrack_tuple *newreply)
889{ 696{
890 struct nf_conn_help *help = nfct_help(ct); 697 struct nf_conn_help *help = nfct_help(ct);
698 struct nf_conntrack_helper *helper;
891 699
892 write_lock_bh(&nf_conntrack_lock); 700 write_lock_bh(&nf_conntrack_lock);
893 /* Should be unconfirmed, so not in hash table yet */ 701 /* Should be unconfirmed, so not in hash table yet */
894 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 702 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
895 703
896 DEBUGP("Altering reply tuple of %p to ", ct); 704 pr_debug("Altering reply tuple of %p to ", ct);
897 NF_CT_DUMP_TUPLE(newreply); 705 NF_CT_DUMP_TUPLE(newreply);
898 706
899 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; 707 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
900 if (!ct->master && help && help->expecting == 0) { 708 if (ct->master || (help && help->expecting != 0))
901 struct nf_conntrack_helper *helper; 709 goto out;
902 helper = __nf_ct_helper_find(newreply); 710
903 if (helper) 711 helper = __nf_ct_helper_find(newreply);
904 memset(&help->help, 0, sizeof(help->help)); 712 if (helper == NULL) {
905 /* not in hash table yet, so not strictly necessary */ 713 if (help)
906 rcu_assign_pointer(help->helper, helper); 714 rcu_assign_pointer(help->helper, NULL);
715 goto out;
907 } 716 }
717
718 if (help == NULL) {
719 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
720 if (help == NULL)
721 goto out;
722 } else {
723 memset(&help->help, 0, sizeof(help->help));
724 }
725
726 rcu_assign_pointer(help->helper, helper);
727out:
908 write_unlock_bh(&nf_conntrack_lock); 728 write_unlock_bh(&nf_conntrack_lock);
909} 729}
910EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); 730EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@@ -1048,16 +868,17 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
1048{ 868{
1049 struct nf_conntrack_tuple_hash *h; 869 struct nf_conntrack_tuple_hash *h;
1050 struct nf_conn *ct; 870 struct nf_conn *ct;
871 struct hlist_node *n;
1051 872
1052 write_lock_bh(&nf_conntrack_lock); 873 write_lock_bh(&nf_conntrack_lock);
1053 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 874 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1054 list_for_each_entry(h, &nf_conntrack_hash[*bucket], list) { 875 hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
1055 ct = nf_ct_tuplehash_to_ctrack(h); 876 ct = nf_ct_tuplehash_to_ctrack(h);
1056 if (iter(ct, data)) 877 if (iter(ct, data))
1057 goto found; 878 goto found;
1058 } 879 }
1059 } 880 }
1060 list_for_each_entry(h, &unconfirmed, list) { 881 hlist_for_each_entry(h, n, &unconfirmed, hnode) {
1061 ct = nf_ct_tuplehash_to_ctrack(h); 882 ct = nf_ct_tuplehash_to_ctrack(h);
1062 if (iter(ct, data)) 883 if (iter(ct, data))
1063 set_bit(IPS_DYING_BIT, &ct->status); 884 set_bit(IPS_DYING_BIT, &ct->status);
@@ -1092,14 +913,15 @@ static int kill_all(struct nf_conn *i, void *data)
1092 return 1; 913 return 1;
1093} 914}
1094 915
1095static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size) 916void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, int size)
1096{ 917{
1097 if (vmalloced) 918 if (vmalloced)
1098 vfree(hash); 919 vfree(hash);
1099 else 920 else
1100 free_pages((unsigned long)hash, 921 free_pages((unsigned long)hash,
1101 get_order(sizeof(struct list_head) * size)); 922 get_order(sizeof(struct hlist_head) * size));
1102} 923}
924EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1103 925
1104void nf_conntrack_flush(void) 926void nf_conntrack_flush(void)
1105{ 927{
@@ -1111,8 +933,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_flush);
1111 supposed to kill the mall. */ 933 supposed to kill the mall. */
1112void nf_conntrack_cleanup(void) 934void nf_conntrack_cleanup(void)
1113{ 935{
1114 int i;
1115
1116 rcu_assign_pointer(ip_ct_attach, NULL); 936 rcu_assign_pointer(ip_ct_attach, NULL);
1117 937
1118 /* This makes sure all current packets have passed through 938 /* This makes sure all current packets have passed through
@@ -1133,49 +953,46 @@ void nf_conntrack_cleanup(void)
1133 953
1134 rcu_assign_pointer(nf_ct_destroy, NULL); 954 rcu_assign_pointer(nf_ct_destroy, NULL);
1135 955
1136 for (i = 0; i < NF_CT_F_NUM; i++) { 956 kmem_cache_destroy(nf_conntrack_cachep);
1137 if (nf_ct_cache[i].use == 0) 957 nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
1138 continue; 958 nf_conntrack_htable_size);
1139
1140 NF_CT_ASSERT(nf_ct_cache[i].use == 1);
1141 nf_ct_cache[i].use = 1;
1142 nf_conntrack_unregister_cache(i);
1143 }
1144 kmem_cache_destroy(nf_conntrack_expect_cachep);
1145 free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
1146 nf_conntrack_htable_size);
1147 959
1148 nf_conntrack_proto_fini(); 960 nf_conntrack_proto_fini();
961 nf_conntrack_helper_fini();
962 nf_conntrack_expect_fini();
1149} 963}
1150 964
1151static struct list_head *alloc_hashtable(int size, int *vmalloced) 965struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
1152{ 966{
1153 struct list_head *hash; 967 struct hlist_head *hash;
1154 unsigned int i; 968 unsigned int size, i;
1155 969
1156 *vmalloced = 0; 970 *vmalloced = 0;
971
972 size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
1157 hash = (void*)__get_free_pages(GFP_KERNEL, 973 hash = (void*)__get_free_pages(GFP_KERNEL,
1158 get_order(sizeof(struct list_head) 974 get_order(sizeof(struct hlist_head)
1159 * size)); 975 * size));
1160 if (!hash) { 976 if (!hash) {
1161 *vmalloced = 1; 977 *vmalloced = 1;
1162 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); 978 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1163 hash = vmalloc(sizeof(struct list_head) * size); 979 hash = vmalloc(sizeof(struct hlist_head) * size);
1164 } 980 }
1165 981
1166 if (hash) 982 if (hash)
1167 for (i = 0; i < size; i++) 983 for (i = 0; i < size; i++)
1168 INIT_LIST_HEAD(&hash[i]); 984 INIT_HLIST_HEAD(&hash[i]);
1169 985
1170 return hash; 986 return hash;
1171} 987}
988EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1172 989
1173int set_hashsize(const char *val, struct kernel_param *kp) 990int set_hashsize(const char *val, struct kernel_param *kp)
1174{ 991{
1175 int i, bucket, hashsize, vmalloced; 992 int i, bucket, hashsize, vmalloced;
1176 int old_vmalloced, old_size; 993 int old_vmalloced, old_size;
1177 int rnd; 994 int rnd;
1178 struct list_head *hash, *old_hash; 995 struct hlist_head *hash, *old_hash;
1179 struct nf_conntrack_tuple_hash *h; 996 struct nf_conntrack_tuple_hash *h;
1180 997
1181 /* On boot, we can set this without any fancy locking. */ 998 /* On boot, we can set this without any fancy locking. */
@@ -1186,7 +1003,7 @@ int set_hashsize(const char *val, struct kernel_param *kp)
1186 if (!hashsize) 1003 if (!hashsize)
1187 return -EINVAL; 1004 return -EINVAL;
1188 1005
1189 hash = alloc_hashtable(hashsize, &vmalloced); 1006 hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
1190 if (!hash) 1007 if (!hash)
1191 return -ENOMEM; 1008 return -ENOMEM;
1192 1009
@@ -1196,12 +1013,12 @@ int set_hashsize(const char *val, struct kernel_param *kp)
1196 1013
1197 write_lock_bh(&nf_conntrack_lock); 1014 write_lock_bh(&nf_conntrack_lock);
1198 for (i = 0; i < nf_conntrack_htable_size; i++) { 1015 for (i = 0; i < nf_conntrack_htable_size; i++) {
1199 while (!list_empty(&nf_conntrack_hash[i])) { 1016 while (!hlist_empty(&nf_conntrack_hash[i])) {
1200 h = list_entry(nf_conntrack_hash[i].next, 1017 h = hlist_entry(nf_conntrack_hash[i].first,
1201 struct nf_conntrack_tuple_hash, list); 1018 struct nf_conntrack_tuple_hash, hnode);
1202 list_del(&h->list); 1019 hlist_del(&h->hnode);
1203 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1020 bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
1204 list_add_tail(&h->list, &hash[bucket]); 1021 hlist_add_head(&h->hnode, &hash[bucket]);
1205 } 1022 }
1206 } 1023 }
1207 old_size = nf_conntrack_htable_size; 1024 old_size = nf_conntrack_htable_size;
@@ -1214,7 +1031,7 @@ int set_hashsize(const char *val, struct kernel_param *kp)
1214 nf_conntrack_hash_rnd = rnd; 1031 nf_conntrack_hash_rnd = rnd;
1215 write_unlock_bh(&nf_conntrack_lock); 1032 write_unlock_bh(&nf_conntrack_lock);
1216 1033
1217 free_conntrack_hash(old_hash, old_vmalloced, old_size); 1034 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
1218 return 0; 1035 return 0;
1219} 1036}
1220 1037
@@ -1223,50 +1040,58 @@ module_param_call(hashsize, set_hashsize, param_get_uint,
1223 1040
1224int __init nf_conntrack_init(void) 1041int __init nf_conntrack_init(void)
1225{ 1042{
1043 int max_factor = 8;
1226 int ret; 1044 int ret;
1227 1045
1228 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB 1046 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
1229 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ 1047 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
1230 if (!nf_conntrack_htable_size) { 1048 if (!nf_conntrack_htable_size) {
1231 nf_conntrack_htable_size 1049 nf_conntrack_htable_size
1232 = (((num_physpages << PAGE_SHIFT) / 16384) 1050 = (((num_physpages << PAGE_SHIFT) / 16384)
1233 / sizeof(struct list_head)); 1051 / sizeof(struct hlist_head));
1234 if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) 1052 if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
1235 nf_conntrack_htable_size = 8192; 1053 nf_conntrack_htable_size = 16384;
1236 if (nf_conntrack_htable_size < 16) 1054 if (nf_conntrack_htable_size < 32)
1237 nf_conntrack_htable_size = 16; 1055 nf_conntrack_htable_size = 32;
1056
1057 /* Use a max. factor of four by default to get the same max as
1058 * with the old struct list_heads. When a table size is given
1059 * we use the old value of 8 to avoid reducing the max.
1060 * entries. */
1061 max_factor = 4;
1238 } 1062 }
1239 nf_conntrack_max = 8 * nf_conntrack_htable_size; 1063 nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
1240 1064 &nf_conntrack_vmalloc);
1241 printk("nf_conntrack version %s (%u buckets, %d max)\n",
1242 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1243 nf_conntrack_max);
1244
1245 nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
1246 &nf_conntrack_vmalloc);
1247 if (!nf_conntrack_hash) { 1065 if (!nf_conntrack_hash) {
1248 printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); 1066 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1249 goto err_out; 1067 goto err_out;
1250 } 1068 }
1251 1069
1252 ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic", 1070 nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1253 sizeof(struct nf_conn)); 1071
1254 if (ret < 0) { 1072 printk("nf_conntrack version %s (%u buckets, %d max)\n",
1073 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1074 nf_conntrack_max);
1075
1076 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1077 sizeof(struct nf_conn),
1078 0, 0, NULL, NULL);
1079 if (!nf_conntrack_cachep) {
1255 printk(KERN_ERR "Unable to create nf_conn slab cache\n"); 1080 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1256 goto err_free_hash; 1081 goto err_free_hash;
1257 } 1082 }
1258 1083
1259 nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect", 1084 ret = nf_conntrack_proto_init();
1260 sizeof(struct nf_conntrack_expect), 1085 if (ret < 0)
1261 0, 0, NULL, NULL);
1262 if (!nf_conntrack_expect_cachep) {
1263 printk(KERN_ERR "Unable to create nf_expect slab cache\n");
1264 goto err_free_conntrack_slab; 1086 goto err_free_conntrack_slab;
1265 }
1266 1087
1267 ret = nf_conntrack_proto_init(); 1088 ret = nf_conntrack_expect_init();
1268 if (ret < 0) 1089 if (ret < 0)
1269 goto out_free_expect_slab; 1090 goto out_fini_proto;
1091
1092 ret = nf_conntrack_helper_init();
1093 if (ret < 0)
1094 goto out_fini_expect;
1270 1095
1271 /* For use by REJECT target */ 1096 /* For use by REJECT target */
1272 rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach); 1097 rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach);
@@ -1280,13 +1105,15 @@ int __init nf_conntrack_init(void)
1280 1105
1281 return ret; 1106 return ret;
1282 1107
1283out_free_expect_slab: 1108out_fini_expect:
1284 kmem_cache_destroy(nf_conntrack_expect_cachep); 1109 nf_conntrack_expect_fini();
1110out_fini_proto:
1111 nf_conntrack_proto_fini();
1285err_free_conntrack_slab: 1112err_free_conntrack_slab:
1286 nf_conntrack_unregister_cache(NF_CT_F_BASIC); 1113 kmem_cache_destroy(nf_conntrack_cachep);
1287err_free_hash: 1114err_free_hash:
1288 free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, 1115 nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
1289 nf_conntrack_htable_size); 1116 nf_conntrack_htable_size);
1290err_out: 1117err_out:
1291 return -ENOMEM; 1118 return -ENOMEM;
1292} 1119}
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 6bd421df2dbc..83c41ac3505b 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -26,8 +26,8 @@
26ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain); 26ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
27EXPORT_SYMBOL_GPL(nf_conntrack_chain); 27EXPORT_SYMBOL_GPL(nf_conntrack_chain);
28 28
29ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain); 29ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain);
30EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain); 30EXPORT_SYMBOL_GPL(nf_ct_expect_chain);
31 31
32DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); 32DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
33EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache); 33EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
@@ -103,14 +103,14 @@ int nf_conntrack_unregister_notifier(struct notifier_block *nb)
103} 103}
104EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); 104EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
105 105
106int nf_conntrack_expect_register_notifier(struct notifier_block *nb) 106int nf_ct_expect_register_notifier(struct notifier_block *nb)
107{ 107{
108 return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb); 108 return atomic_notifier_chain_register(&nf_ct_expect_chain, nb);
109} 109}
110EXPORT_SYMBOL_GPL(nf_conntrack_expect_register_notifier); 110EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
111 111
112int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) 112int nf_ct_expect_unregister_notifier(struct notifier_block *nb)
113{ 113{
114 return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, nb); 114 return atomic_notifier_chain_unregister(&nf_ct_expect_chain, nb);
115} 115}
116EXPORT_SYMBOL_GPL(nf_conntrack_expect_unregister_notifier); 116EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 504fb6c083f9..2191fe008f60 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -19,6 +19,7 @@
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/percpu.h> 20#include <linux/percpu.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/jhash.h>
22 23
23#include <net/netfilter/nf_conntrack.h> 24#include <net/netfilter/nf_conntrack.h>
24#include <net/netfilter/nf_conntrack_core.h> 25#include <net/netfilter/nf_conntrack_core.h>
@@ -26,11 +27,20 @@
26#include <net/netfilter/nf_conntrack_helper.h> 27#include <net/netfilter/nf_conntrack_helper.h>
27#include <net/netfilter/nf_conntrack_tuple.h> 28#include <net/netfilter/nf_conntrack_tuple.h>
28 29
29LIST_HEAD(nf_conntrack_expect_list); 30struct hlist_head *nf_ct_expect_hash __read_mostly;
30EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); 31EXPORT_SYMBOL_GPL(nf_ct_expect_hash);
31 32
32struct kmem_cache *nf_conntrack_expect_cachep __read_mostly; 33unsigned int nf_ct_expect_hsize __read_mostly;
33static unsigned int nf_conntrack_expect_next_id; 34EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
35
36static unsigned int nf_ct_expect_hash_rnd __read_mostly;
37static unsigned int nf_ct_expect_count;
38unsigned int nf_ct_expect_max __read_mostly;
39static int nf_ct_expect_hash_rnd_initted __read_mostly;
40static int nf_ct_expect_vmalloc;
41
42static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
43static unsigned int nf_ct_expect_next_id;
34 44
35/* nf_conntrack_expect helper functions */ 45/* nf_conntrack_expect helper functions */
36void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) 46void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
@@ -40,60 +50,83 @@ void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
40 NF_CT_ASSERT(master_help); 50 NF_CT_ASSERT(master_help);
41 NF_CT_ASSERT(!timer_pending(&exp->timeout)); 51 NF_CT_ASSERT(!timer_pending(&exp->timeout));
42 52
43 list_del(&exp->list); 53 hlist_del(&exp->hnode);
44 NF_CT_STAT_INC(expect_delete); 54 nf_ct_expect_count--;
55
56 hlist_del(&exp->lnode);
45 master_help->expecting--; 57 master_help->expecting--;
46 nf_conntrack_expect_put(exp); 58 nf_ct_expect_put(exp);
59
60 NF_CT_STAT_INC(expect_delete);
47} 61}
48EXPORT_SYMBOL_GPL(nf_ct_unlink_expect); 62EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);
49 63
50static void expectation_timed_out(unsigned long ul_expect) 64static void nf_ct_expectation_timed_out(unsigned long ul_expect)
51{ 65{
52 struct nf_conntrack_expect *exp = (void *)ul_expect; 66 struct nf_conntrack_expect *exp = (void *)ul_expect;
53 67
54 write_lock_bh(&nf_conntrack_lock); 68 write_lock_bh(&nf_conntrack_lock);
55 nf_ct_unlink_expect(exp); 69 nf_ct_unlink_expect(exp);
56 write_unlock_bh(&nf_conntrack_lock); 70 write_unlock_bh(&nf_conntrack_lock);
57 nf_conntrack_expect_put(exp); 71 nf_ct_expect_put(exp);
72}
73
74static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
75{
76 if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
77 get_random_bytes(&nf_ct_expect_hash_rnd, 4);
78 nf_ct_expect_hash_rnd_initted = 1;
79 }
80
81 return jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
82 (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
83 tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
84 nf_ct_expect_hsize;
58} 85}
59 86
60struct nf_conntrack_expect * 87struct nf_conntrack_expect *
61__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple) 88__nf_ct_expect_find(const struct nf_conntrack_tuple *tuple)
62{ 89{
63 struct nf_conntrack_expect *i; 90 struct nf_conntrack_expect *i;
91 struct hlist_node *n;
92 unsigned int h;
93
94 if (!nf_ct_expect_count)
95 return NULL;
64 96
65 list_for_each_entry(i, &nf_conntrack_expect_list, list) { 97 h = nf_ct_expect_dst_hash(tuple);
98 hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) {
66 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) 99 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
67 return i; 100 return i;
68 } 101 }
69 return NULL; 102 return NULL;
70} 103}
71EXPORT_SYMBOL_GPL(__nf_conntrack_expect_find); 104EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
72 105
73/* Just find a expectation corresponding to a tuple. */ 106/* Just find a expectation corresponding to a tuple. */
74struct nf_conntrack_expect * 107struct nf_conntrack_expect *
75nf_conntrack_expect_find_get(const struct nf_conntrack_tuple *tuple) 108nf_ct_expect_find_get(const struct nf_conntrack_tuple *tuple)
76{ 109{
77 struct nf_conntrack_expect *i; 110 struct nf_conntrack_expect *i;
78 111
79 read_lock_bh(&nf_conntrack_lock); 112 read_lock_bh(&nf_conntrack_lock);
80 i = __nf_conntrack_expect_find(tuple); 113 i = __nf_ct_expect_find(tuple);
81 if (i) 114 if (i)
82 atomic_inc(&i->use); 115 atomic_inc(&i->use);
83 read_unlock_bh(&nf_conntrack_lock); 116 read_unlock_bh(&nf_conntrack_lock);
84 117
85 return i; 118 return i;
86} 119}
87EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get); 120EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
88 121
89/* If an expectation for this connection is found, it gets delete from 122/* If an expectation for this connection is found, it gets delete from
90 * global list then returned. */ 123 * global list then returned. */
91struct nf_conntrack_expect * 124struct nf_conntrack_expect *
92find_expectation(const struct nf_conntrack_tuple *tuple) 125nf_ct_find_expectation(const struct nf_conntrack_tuple *tuple)
93{ 126{
94 struct nf_conntrack_expect *exp; 127 struct nf_conntrack_expect *exp;
95 128
96 exp = __nf_conntrack_expect_find(tuple); 129 exp = __nf_ct_expect_find(tuple);
97 if (!exp) 130 if (!exp)
98 return NULL; 131 return NULL;
99 132
@@ -119,17 +152,18 @@ find_expectation(const struct nf_conntrack_tuple *tuple)
119/* delete all expectations for this conntrack */ 152/* delete all expectations for this conntrack */
120void nf_ct_remove_expectations(struct nf_conn *ct) 153void nf_ct_remove_expectations(struct nf_conn *ct)
121{ 154{
122 struct nf_conntrack_expect *i, *tmp;
123 struct nf_conn_help *help = nfct_help(ct); 155 struct nf_conn_help *help = nfct_help(ct);
156 struct nf_conntrack_expect *exp;
157 struct hlist_node *n, *next;
124 158
125 /* Optimization: most connection never expect any others. */ 159 /* Optimization: most connection never expect any others. */
126 if (!help || help->expecting == 0) 160 if (!help || help->expecting == 0)
127 return; 161 return;
128 162
129 list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) { 163 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
130 if (i->master == ct && del_timer(&i->timeout)) { 164 if (del_timer(&exp->timeout)) {
131 nf_ct_unlink_expect(i); 165 nf_ct_unlink_expect(exp);
132 nf_conntrack_expect_put(i); 166 nf_ct_expect_put(exp);
133 } 167 }
134 } 168 }
135} 169}
@@ -141,25 +175,16 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
141{ 175{
142 /* Part covered by intersection of masks must be unequal, 176 /* Part covered by intersection of masks must be unequal,
143 otherwise they clash */ 177 otherwise they clash */
144 struct nf_conntrack_tuple intersect_mask; 178 struct nf_conntrack_tuple_mask intersect_mask;
145 int count; 179 int count;
146 180
147 intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
148 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all; 181 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
149 intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
150 intersect_mask.dst.protonum = a->mask.dst.protonum
151 & b->mask.dst.protonum;
152 182
153 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ 183 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
154 intersect_mask.src.u3.all[count] = 184 intersect_mask.src.u3.all[count] =
155 a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; 185 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
156 } 186 }
157 187
158 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
159 intersect_mask.dst.u3.all[count] =
160 a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
161 }
162
163 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); 188 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
164} 189}
165 190
@@ -168,36 +193,29 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
168{ 193{
169 return a->master == b->master 194 return a->master == b->master
170 && nf_ct_tuple_equal(&a->tuple, &b->tuple) 195 && nf_ct_tuple_equal(&a->tuple, &b->tuple)
171 && nf_ct_tuple_equal(&a->mask, &b->mask); 196 && nf_ct_tuple_mask_equal(&a->mask, &b->mask);
172} 197}
173 198
174/* Generally a bad idea to call this: could have matched already. */ 199/* Generally a bad idea to call this: could have matched already. */
175void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp) 200void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
176{ 201{
177 struct nf_conntrack_expect *i;
178
179 write_lock_bh(&nf_conntrack_lock); 202 write_lock_bh(&nf_conntrack_lock);
180 /* choose the oldest expectation to evict */ 203 if (del_timer(&exp->timeout)) {
181 list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { 204 nf_ct_unlink_expect(exp);
182 if (expect_matches(i, exp) && del_timer(&i->timeout)) { 205 nf_ct_expect_put(exp);
183 nf_ct_unlink_expect(i);
184 write_unlock_bh(&nf_conntrack_lock);
185 nf_conntrack_expect_put(i);
186 return;
187 }
188 } 206 }
189 write_unlock_bh(&nf_conntrack_lock); 207 write_unlock_bh(&nf_conntrack_lock);
190} 208}
191EXPORT_SYMBOL_GPL(nf_conntrack_unexpect_related); 209EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
192 210
193/* We don't increase the master conntrack refcount for non-fulfilled 211/* We don't increase the master conntrack refcount for non-fulfilled
194 * conntracks. During the conntrack destruction, the expectations are 212 * conntracks. During the conntrack destruction, the expectations are
195 * always killed before the conntrack itself */ 213 * always killed before the conntrack itself */
196struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me) 214struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
197{ 215{
198 struct nf_conntrack_expect *new; 216 struct nf_conntrack_expect *new;
199 217
200 new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC); 218 new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
201 if (!new) 219 if (!new)
202 return NULL; 220 return NULL;
203 221
@@ -205,12 +223,12 @@ struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
205 atomic_set(&new->use, 1); 223 atomic_set(&new->use, 1);
206 return new; 224 return new;
207} 225}
208EXPORT_SYMBOL_GPL(nf_conntrack_expect_alloc); 226EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
209 227
210void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family, 228void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
211 union nf_conntrack_address *saddr, 229 union nf_conntrack_address *saddr,
212 union nf_conntrack_address *daddr, 230 union nf_conntrack_address *daddr,
213 u_int8_t proto, __be16 *src, __be16 *dst) 231 u_int8_t proto, __be16 *src, __be16 *dst)
214{ 232{
215 int len; 233 int len;
216 234
@@ -224,8 +242,6 @@ void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family,
224 exp->helper = NULL; 242 exp->helper = NULL;
225 exp->tuple.src.l3num = family; 243 exp->tuple.src.l3num = family;
226 exp->tuple.dst.protonum = proto; 244 exp->tuple.dst.protonum = proto;
227 exp->mask.src.l3num = 0xFFFF;
228 exp->mask.dst.protonum = 0xFF;
229 245
230 if (saddr) { 246 if (saddr) {
231 memcpy(&exp->tuple.src.u3, saddr, len); 247 memcpy(&exp->tuple.src.u3, saddr, len);
@@ -242,21 +258,6 @@ void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family,
242 memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3)); 258 memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
243 } 259 }
244 260
245 if (daddr) {
246 memcpy(&exp->tuple.dst.u3, daddr, len);
247 if (sizeof(exp->tuple.dst.u3) > len)
248 /* address needs to be cleared for nf_ct_tuple_equal */
249 memset((void *)&exp->tuple.dst.u3 + len, 0x00,
250 sizeof(exp->tuple.dst.u3) - len);
251 memset(&exp->mask.dst.u3, 0xFF, len);
252 if (sizeof(exp->mask.dst.u3) > len)
253 memset((void *)&exp->mask.dst.u3 + len, 0x00,
254 sizeof(exp->mask.dst.u3) - len);
255 } else {
256 memset(&exp->tuple.dst.u3, 0x00, sizeof(exp->tuple.dst.u3));
257 memset(&exp->mask.dst.u3, 0x00, sizeof(exp->mask.dst.u3));
258 }
259
260 if (src) { 261 if (src) {
261 exp->tuple.src.u.all = (__force u16)*src; 262 exp->tuple.src.u.all = (__force u16)*src;
262 exp->mask.src.u.all = 0xFFFF; 263 exp->mask.src.u.all = 0xFFFF;
@@ -265,36 +266,42 @@ void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family,
265 exp->mask.src.u.all = 0; 266 exp->mask.src.u.all = 0;
266 } 267 }
267 268
268 if (dst) { 269 memcpy(&exp->tuple.dst.u3, daddr, len);
269 exp->tuple.dst.u.all = (__force u16)*dst; 270 if (sizeof(exp->tuple.dst.u3) > len)
270 exp->mask.dst.u.all = 0xFFFF; 271 /* address needs to be cleared for nf_ct_tuple_equal */
271 } else { 272 memset((void *)&exp->tuple.dst.u3 + len, 0x00,
272 exp->tuple.dst.u.all = 0; 273 sizeof(exp->tuple.dst.u3) - len);
273 exp->mask.dst.u.all = 0; 274
274 } 275 exp->tuple.dst.u.all = (__force u16)*dst;
275} 276}
276EXPORT_SYMBOL_GPL(nf_conntrack_expect_init); 277EXPORT_SYMBOL_GPL(nf_ct_expect_init);
277 278
278void nf_conntrack_expect_put(struct nf_conntrack_expect *exp) 279void nf_ct_expect_put(struct nf_conntrack_expect *exp)
279{ 280{
280 if (atomic_dec_and_test(&exp->use)) 281 if (atomic_dec_and_test(&exp->use))
281 kmem_cache_free(nf_conntrack_expect_cachep, exp); 282 kmem_cache_free(nf_ct_expect_cachep, exp);
282} 283}
283EXPORT_SYMBOL_GPL(nf_conntrack_expect_put); 284EXPORT_SYMBOL_GPL(nf_ct_expect_put);
284 285
285static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp) 286static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
286{ 287{
287 struct nf_conn_help *master_help = nfct_help(exp->master); 288 struct nf_conn_help *master_help = nfct_help(exp->master);
289 unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
288 290
289 atomic_inc(&exp->use); 291 atomic_inc(&exp->use);
292
293 hlist_add_head(&exp->lnode, &master_help->expectations);
290 master_help->expecting++; 294 master_help->expecting++;
291 list_add(&exp->list, &nf_conntrack_expect_list);
292 295
293 setup_timer(&exp->timeout, expectation_timed_out, (unsigned long)exp); 296 hlist_add_head(&exp->hnode, &nf_ct_expect_hash[h]);
297 nf_ct_expect_count++;
298
299 setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
300 (unsigned long)exp);
294 exp->timeout.expires = jiffies + master_help->helper->timeout * HZ; 301 exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
295 add_timer(&exp->timeout); 302 add_timer(&exp->timeout);
296 303
297 exp->id = ++nf_conntrack_expect_next_id; 304 exp->id = ++nf_ct_expect_next_id;
298 atomic_inc(&exp->use); 305 atomic_inc(&exp->use);
299 NF_CT_STAT_INC(expect_create); 306 NF_CT_STAT_INC(expect_create);
300} 307}
@@ -302,16 +309,16 @@ static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
302/* Race with expectations being used means we could have none to find; OK. */ 309/* Race with expectations being used means we could have none to find; OK. */
303static void evict_oldest_expect(struct nf_conn *master) 310static void evict_oldest_expect(struct nf_conn *master)
304{ 311{
305 struct nf_conntrack_expect *i; 312 struct nf_conn_help *master_help = nfct_help(master);
313 struct nf_conntrack_expect *exp = NULL;
314 struct hlist_node *n;
306 315
307 list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { 316 hlist_for_each_entry(exp, n, &master_help->expectations, lnode)
308 if (i->master == master) { 317 ; /* nothing */
309 if (del_timer(&i->timeout)) { 318
310 nf_ct_unlink_expect(i); 319 if (exp && del_timer(&exp->timeout)) {
311 nf_conntrack_expect_put(i); 320 nf_ct_unlink_expect(exp);
312 } 321 nf_ct_expect_put(exp);
313 break;
314 }
315 } 322 }
316} 323}
317 324
@@ -327,11 +334,13 @@ static inline int refresh_timer(struct nf_conntrack_expect *i)
327 return 1; 334 return 1;
328} 335}
329 336
330int nf_conntrack_expect_related(struct nf_conntrack_expect *expect) 337int nf_ct_expect_related(struct nf_conntrack_expect *expect)
331{ 338{
332 struct nf_conntrack_expect *i; 339 struct nf_conntrack_expect *i;
333 struct nf_conn *master = expect->master; 340 struct nf_conn *master = expect->master;
334 struct nf_conn_help *master_help = nfct_help(master); 341 struct nf_conn_help *master_help = nfct_help(master);
342 struct hlist_node *n;
343 unsigned int h;
335 int ret; 344 int ret;
336 345
337 NF_CT_ASSERT(master_help); 346 NF_CT_ASSERT(master_help);
@@ -341,7 +350,8 @@ int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
341 ret = -ESHUTDOWN; 350 ret = -ESHUTDOWN;
342 goto out; 351 goto out;
343 } 352 }
344 list_for_each_entry(i, &nf_conntrack_expect_list, list) { 353 h = nf_ct_expect_dst_hash(&expect->tuple);
354 hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) {
345 if (expect_matches(i, expect)) { 355 if (expect_matches(i, expect)) {
346 /* Refresh timer: if it's dying, ignore.. */ 356 /* Refresh timer: if it's dying, ignore.. */
347 if (refresh_timer(i)) { 357 if (refresh_timer(i)) {
@@ -358,57 +368,86 @@ int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
358 master_help->expecting >= master_help->helper->max_expected) 368 master_help->expecting >= master_help->helper->max_expected)
359 evict_oldest_expect(master); 369 evict_oldest_expect(master);
360 370
361 nf_conntrack_expect_insert(expect); 371 if (nf_ct_expect_count >= nf_ct_expect_max) {
362 nf_conntrack_expect_event(IPEXP_NEW, expect); 372 if (net_ratelimit())
373 printk(KERN_WARNING
374 "nf_conntrack: expectation table full");
375 ret = -EMFILE;
376 goto out;
377 }
378
379 nf_ct_expect_insert(expect);
380 nf_ct_expect_event(IPEXP_NEW, expect);
363 ret = 0; 381 ret = 0;
364out: 382out:
365 write_unlock_bh(&nf_conntrack_lock); 383 write_unlock_bh(&nf_conntrack_lock);
366 return ret; 384 return ret;
367} 385}
368EXPORT_SYMBOL_GPL(nf_conntrack_expect_related); 386EXPORT_SYMBOL_GPL(nf_ct_expect_related);
369 387
370#ifdef CONFIG_PROC_FS 388#ifdef CONFIG_PROC_FS
371static void *exp_seq_start(struct seq_file *s, loff_t *pos) 389struct ct_expect_iter_state {
390 unsigned int bucket;
391};
392
393static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
372{ 394{
373 struct list_head *e = &nf_conntrack_expect_list; 395 struct ct_expect_iter_state *st = seq->private;
374 loff_t i;
375 396
376 /* strange seq_file api calls stop even if we fail, 397 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
377 * thus we need to grab lock since stop unlocks */ 398 if (!hlist_empty(&nf_ct_expect_hash[st->bucket]))
378 read_lock_bh(&nf_conntrack_lock); 399 return nf_ct_expect_hash[st->bucket].first;
400 }
401 return NULL;
402}
379 403
380 if (list_empty(e)) 404static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
381 return NULL; 405 struct hlist_node *head)
406{
407 struct ct_expect_iter_state *st = seq->private;
382 408
383 for (i = 0; i <= *pos; i++) { 409 head = head->next;
384 e = e->next; 410 while (head == NULL) {
385 if (e == &nf_conntrack_expect_list) 411 if (++st->bucket >= nf_ct_expect_hsize)
386 return NULL; 412 return NULL;
413 head = nf_ct_expect_hash[st->bucket].first;
387 } 414 }
388 return e; 415 return head;
389} 416}
390 417
391static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) 418static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
392{ 419{
393 struct list_head *e = v; 420 struct hlist_node *head = ct_expect_get_first(seq);
394 421
395 ++*pos; 422 if (head)
396 e = e->next; 423 while (pos && (head = ct_expect_get_next(seq, head)))
424 pos--;
425 return pos ? NULL : head;
426}
397 427
398 if (e == &nf_conntrack_expect_list) 428static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
399 return NULL; 429{
430 read_lock_bh(&nf_conntrack_lock);
431 return ct_expect_get_idx(seq, *pos);
432}
400 433
401 return e; 434static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
435{
436 (*pos)++;
437 return ct_expect_get_next(seq, v);
402} 438}
403 439
404static void exp_seq_stop(struct seq_file *s, void *v) 440static void exp_seq_stop(struct seq_file *seq, void *v)
405{ 441{
406 read_unlock_bh(&nf_conntrack_lock); 442 read_unlock_bh(&nf_conntrack_lock);
407} 443}
408 444
409static int exp_seq_show(struct seq_file *s, void *v) 445static int exp_seq_show(struct seq_file *s, void *v)
410{ 446{
411 struct nf_conntrack_expect *expect = v; 447 struct nf_conntrack_expect *expect;
448 struct hlist_node *n = v;
449
450 expect = hlist_entry(n, struct nf_conntrack_expect, hnode);
412 451
413 if (expect->timeout.function) 452 if (expect->timeout.function)
414 seq_printf(s, "%ld ", timer_pending(&expect->timeout) 453 seq_printf(s, "%ld ", timer_pending(&expect->timeout)
@@ -425,7 +464,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
425 return seq_putc(s, '\n'); 464 return seq_putc(s, '\n');
426} 465}
427 466
428static struct seq_operations exp_seq_ops = { 467static const struct seq_operations exp_seq_ops = {
429 .start = exp_seq_start, 468 .start = exp_seq_start,
430 .next = exp_seq_next, 469 .next = exp_seq_next,
431 .stop = exp_seq_stop, 470 .stop = exp_seq_stop,
@@ -434,14 +473,96 @@ static struct seq_operations exp_seq_ops = {
434 473
435static int exp_open(struct inode *inode, struct file *file) 474static int exp_open(struct inode *inode, struct file *file)
436{ 475{
437 return seq_open(file, &exp_seq_ops); 476 struct seq_file *seq;
477 struct ct_expect_iter_state *st;
478 int ret;
479
480 st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
481 if (st == NULL)
482 return -ENOMEM;
483 ret = seq_open(file, &exp_seq_ops);
484 if (ret)
485 goto out_free;
486 seq = file->private_data;
487 seq->private = st;
488 memset(st, 0, sizeof(struct ct_expect_iter_state));
489 return ret;
490out_free:
491 kfree(st);
492 return ret;
438} 493}
439 494
440const struct file_operations exp_file_ops = { 495static const struct file_operations exp_file_ops = {
441 .owner = THIS_MODULE, 496 .owner = THIS_MODULE,
442 .open = exp_open, 497 .open = exp_open,
443 .read = seq_read, 498 .read = seq_read,
444 .llseek = seq_lseek, 499 .llseek = seq_lseek,
445 .release = seq_release 500 .release = seq_release_private,
446}; 501};
447#endif /* CONFIG_PROC_FS */ 502#endif /* CONFIG_PROC_FS */
503
504static int __init exp_proc_init(void)
505{
506#ifdef CONFIG_PROC_FS
507 struct proc_dir_entry *proc;
508
509 proc = proc_net_fops_create("nf_conntrack_expect", 0440, &exp_file_ops);
510 if (!proc)
511 return -ENOMEM;
512#endif /* CONFIG_PROC_FS */
513 return 0;
514}
515
516static void exp_proc_remove(void)
517{
518#ifdef CONFIG_PROC_FS
519 proc_net_remove("nf_conntrack_expect");
520#endif /* CONFIG_PROC_FS */
521}
522
523module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
524
525int __init nf_conntrack_expect_init(void)
526{
527 int err = -ENOMEM;
528
529 if (!nf_ct_expect_hsize) {
530 nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
531 if (!nf_ct_expect_hsize)
532 nf_ct_expect_hsize = 1;
533 }
534 nf_ct_expect_max = nf_ct_expect_hsize * 4;
535
536 nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
537 &nf_ct_expect_vmalloc);
538 if (nf_ct_expect_hash == NULL)
539 goto err1;
540
541 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
542 sizeof(struct nf_conntrack_expect),
543 0, 0, NULL, NULL);
544 if (!nf_ct_expect_cachep)
545 goto err2;
546
547 err = exp_proc_init();
548 if (err < 0)
549 goto err3;
550
551 return 0;
552
553err3:
554 nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_vmalloc,
555 nf_ct_expect_hsize);
556err2:
557 kmem_cache_destroy(nf_ct_expect_cachep);
558err1:
559 return err;
560}
561
562void nf_conntrack_expect_fini(void)
563{
564 exp_proc_remove();
565 kmem_cache_destroy(nf_ct_expect_cachep);
566 nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_vmalloc,
567 nf_ct_expect_hsize);
568}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
new file mode 100644
index 000000000000..a1a65a1313b3
--- /dev/null
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -0,0 +1,195 @@
1/* Structure dynamic extension infrastructure
2 * Copyright (C) 2004 Rusty Russell IBM Corporation
3 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
4 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/rcupdate.h>
15#include <linux/slab.h>
16#include <linux/skbuff.h>
17#include <net/netfilter/nf_conntrack_extend.h>
18
19static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
20static DEFINE_MUTEX(nf_ct_ext_type_mutex);
21
22/* Horrible trick to figure out smallest amount worth kmallocing. */
23#define CACHE(x) (x) + 0 *
24enum {
25 NF_CT_EXT_MIN_SIZE =
26#include <linux/kmalloc_sizes.h>
27 1 };
28#undef CACHE
29
30void __nf_ct_ext_destroy(struct nf_conn *ct)
31{
32 unsigned int i;
33 struct nf_ct_ext_type *t;
34
35 for (i = 0; i < NF_CT_EXT_NUM; i++) {
36 if (!nf_ct_ext_exist(ct, i))
37 continue;
38
39 rcu_read_lock();
40 t = rcu_dereference(nf_ct_ext_types[i]);
41
42 /* Here the nf_ct_ext_type might have been unregisterd.
43 * I.e., it has responsible to cleanup private
44 * area in all conntracks when it is unregisterd.
45 */
46 if (t && t->destroy)
47 t->destroy(ct);
48 rcu_read_unlock();
49 }
50}
51EXPORT_SYMBOL(__nf_ct_ext_destroy);
52
53static void *
54nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
55{
56 unsigned int off, len, real_len;
57 struct nf_ct_ext_type *t;
58
59 rcu_read_lock();
60 t = rcu_dereference(nf_ct_ext_types[id]);
61 BUG_ON(t == NULL);
62 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
63 len = off + t->len;
64 real_len = t->alloc_size;
65 rcu_read_unlock();
66
67 *ext = kzalloc(real_len, gfp);
68 if (!*ext)
69 return NULL;
70
71 (*ext)->offset[id] = off;
72 (*ext)->len = len;
73 (*ext)->real_len = real_len;
74
75 return (void *)(*ext) + off;
76}
77
78void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
79{
80 struct nf_ct_ext *new;
81 int i, newlen, newoff;
82 struct nf_ct_ext_type *t;
83
84 if (!ct->ext)
85 return nf_ct_ext_create(&ct->ext, id, gfp);
86
87 if (nf_ct_ext_exist(ct, id))
88 return NULL;
89
90 rcu_read_lock();
91 t = rcu_dereference(nf_ct_ext_types[id]);
92 BUG_ON(t == NULL);
93
94 newoff = ALIGN(ct->ext->len, t->align);
95 newlen = newoff + t->len;
96 rcu_read_unlock();
97
98 if (newlen >= ct->ext->real_len) {
99 new = kmalloc(newlen, gfp);
100 if (!new)
101 return NULL;
102
103 memcpy(new, ct->ext, ct->ext->len);
104
105 for (i = 0; i < NF_CT_EXT_NUM; i++) {
106 if (!nf_ct_ext_exist(ct, i))
107 continue;
108
109 rcu_read_lock();
110 t = rcu_dereference(nf_ct_ext_types[i]);
111 if (t && t->move)
112 t->move(ct, ct->ext + ct->ext->offset[id]);
113 rcu_read_unlock();
114 }
115 kfree(ct->ext);
116 new->real_len = newlen;
117 ct->ext = new;
118 }
119
120 ct->ext->offset[id] = newoff;
121 ct->ext->len = newlen;
122 memset((void *)ct->ext + newoff, 0, newlen - newoff);
123 return (void *)ct->ext + newoff;
124}
125EXPORT_SYMBOL(__nf_ct_ext_add);
126
127static void update_alloc_size(struct nf_ct_ext_type *type)
128{
129 int i, j;
130 struct nf_ct_ext_type *t1, *t2;
131 enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;
132
133 /* unnecessary to update all types */
134 if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
135 min = type->id;
136 max = type->id;
137 }
138
139 /* This assumes that extended areas in conntrack for the types
140 whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
141 for (i = min; i <= max; i++) {
142 t1 = nf_ct_ext_types[i];
143 if (!t1)
144 continue;
145
146 t1->alloc_size = sizeof(struct nf_ct_ext)
147 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
148 + t1->len;
149 for (j = 0; j < NF_CT_EXT_NUM; j++) {
150 t2 = nf_ct_ext_types[j];
151 if (t2 == NULL || t2 == t1 ||
152 (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
153 continue;
154
155 t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
156 + t2->len;
157 }
158 if (t1->alloc_size < NF_CT_EXT_MIN_SIZE)
159 t1->alloc_size = NF_CT_EXT_MIN_SIZE;
160 }
161}
162
163/* This MUST be called in process context. */
164int nf_ct_extend_register(struct nf_ct_ext_type *type)
165{
166 int ret = 0;
167
168 mutex_lock(&nf_ct_ext_type_mutex);
169 if (nf_ct_ext_types[type->id]) {
170 ret = -EBUSY;
171 goto out;
172 }
173
174 /* This ensures that nf_ct_ext_create() can allocate enough area
175 before updating alloc_size */
176 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
177 + type->len;
178 rcu_assign_pointer(nf_ct_ext_types[type->id], type);
179 update_alloc_size(type);
180out:
181 mutex_unlock(&nf_ct_ext_type_mutex);
182 return ret;
183}
184EXPORT_SYMBOL_GPL(nf_ct_extend_register);
185
186/* This MUST be called in process context. */
187void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
188{
189 mutex_lock(&nf_ct_ext_type_mutex);
190 rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
191 update_alloc_size(type);
192 mutex_unlock(&nf_ct_ext_type_mutex);
193 synchronize_rcu();
194}
195EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 82db2aa53bfc..c763ee74ea02 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -51,12 +51,6 @@ unsigned int (*nf_nat_ftp_hook)(struct sk_buff **pskb,
51 struct nf_conntrack_expect *exp); 51 struct nf_conntrack_expect *exp);
52EXPORT_SYMBOL_GPL(nf_nat_ftp_hook); 52EXPORT_SYMBOL_GPL(nf_nat_ftp_hook);
53 53
54#if 0
55#define DEBUGP printk
56#else
57#define DEBUGP(format, args...)
58#endif
59
60static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char); 54static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char);
61static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char); 55static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char);
62static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *, 56static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *,
@@ -138,13 +132,13 @@ static int try_number(const char *data, size_t dlen, u_int32_t array[],
138 if (*data == term && i == array_size - 1) 132 if (*data == term && i == array_size - 1)
139 return len; 133 return len;
140 134
141 DEBUGP("Char %u (got %u nums) `%u' unexpected\n", 135 pr_debug("Char %u (got %u nums) `%u' unexpected\n",
142 len, i, *data); 136 len, i, *data);
143 return 0; 137 return 0;
144 } 138 }
145 } 139 }
146 DEBUGP("Failed to fill %u numbers separated by %c\n", array_size, sep); 140 pr_debug("Failed to fill %u numbers separated by %c\n",
147 141 array_size, sep);
148 return 0; 142 return 0;
149} 143}
150 144
@@ -178,13 +172,13 @@ static int get_port(const char *data, int start, size_t dlen, char delim,
178 if (tmp_port == 0) 172 if (tmp_port == 0)
179 break; 173 break;
180 *port = htons(tmp_port); 174 *port = htons(tmp_port);
181 DEBUGP("get_port: return %d\n", tmp_port); 175 pr_debug("get_port: return %d\n", tmp_port);
182 return i + 1; 176 return i + 1;
183 } 177 }
184 else if (data[i] >= '0' && data[i] <= '9') 178 else if (data[i] >= '0' && data[i] <= '9')
185 tmp_port = tmp_port*10 + data[i] - '0'; 179 tmp_port = tmp_port*10 + data[i] - '0';
186 else { /* Some other crap */ 180 else { /* Some other crap */
187 DEBUGP("get_port: invalid char.\n"); 181 pr_debug("get_port: invalid char.\n");
188 break; 182 break;
189 } 183 }
190 } 184 }
@@ -201,22 +195,22 @@ static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
201 /* First character is delimiter, then "1" for IPv4 or "2" for IPv6, 195 /* First character is delimiter, then "1" for IPv4 or "2" for IPv6,
202 then delimiter again. */ 196 then delimiter again. */
203 if (dlen <= 3) { 197 if (dlen <= 3) {
204 DEBUGP("EPRT: too short\n"); 198 pr_debug("EPRT: too short\n");
205 return 0; 199 return 0;
206 } 200 }
207 delim = data[0]; 201 delim = data[0];
208 if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) { 202 if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) {
209 DEBUGP("try_eprt: invalid delimitter.\n"); 203 pr_debug("try_eprt: invalid delimitter.\n");
210 return 0; 204 return 0;
211 } 205 }
212 206
213 if ((cmd->l3num == PF_INET && data[1] != '1') || 207 if ((cmd->l3num == PF_INET && data[1] != '1') ||
214 (cmd->l3num == PF_INET6 && data[1] != '2')) { 208 (cmd->l3num == PF_INET6 && data[1] != '2')) {
215 DEBUGP("EPRT: invalid protocol number.\n"); 209 pr_debug("EPRT: invalid protocol number.\n");
216 return 0; 210 return 0;
217 } 211 }
218 212
219 DEBUGP("EPRT: Got %c%c%c\n", delim, data[1], delim); 213 pr_debug("EPRT: Got %c%c%c\n", delim, data[1], delim);
220 214
221 if (data[1] == '1') { 215 if (data[1] == '1') {
222 u_int32_t array[4]; 216 u_int32_t array[4];
@@ -234,7 +228,7 @@ static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
234 228
235 if (length == 0) 229 if (length == 0)
236 return 0; 230 return 0;
237 DEBUGP("EPRT: Got IP address!\n"); 231 pr_debug("EPRT: Got IP address!\n");
238 /* Start offset includes initial "|1|", and trailing delimiter */ 232 /* Start offset includes initial "|1|", and trailing delimiter */
239 return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port); 233 return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port);
240} 234}
@@ -267,7 +261,7 @@ static int find_pattern(const char *data, size_t dlen,
267{ 261{
268 size_t i; 262 size_t i;
269 263
270 DEBUGP("find_pattern `%s': dlen = %u\n", pattern, dlen); 264 pr_debug("find_pattern `%s': dlen = %Zu\n", pattern, dlen);
271 if (dlen == 0) 265 if (dlen == 0)
272 return 0; 266 return 0;
273 267
@@ -282,17 +276,17 @@ static int find_pattern(const char *data, size_t dlen,
282#if 0 276#if 0
283 size_t i; 277 size_t i;
284 278
285 DEBUGP("ftp: string mismatch\n"); 279 pr_debug("ftp: string mismatch\n");
286 for (i = 0; i < plen; i++) { 280 for (i = 0; i < plen; i++) {
287 DEBUGP("ftp:char %u `%c'(%u) vs `%c'(%u)\n", 281 pr_debug("ftp:char %u `%c'(%u) vs `%c'(%u)\n",
288 i, data[i], data[i], 282 i, data[i], data[i],
289 pattern[i], pattern[i]); 283 pattern[i], pattern[i]);
290 } 284 }
291#endif 285#endif
292 return 0; 286 return 0;
293 } 287 }
294 288
295 DEBUGP("Pattern matches!\n"); 289 pr_debug("Pattern matches!\n");
296 /* Now we've found the constant string, try to skip 290 /* Now we've found the constant string, try to skip
297 to the 'skip' character */ 291 to the 'skip' character */
298 for (i = plen; data[i] != skip; i++) 292 for (i = plen; data[i] != skip; i++)
@@ -301,14 +295,14 @@ static int find_pattern(const char *data, size_t dlen,
301 /* Skip over the last character */ 295 /* Skip over the last character */
302 i++; 296 i++;
303 297
304 DEBUGP("Skipped up to `%c'!\n", skip); 298 pr_debug("Skipped up to `%c'!\n", skip);
305 299
306 *numoff = i; 300 *numoff = i;
307 *numlen = getnum(data + i, dlen - i, cmd, term); 301 *numlen = getnum(data + i, dlen - i, cmd, term);
308 if (!*numlen) 302 if (!*numlen)
309 return -1; 303 return -1;
310 304
311 DEBUGP("Match succeeded!\n"); 305 pr_debug("Match succeeded!\n");
312 return 1; 306 return 1;
313} 307}
314 308
@@ -364,6 +358,7 @@ static int help(struct sk_buff **pskb,
364 unsigned int matchlen, matchoff; 358 unsigned int matchlen, matchoff;
365 struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info; 359 struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
366 struct nf_conntrack_expect *exp; 360 struct nf_conntrack_expect *exp;
361 union nf_conntrack_address *daddr;
367 struct nf_conntrack_man cmd = {}; 362 struct nf_conntrack_man cmd = {};
368 unsigned int i; 363 unsigned int i;
369 int found = 0, ends_in_nl; 364 int found = 0, ends_in_nl;
@@ -372,7 +367,7 @@ static int help(struct sk_buff **pskb,
372 /* Until there's been traffic both ways, don't look in packets. */ 367 /* Until there's been traffic both ways, don't look in packets. */
373 if (ctinfo != IP_CT_ESTABLISHED 368 if (ctinfo != IP_CT_ESTABLISHED
374 && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) { 369 && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
375 DEBUGP("ftp: Conntrackinfo = %u\n", ctinfo); 370 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
376 return NF_ACCEPT; 371 return NF_ACCEPT;
377 } 372 }
378 373
@@ -383,8 +378,8 @@ static int help(struct sk_buff **pskb,
383 dataoff = protoff + th->doff * 4; 378 dataoff = protoff + th->doff * 4;
384 /* No data? */ 379 /* No data? */
385 if (dataoff >= (*pskb)->len) { 380 if (dataoff >= (*pskb)->len) {
386 DEBUGP("ftp: dataoff(%u) >= skblen(%u)\n", dataoff, 381 pr_debug("ftp: dataoff(%u) >= skblen(%u)\n", dataoff,
387 (*pskb)->len); 382 (*pskb)->len);
388 return NF_ACCEPT; 383 return NF_ACCEPT;
389 } 384 }
390 datalen = (*pskb)->len - dataoff; 385 datalen = (*pskb)->len - dataoff;
@@ -399,11 +394,11 @@ static int help(struct sk_buff **pskb,
399 /* Look up to see if we're just after a \n. */ 394 /* Look up to see if we're just after a \n. */
400 if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) { 395 if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
401 /* Now if this ends in \n, update ftp info. */ 396 /* Now if this ends in \n, update ftp info. */
402 DEBUGP("nf_conntrack_ftp_help: wrong seq pos %s(%u) or %s(%u)\n", 397 pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n",
403 ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)", 398 ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
404 ct_ftp_info->seq_aft_nl[dir][0], 399 ct_ftp_info->seq_aft_nl[dir][0],
405 ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)", 400 ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)",
406 ct_ftp_info->seq_aft_nl[dir][1]); 401 ct_ftp_info->seq_aft_nl[dir][1]);
407 ret = NF_ACCEPT; 402 ret = NF_ACCEPT;
408 goto out_update_nl; 403 goto out_update_nl;
409 } 404 }
@@ -441,11 +436,11 @@ static int help(struct sk_buff **pskb,
441 goto out_update_nl; 436 goto out_update_nl;
442 } 437 }
443 438
444 DEBUGP("conntrack_ftp: match `%.*s' (%u bytes at %u)\n", 439 pr_debug("conntrack_ftp: match `%.*s' (%u bytes at %u)\n",
445 (int)matchlen, fb_ptr + matchoff, 440 matchlen, fb_ptr + matchoff,
446 matchlen, ntohl(th->seq) + matchoff); 441 matchlen, ntohl(th->seq) + matchoff);
447 442
448 exp = nf_conntrack_expect_alloc(ct); 443 exp = nf_ct_expect_alloc(ct);
449 if (exp == NULL) { 444 if (exp == NULL) {
450 ret = NF_DROP; 445 ret = NF_DROP;
451 goto out; 446 goto out;
@@ -454,7 +449,7 @@ static int help(struct sk_buff **pskb,
454 /* We refer to the reverse direction ("!dir") tuples here, 449 /* We refer to the reverse direction ("!dir") tuples here,
455 * because we're expecting something in the other direction. 450 * because we're expecting something in the other direction.
456 * Doesn't matter unless NAT is happening. */ 451 * Doesn't matter unless NAT is happening. */
457 exp->tuple.dst.u3 = ct->tuplehash[!dir].tuple.dst.u3; 452 daddr = &ct->tuplehash[!dir].tuple.dst.u3;
458 453
459 /* Update the ftp info */ 454 /* Update the ftp info */
460 if ((cmd.l3num == ct->tuplehash[dir].tuple.src.l3num) && 455 if ((cmd.l3num == ct->tuplehash[dir].tuple.src.l3num) &&
@@ -465,14 +460,16 @@ static int help(struct sk_buff **pskb,
465 different IP address. Simply don't record it for 460 different IP address. Simply don't record it for
466 NAT. */ 461 NAT. */
467 if (cmd.l3num == PF_INET) { 462 if (cmd.l3num == PF_INET) {
468 DEBUGP("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT " != " NIPQUAD_FMT "\n", 463 pr_debug("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT
469 NIPQUAD(cmd.u3.ip), 464 " != " NIPQUAD_FMT "\n",
470 NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip)); 465 NIPQUAD(cmd.u3.ip),
466 NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip));
471 } else { 467 } else {
472 DEBUGP("conntrack_ftp: NOT RECORDING: " NIP6_FMT " != " NIP6_FMT "\n", 468 pr_debug("conntrack_ftp: NOT RECORDING: " NIP6_FMT
473 NIP6(*((struct in6_addr *)cmd.u3.ip6)), 469 " != " NIP6_FMT "\n",
474 NIP6(*((struct in6_addr *)ct->tuplehash[dir] 470 NIP6(*((struct in6_addr *)cmd.u3.ip6)),
475 .tuple.src.u3.ip6))); 471 NIP6(*((struct in6_addr *)
472 ct->tuplehash[dir].tuple.src.u3.ip6)));
476 } 473 }
477 474
478 /* Thanks to Cristiano Lincoln Mattos 475 /* Thanks to Cristiano Lincoln Mattos
@@ -483,37 +480,12 @@ static int help(struct sk_buff **pskb,
483 ret = NF_ACCEPT; 480 ret = NF_ACCEPT;
484 goto out_put_expect; 481 goto out_put_expect;
485 } 482 }
486 memcpy(&exp->tuple.dst.u3, &cmd.u3.all, 483 daddr = &cmd.u3;
487 sizeof(exp->tuple.dst.u3));
488 }
489
490 exp->tuple.src.u3 = ct->tuplehash[!dir].tuple.src.u3;
491 exp->tuple.src.l3num = cmd.l3num;
492 exp->tuple.src.u.tcp.port = 0;
493 exp->tuple.dst.u.tcp.port = cmd.u.tcp.port;
494 exp->tuple.dst.protonum = IPPROTO_TCP;
495
496 exp->mask = (struct nf_conntrack_tuple)
497 { .src = { .l3num = 0xFFFF,
498 .u = { .tcp = { 0 }},
499 },
500 .dst = { .protonum = 0xFF,
501 .u = { .tcp = { __constant_htons(0xFFFF) }},
502 },
503 };
504 if (cmd.l3num == PF_INET) {
505 exp->mask.src.u3.ip = htonl(0xFFFFFFFF);
506 exp->mask.dst.u3.ip = htonl(0xFFFFFFFF);
507 } else {
508 memset(exp->mask.src.u3.ip6, 0xFF,
509 sizeof(exp->mask.src.u3.ip6));
510 memset(exp->mask.dst.u3.ip6, 0xFF,
511 sizeof(exp->mask.src.u3.ip6));
512 } 484 }
513 485
514 exp->expectfn = NULL; 486 nf_ct_expect_init(exp, cmd.l3num,
515 exp->helper = NULL; 487 &ct->tuplehash[!dir].tuple.src.u3, daddr,
516 exp->flags = 0; 488 IPPROTO_TCP, NULL, &cmd.u.tcp.port);
517 489
518 /* Now, NAT might want to mangle the packet, and register the 490 /* Now, NAT might want to mangle the packet, and register the
519 * (possibly changed) expectation itself. */ 491 * (possibly changed) expectation itself. */
@@ -523,14 +495,14 @@ static int help(struct sk_buff **pskb,
523 matchoff, matchlen, exp); 495 matchoff, matchlen, exp);
524 else { 496 else {
525 /* Can't expect this? Best to drop packet now. */ 497 /* Can't expect this? Best to drop packet now. */
526 if (nf_conntrack_expect_related(exp) != 0) 498 if (nf_ct_expect_related(exp) != 0)
527 ret = NF_DROP; 499 ret = NF_DROP;
528 else 500 else
529 ret = NF_ACCEPT; 501 ret = NF_ACCEPT;
530 } 502 }
531 503
532out_put_expect: 504out_put_expect:
533 nf_conntrack_expect_put(exp); 505 nf_ct_expect_put(exp);
534 506
535out_update_nl: 507out_update_nl:
536 /* Now if this ends in \n, update ftp info. Seq may have been 508 /* Now if this ends in \n, update ftp info. Seq may have been
@@ -542,8 +514,8 @@ out_update_nl:
542 return ret; 514 return ret;
543} 515}
544 516
545static struct nf_conntrack_helper ftp[MAX_PORTS][2]; 517static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
546static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")]; 518static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly;
547 519
548/* don't make this __exit, since it's called from __init ! */ 520/* don't make this __exit, since it's called from __init ! */
549static void nf_conntrack_ftp_fini(void) 521static void nf_conntrack_ftp_fini(void)
@@ -554,9 +526,9 @@ static void nf_conntrack_ftp_fini(void)
554 if (ftp[i][j].me == NULL) 526 if (ftp[i][j].me == NULL)
555 continue; 527 continue;
556 528
557 DEBUGP("nf_ct_ftp: unregistering helper for pf: %d " 529 pr_debug("nf_ct_ftp: unregistering helper for pf: %d "
558 "port: %d\n", 530 "port: %d\n",
559 ftp[i][j].tuple.src.l3num, ports[i]); 531 ftp[i][j].tuple.src.l3num, ports[i]);
560 nf_conntrack_helper_unregister(&ftp[i][j]); 532 nf_conntrack_helper_unregister(&ftp[i][j]);
561 } 533 }
562 } 534 }
@@ -584,9 +556,6 @@ static int __init nf_conntrack_ftp_init(void)
584 for (j = 0; j < 2; j++) { 556 for (j = 0; j < 2; j++) {
585 ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]); 557 ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
586 ftp[i][j].tuple.dst.protonum = IPPROTO_TCP; 558 ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
587 ftp[i][j].mask.src.l3num = 0xFFFF;
588 ftp[i][j].mask.src.u.tcp.port = htons(0xFFFF);
589 ftp[i][j].mask.dst.protonum = 0xFF;
590 ftp[i][j].max_expected = 1; 559 ftp[i][j].max_expected = 1;
591 ftp[i][j].timeout = 5 * 60; /* 5 Minutes */ 560 ftp[i][j].timeout = 5 * 60; /* 5 Minutes */
592 ftp[i][j].me = THIS_MODULE; 561 ftp[i][j].me = THIS_MODULE;
@@ -598,9 +567,9 @@ static int __init nf_conntrack_ftp_init(void)
598 sprintf(tmpname, "ftp-%d", ports[i]); 567 sprintf(tmpname, "ftp-%d", ports[i]);
599 ftp[i][j].name = tmpname; 568 ftp[i][j].name = tmpname;
600 569
601 DEBUGP("nf_ct_ftp: registering helper for pf: %d " 570 pr_debug("nf_ct_ftp: registering helper for pf: %d "
602 "port: %d\n", 571 "port: %d\n",
603 ftp[i][j].tuple.src.l3num, ports[i]); 572 ftp[i][j].tuple.src.l3num, ports[i]);
604 ret = nf_conntrack_helper_register(&ftp[i][j]); 573 ret = nf_conntrack_helper_register(&ftp[i][j]);
605 if (ret) { 574 if (ret) {
606 printk("nf_ct_ftp: failed to register helper " 575 printk("nf_ct_ftp: failed to register helper "
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 6b7eaa019d4c..a869403b2294 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -555,15 +555,6 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
555 555
556 /* Decode the extension components */ 556 /* Decode the extension components */
557 for (opt = 0; opt < bmp2_len; opt++, i++, son++) { 557 for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
558 if (i < f->ub && son->attr & STOP) {
559 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
560 son->name);
561 return H323_ERROR_STOP;
562 }
563
564 if (!((0x80000000 >> opt) & bmp2)) /* Not present */
565 continue;
566
567 /* Check Range */ 558 /* Check Range */
568 if (i >= f->ub) { /* Newer Version? */ 559 if (i >= f->ub) { /* Newer Version? */
569 CHECK_BOUND(bs, 2); 560 CHECK_BOUND(bs, 2);
@@ -573,6 +564,15 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
573 continue; 564 continue;
574 } 565 }
575 566
567 if (son->attr & STOP) {
568 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
569 son->name);
570 return H323_ERROR_STOP;
571 }
572
573 if (!((0x80000000 >> opt) & bmp2)) /* Not present */
574 continue;
575
576 CHECK_BOUND(bs, 2); 576 CHECK_BOUND(bs, 2);
577 len = get_len(bs); 577 len = get_len(bs);
578 CHECK_BOUND(bs, len); 578 CHECK_BOUND(bs, len);
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index a1b95acad297..a8a9dfbe7a67 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -31,12 +31,6 @@
31#include <net/netfilter/nf_conntrack_helper.h> 31#include <net/netfilter/nf_conntrack_helper.h>
32#include <linux/netfilter/nf_conntrack_h323.h> 32#include <linux/netfilter/nf_conntrack_h323.h>
33 33
34#if 0
35#define DEBUGP printk
36#else
37#define DEBUGP(format, args...)
38#endif
39
40/* Parameters */ 34/* Parameters */
41static unsigned int default_rrq_ttl __read_mostly = 300; 35static unsigned int default_rrq_ttl __read_mostly = 300;
42module_param(default_rrq_ttl, uint, 0600); 36module_param(default_rrq_ttl, uint, 0600);
@@ -150,9 +144,9 @@ static int get_tpkt_data(struct sk_buff **pskb, unsigned int protoff,
150 if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) { 144 if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) {
151 /* Netmeeting sends TPKT header and data separately */ 145 /* Netmeeting sends TPKT header and data separately */
152 if (info->tpkt_len[dir] > 0) { 146 if (info->tpkt_len[dir] > 0) {
153 DEBUGP("nf_ct_h323: previous packet " 147 pr_debug("nf_ct_h323: previous packet "
154 "indicated separate TPKT data of %hu " 148 "indicated separate TPKT data of %hu "
155 "bytes\n", info->tpkt_len[dir]); 149 "bytes\n", info->tpkt_len[dir]);
156 if (info->tpkt_len[dir] <= tcpdatalen) { 150 if (info->tpkt_len[dir] <= tcpdatalen) {
157 /* Yes, there was a TPKT header 151 /* Yes, there was a TPKT header
158 * received */ 152 * received */
@@ -163,9 +157,7 @@ static int get_tpkt_data(struct sk_buff **pskb, unsigned int protoff,
163 } 157 }
164 158
165 /* Fragmented TPKT */ 159 /* Fragmented TPKT */
166 if (net_ratelimit()) 160 pr_debug("nf_ct_h323: fragmented TPKT\n");
167 printk("nf_ct_h323: "
168 "fragmented TPKT\n");
169 goto clear_out; 161 goto clear_out;
170 } 162 }
171 163
@@ -192,9 +184,9 @@ static int get_tpkt_data(struct sk_buff **pskb, unsigned int protoff,
192 if (tpktlen > tcpdatalen) { 184 if (tpktlen > tcpdatalen) {
193 if (tcpdatalen == 4) { /* Separate TPKT header */ 185 if (tcpdatalen == 4) { /* Separate TPKT header */
194 /* Netmeeting sends TPKT header and data separately */ 186 /* Netmeeting sends TPKT header and data separately */
195 DEBUGP("nf_ct_h323: separate TPKT header indicates " 187 pr_debug("nf_ct_h323: separate TPKT header indicates "
196 "there will be TPKT data of %hu bytes\n", 188 "there will be TPKT data of %hu bytes\n",
197 tpktlen - 4); 189 tpktlen - 4);
198 info->tpkt_len[dir] = tpktlen - 4; 190 info->tpkt_len[dir] = tpktlen - 4;
199 return 0; 191 return 0;
200 } 192 }
@@ -282,22 +274,22 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
282 rtcp_port = htons(ntohs(port) + 1); 274 rtcp_port = htons(ntohs(port) + 1);
283 275
284 /* Create expect for RTP */ 276 /* Create expect for RTP */
285 if ((rtp_exp = nf_conntrack_expect_alloc(ct)) == NULL) 277 if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
286 return -1; 278 return -1;
287 nf_conntrack_expect_init(rtp_exp, ct->tuplehash[!dir].tuple.src.l3num, 279 nf_ct_expect_init(rtp_exp, ct->tuplehash[!dir].tuple.src.l3num,
288 &ct->tuplehash[!dir].tuple.src.u3, 280 &ct->tuplehash[!dir].tuple.src.u3,
289 &ct->tuplehash[!dir].tuple.dst.u3, 281 &ct->tuplehash[!dir].tuple.dst.u3,
290 IPPROTO_UDP, NULL, &rtp_port); 282 IPPROTO_UDP, NULL, &rtp_port);
291 283
292 /* Create expect for RTCP */ 284 /* Create expect for RTCP */
293 if ((rtcp_exp = nf_conntrack_expect_alloc(ct)) == NULL) { 285 if ((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) {
294 nf_conntrack_expect_put(rtp_exp); 286 nf_ct_expect_put(rtp_exp);
295 return -1; 287 return -1;
296 } 288 }
297 nf_conntrack_expect_init(rtcp_exp, ct->tuplehash[!dir].tuple.src.l3num, 289 nf_ct_expect_init(rtcp_exp, ct->tuplehash[!dir].tuple.src.l3num,
298 &ct->tuplehash[!dir].tuple.src.u3, 290 &ct->tuplehash[!dir].tuple.src.u3,
299 &ct->tuplehash[!dir].tuple.dst.u3, 291 &ct->tuplehash[!dir].tuple.dst.u3,
300 IPPROTO_UDP, NULL, &rtcp_port); 292 IPPROTO_UDP, NULL, &rtcp_port);
301 293
302 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 294 if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
303 &ct->tuplehash[!dir].tuple.dst.u3, 295 &ct->tuplehash[!dir].tuple.dst.u3,
@@ -308,22 +300,22 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
308 ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, 300 ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
309 taddr, port, rtp_port, rtp_exp, rtcp_exp); 301 taddr, port, rtp_port, rtp_exp, rtcp_exp);
310 } else { /* Conntrack only */ 302 } else { /* Conntrack only */
311 if (nf_conntrack_expect_related(rtp_exp) == 0) { 303 if (nf_ct_expect_related(rtp_exp) == 0) {
312 if (nf_conntrack_expect_related(rtcp_exp) == 0) { 304 if (nf_ct_expect_related(rtcp_exp) == 0) {
313 DEBUGP("nf_ct_h323: expect RTP "); 305 pr_debug("nf_ct_h323: expect RTP ");
314 NF_CT_DUMP_TUPLE(&rtp_exp->tuple); 306 NF_CT_DUMP_TUPLE(&rtp_exp->tuple);
315 DEBUGP("nf_ct_h323: expect RTCP "); 307 pr_debug("nf_ct_h323: expect RTCP ");
316 NF_CT_DUMP_TUPLE(&rtcp_exp->tuple); 308 NF_CT_DUMP_TUPLE(&rtcp_exp->tuple);
317 } else { 309 } else {
318 nf_conntrack_unexpect_related(rtp_exp); 310 nf_ct_unexpect_related(rtp_exp);
319 ret = -1; 311 ret = -1;
320 } 312 }
321 } else 313 } else
322 ret = -1; 314 ret = -1;
323 } 315 }
324 316
325 nf_conntrack_expect_put(rtp_exp); 317 nf_ct_expect_put(rtp_exp);
326 nf_conntrack_expect_put(rtcp_exp); 318 nf_ct_expect_put(rtcp_exp);
327 319
328 return ret; 320 return ret;
329} 321}
@@ -349,12 +341,12 @@ static int expect_t120(struct sk_buff **pskb,
349 return 0; 341 return 0;
350 342
351 /* Create expect for T.120 connections */ 343 /* Create expect for T.120 connections */
352 if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) 344 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
353 return -1; 345 return -1;
354 nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 346 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
355 &ct->tuplehash[!dir].tuple.src.u3, 347 &ct->tuplehash[!dir].tuple.src.u3,
356 &ct->tuplehash[!dir].tuple.dst.u3, 348 &ct->tuplehash[!dir].tuple.dst.u3,
357 IPPROTO_TCP, NULL, &port); 349 IPPROTO_TCP, NULL, &port);
358 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */ 350 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */
359 351
360 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 352 if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
@@ -366,14 +358,14 @@ static int expect_t120(struct sk_buff **pskb,
366 ret = nat_t120(pskb, ct, ctinfo, data, dataoff, taddr, 358 ret = nat_t120(pskb, ct, ctinfo, data, dataoff, taddr,
367 port, exp); 359 port, exp);
368 } else { /* Conntrack only */ 360 } else { /* Conntrack only */
369 if (nf_conntrack_expect_related(exp) == 0) { 361 if (nf_ct_expect_related(exp) == 0) {
370 DEBUGP("nf_ct_h323: expect T.120 "); 362 pr_debug("nf_ct_h323: expect T.120 ");
371 NF_CT_DUMP_TUPLE(&exp->tuple); 363 NF_CT_DUMP_TUPLE(&exp->tuple);
372 } else 364 } else
373 ret = -1; 365 ret = -1;
374 } 366 }
375 367
376 nf_conntrack_expect_put(exp); 368 nf_ct_expect_put(exp);
377 369
378 return ret; 370 return ret;
379} 371}
@@ -415,7 +407,7 @@ static int process_olc(struct sk_buff **pskb, struct nf_conn *ct,
415{ 407{
416 int ret; 408 int ret;
417 409
418 DEBUGP("nf_ct_h323: OpenLogicalChannel\n"); 410 pr_debug("nf_ct_h323: OpenLogicalChannel\n");
419 411
420 if (olc->forwardLogicalChannelParameters.multiplexParameters.choice == 412 if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
421 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters) 413 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
@@ -475,7 +467,7 @@ static int process_olca(struct sk_buff **pskb, struct nf_conn *ct,
475 H2250LogicalChannelAckParameters *ack; 467 H2250LogicalChannelAckParameters *ack;
476 int ret; 468 int ret;
477 469
478 DEBUGP("nf_ct_h323: OpenLogicalChannelAck\n"); 470 pr_debug("nf_ct_h323: OpenLogicalChannelAck\n");
479 471
480 if ((olca->options & 472 if ((olca->options &
481 eOpenLogicalChannelAck_reverseLogicalChannelParameters) && 473 eOpenLogicalChannelAck_reverseLogicalChannelParameters) &&
@@ -546,8 +538,8 @@ static int process_h245(struct sk_buff **pskb, struct nf_conn *ct,
546 return process_olc(pskb, ct, ctinfo, data, dataoff, 538 return process_olc(pskb, ct, ctinfo, data, dataoff,
547 &mscm->request.openLogicalChannel); 539 &mscm->request.openLogicalChannel);
548 } 540 }
549 DEBUGP("nf_ct_h323: H.245 Request %d\n", 541 pr_debug("nf_ct_h323: H.245 Request %d\n",
550 mscm->request.choice); 542 mscm->request.choice);
551 break; 543 break;
552 case eMultimediaSystemControlMessage_response: 544 case eMultimediaSystemControlMessage_response:
553 if (mscm->response.choice == 545 if (mscm->response.choice ==
@@ -556,11 +548,11 @@ static int process_h245(struct sk_buff **pskb, struct nf_conn *ct,
556 &mscm->response. 548 &mscm->response.
557 openLogicalChannelAck); 549 openLogicalChannelAck);
558 } 550 }
559 DEBUGP("nf_ct_h323: H.245 Response %d\n", 551 pr_debug("nf_ct_h323: H.245 Response %d\n",
560 mscm->response.choice); 552 mscm->response.choice);
561 break; 553 break;
562 default: 554 default:
563 DEBUGP("nf_ct_h323: H.245 signal %d\n", mscm->choice); 555 pr_debug("nf_ct_h323: H.245 signal %d\n", mscm->choice);
564 break; 556 break;
565 } 557 }
566 558
@@ -582,24 +574,23 @@ static int h245_help(struct sk_buff **pskb, unsigned int protoff,
582 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { 574 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
583 return NF_ACCEPT; 575 return NF_ACCEPT;
584 } 576 }
585 DEBUGP("nf_ct_h245: skblen = %u\n", (*pskb)->len); 577 pr_debug("nf_ct_h245: skblen = %u\n", (*pskb)->len);
586 578
587 spin_lock_bh(&nf_h323_lock); 579 spin_lock_bh(&nf_h323_lock);
588 580
589 /* Process each TPKT */ 581 /* Process each TPKT */
590 while (get_tpkt_data(pskb, protoff, ct, ctinfo, 582 while (get_tpkt_data(pskb, protoff, ct, ctinfo,
591 &data, &datalen, &dataoff)) { 583 &data, &datalen, &dataoff)) {
592 DEBUGP("nf_ct_h245: TPKT len=%d ", datalen); 584 pr_debug("nf_ct_h245: TPKT len=%d ", datalen);
593 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); 585 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
594 586
595 /* Decode H.245 signal */ 587 /* Decode H.245 signal */
596 ret = DecodeMultimediaSystemControlMessage(data, datalen, 588 ret = DecodeMultimediaSystemControlMessage(data, datalen,
597 &mscm); 589 &mscm);
598 if (ret < 0) { 590 if (ret < 0) {
599 if (net_ratelimit()) 591 pr_debug("nf_ct_h245: decoding error: %s\n",
600 printk("nf_ct_h245: decoding error: %s\n", 592 ret == H323_ERROR_BOUND ?
601 ret == H323_ERROR_BOUND ? 593 "out of bound" : "out of range");
602 "out of bound" : "out of range");
603 /* We don't drop when decoding error */ 594 /* We don't drop when decoding error */
604 break; 595 break;
605 } 596 }
@@ -626,8 +617,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
626 .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */, 617 .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
627 .timeout = 240, 618 .timeout = 240,
628 .tuple.dst.protonum = IPPROTO_UDP, 619 .tuple.dst.protonum = IPPROTO_UDP,
629 .mask.src.u.udp.port = __constant_htons(0xFFFF),
630 .mask.dst.protonum = 0xFF,
631 .help = h245_help 620 .help = h245_help
632}; 621};
633 622
@@ -684,12 +673,12 @@ static int expect_h245(struct sk_buff **pskb, struct nf_conn *ct,
684 return 0; 673 return 0;
685 674
686 /* Create expect for h245 connection */ 675 /* Create expect for h245 connection */
687 if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) 676 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
688 return -1; 677 return -1;
689 nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 678 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
690 &ct->tuplehash[!dir].tuple.src.u3, 679 &ct->tuplehash[!dir].tuple.src.u3,
691 &ct->tuplehash[!dir].tuple.dst.u3, 680 &ct->tuplehash[!dir].tuple.dst.u3,
692 IPPROTO_TCP, NULL, &port); 681 IPPROTO_TCP, NULL, &port);
693 exp->helper = &nf_conntrack_helper_h245; 682 exp->helper = &nf_conntrack_helper_h245;
694 683
695 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 684 if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
@@ -701,14 +690,14 @@ static int expect_h245(struct sk_buff **pskb, struct nf_conn *ct,
701 ret = nat_h245(pskb, ct, ctinfo, data, dataoff, taddr, 690 ret = nat_h245(pskb, ct, ctinfo, data, dataoff, taddr,
702 port, exp); 691 port, exp);
703 } else { /* Conntrack only */ 692 } else { /* Conntrack only */
704 if (nf_conntrack_expect_related(exp) == 0) { 693 if (nf_ct_expect_related(exp) == 0) {
705 DEBUGP("nf_ct_q931: expect H.245 "); 694 pr_debug("nf_ct_q931: expect H.245 ");
706 NF_CT_DUMP_TUPLE(&exp->tuple); 695 NF_CT_DUMP_TUPLE(&exp->tuple);
707 } else 696 } else
708 ret = -1; 697 ret = -1;
709 } 698 }
710 699
711 nf_conntrack_expect_put(exp); 700 nf_ct_expect_put(exp);
712 701
713 return ret; 702 return ret;
714} 703}
@@ -791,16 +780,16 @@ static int expect_callforwarding(struct sk_buff **pskb,
791 if (callforward_filter && 780 if (callforward_filter &&
792 callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3, 781 callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
793 ct->tuplehash[!dir].tuple.src.l3num)) { 782 ct->tuplehash[!dir].tuple.src.l3num)) {
794 DEBUGP("nf_ct_q931: Call Forwarding not tracked\n"); 783 pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
795 return 0; 784 return 0;
796 } 785 }
797 786
798 /* Create expect for the second call leg */ 787 /* Create expect for the second call leg */
799 if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) 788 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
800 return -1; 789 return -1;
801 nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 790 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
802 &ct->tuplehash[!dir].tuple.src.u3, &addr, 791 &ct->tuplehash[!dir].tuple.src.u3, &addr,
803 IPPROTO_TCP, NULL, &port); 792 IPPROTO_TCP, NULL, &port);
804 exp->helper = nf_conntrack_helper_q931; 793 exp->helper = nf_conntrack_helper_q931;
805 794
806 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 795 if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
@@ -812,14 +801,14 @@ static int expect_callforwarding(struct sk_buff **pskb,
812 ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff, 801 ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff,
813 taddr, port, exp); 802 taddr, port, exp);
814 } else { /* Conntrack only */ 803 } else { /* Conntrack only */
815 if (nf_conntrack_expect_related(exp) == 0) { 804 if (nf_ct_expect_related(exp) == 0) {
816 DEBUGP("nf_ct_q931: expect Call Forwarding "); 805 pr_debug("nf_ct_q931: expect Call Forwarding ");
817 NF_CT_DUMP_TUPLE(&exp->tuple); 806 NF_CT_DUMP_TUPLE(&exp->tuple);
818 } else 807 } else
819 ret = -1; 808 ret = -1;
820 } 809 }
821 810
822 nf_conntrack_expect_put(exp); 811 nf_ct_expect_put(exp);
823 812
824 return ret; 813 return ret;
825} 814}
@@ -837,7 +826,7 @@ static int process_setup(struct sk_buff **pskb, struct nf_conn *ct,
837 union nf_conntrack_address addr; 826 union nf_conntrack_address addr;
838 typeof(set_h225_addr_hook) set_h225_addr; 827 typeof(set_h225_addr_hook) set_h225_addr;
839 828
840 DEBUGP("nf_ct_q931: Setup\n"); 829 pr_debug("nf_ct_q931: Setup\n");
841 830
842 if (setup->options & eSetup_UUIE_h245Address) { 831 if (setup->options & eSetup_UUIE_h245Address) {
843 ret = expect_h245(pskb, ct, ctinfo, data, dataoff, 832 ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
@@ -852,11 +841,11 @@ static int process_setup(struct sk_buff **pskb, struct nf_conn *ct,
852 get_h225_addr(ct, *data, &setup->destCallSignalAddress, 841 get_h225_addr(ct, *data, &setup->destCallSignalAddress,
853 &addr, &port) && 842 &addr, &port) &&
854 memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { 843 memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
855 DEBUGP("nf_ct_q931: set destCallSignalAddress " 844 pr_debug("nf_ct_q931: set destCallSignalAddress "
856 NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", 845 NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n",
857 NIP6(*(struct in6_addr *)&addr), ntohs(port), 846 NIP6(*(struct in6_addr *)&addr), ntohs(port),
858 NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.src.u3), 847 NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.src.u3),
859 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); 848 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
860 ret = set_h225_addr(pskb, data, dataoff, 849 ret = set_h225_addr(pskb, data, dataoff,
861 &setup->destCallSignalAddress, 850 &setup->destCallSignalAddress,
862 &ct->tuplehash[!dir].tuple.src.u3, 851 &ct->tuplehash[!dir].tuple.src.u3,
@@ -870,11 +859,11 @@ static int process_setup(struct sk_buff **pskb, struct nf_conn *ct,
870 get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, 859 get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
871 &addr, &port) && 860 &addr, &port) &&
872 memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { 861 memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
873 DEBUGP("nf_ct_q931: set sourceCallSignalAddress " 862 pr_debug("nf_ct_q931: set sourceCallSignalAddress "
874 NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", 863 NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n",
875 NIP6(*(struct in6_addr *)&addr), ntohs(port), 864 NIP6(*(struct in6_addr *)&addr), ntohs(port),
876 NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.dst.u3), 865 NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.dst.u3),
877 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); 866 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
878 ret = set_h225_addr(pskb, data, dataoff, 867 ret = set_h225_addr(pskb, data, dataoff,
879 &setup->sourceCallSignalAddress, 868 &setup->sourceCallSignalAddress,
880 &ct->tuplehash[!dir].tuple.dst.u3, 869 &ct->tuplehash[!dir].tuple.dst.u3,
@@ -905,7 +894,7 @@ static int process_callproceeding(struct sk_buff **pskb,
905 int ret; 894 int ret;
906 int i; 895 int i;
907 896
908 DEBUGP("nf_ct_q931: CallProceeding\n"); 897 pr_debug("nf_ct_q931: CallProceeding\n");
909 898
910 if (callproc->options & eCallProceeding_UUIE_h245Address) { 899 if (callproc->options & eCallProceeding_UUIE_h245Address) {
911 ret = expect_h245(pskb, ct, ctinfo, data, dataoff, 900 ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
@@ -935,7 +924,7 @@ static int process_connect(struct sk_buff **pskb, struct nf_conn *ct,
935 int ret; 924 int ret;
936 int i; 925 int i;
937 926
938 DEBUGP("nf_ct_q931: Connect\n"); 927 pr_debug("nf_ct_q931: Connect\n");
939 928
940 if (connect->options & eConnect_UUIE_h245Address) { 929 if (connect->options & eConnect_UUIE_h245Address) {
941 ret = expect_h245(pskb, ct, ctinfo, data, dataoff, 930 ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
@@ -965,7 +954,7 @@ static int process_alerting(struct sk_buff **pskb, struct nf_conn *ct,
965 int ret; 954 int ret;
966 int i; 955 int i;
967 956
968 DEBUGP("nf_ct_q931: Alerting\n"); 957 pr_debug("nf_ct_q931: Alerting\n");
969 958
970 if (alert->options & eAlerting_UUIE_h245Address) { 959 if (alert->options & eAlerting_UUIE_h245Address) {
971 ret = expect_h245(pskb, ct, ctinfo, data, dataoff, 960 ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
@@ -995,7 +984,7 @@ static int process_facility(struct sk_buff **pskb, struct nf_conn *ct,
995 int ret; 984 int ret;
996 int i; 985 int i;
997 986
998 DEBUGP("nf_ct_q931: Facility\n"); 987 pr_debug("nf_ct_q931: Facility\n");
999 988
1000 if (facility->reason.choice == eFacilityReason_callForwarded) { 989 if (facility->reason.choice == eFacilityReason_callForwarded) {
1001 if (facility->options & eFacility_UUIE_alternativeAddress) 990 if (facility->options & eFacility_UUIE_alternativeAddress)
@@ -1034,7 +1023,7 @@ static int process_progress(struct sk_buff **pskb, struct nf_conn *ct,
1034 int ret; 1023 int ret;
1035 int i; 1024 int i;
1036 1025
1037 DEBUGP("nf_ct_q931: Progress\n"); 1026 pr_debug("nf_ct_q931: Progress\n");
1038 1027
1039 if (progress->options & eProgress_UUIE_h245Address) { 1028 if (progress->options & eProgress_UUIE_h245Address) {
1040 ret = expect_h245(pskb, ct, ctinfo, data, dataoff, 1029 ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
@@ -1091,8 +1080,8 @@ static int process_q931(struct sk_buff **pskb, struct nf_conn *ct,
1091 &pdu->h323_message_body.progress); 1080 &pdu->h323_message_body.progress);
1092 break; 1081 break;
1093 default: 1082 default:
1094 DEBUGP("nf_ct_q931: Q.931 signal %d\n", 1083 pr_debug("nf_ct_q931: Q.931 signal %d\n",
1095 pdu->h323_message_body.choice); 1084 pdu->h323_message_body.choice);
1096 break; 1085 break;
1097 } 1086 }
1098 1087
@@ -1126,23 +1115,22 @@ static int q931_help(struct sk_buff **pskb, unsigned int protoff,
1126 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { 1115 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
1127 return NF_ACCEPT; 1116 return NF_ACCEPT;
1128 } 1117 }
1129 DEBUGP("nf_ct_q931: skblen = %u\n", (*pskb)->len); 1118 pr_debug("nf_ct_q931: skblen = %u\n", (*pskb)->len);
1130 1119
1131 spin_lock_bh(&nf_h323_lock); 1120 spin_lock_bh(&nf_h323_lock);
1132 1121
1133 /* Process each TPKT */ 1122 /* Process each TPKT */
1134 while (get_tpkt_data(pskb, protoff, ct, ctinfo, 1123 while (get_tpkt_data(pskb, protoff, ct, ctinfo,
1135 &data, &datalen, &dataoff)) { 1124 &data, &datalen, &dataoff)) {
1136 DEBUGP("nf_ct_q931: TPKT len=%d ", datalen); 1125 pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
1137 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); 1126 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
1138 1127
1139 /* Decode Q.931 signal */ 1128 /* Decode Q.931 signal */
1140 ret = DecodeQ931(data, datalen, &q931); 1129 ret = DecodeQ931(data, datalen, &q931);
1141 if (ret < 0) { 1130 if (ret < 0) {
1142 if (net_ratelimit()) 1131 pr_debug("nf_ct_q931: decoding error: %s\n",
1143 printk("nf_ct_q931: decoding error: %s\n", 1132 ret == H323_ERROR_BOUND ?
1144 ret == H323_ERROR_BOUND ? 1133 "out of bound" : "out of range");
1145 "out of bound" : "out of range");
1146 /* We don't drop when decoding error */ 1134 /* We don't drop when decoding error */
1147 break; 1135 break;
1148 } 1136 }
@@ -1173,9 +1161,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
1173 .tuple.src.l3num = AF_INET, 1161 .tuple.src.l3num = AF_INET,
1174 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), 1162 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT),
1175 .tuple.dst.protonum = IPPROTO_TCP, 1163 .tuple.dst.protonum = IPPROTO_TCP,
1176 .mask.src.l3num = 0xFFFF,
1177 .mask.src.u.tcp.port = __constant_htons(0xFFFF),
1178 .mask.dst.protonum = 0xFF,
1179 .help = q931_help 1164 .help = q931_help
1180 }, 1165 },
1181 { 1166 {
@@ -1187,9 +1172,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
1187 .tuple.src.l3num = AF_INET6, 1172 .tuple.src.l3num = AF_INET6,
1188 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), 1173 .tuple.src.u.tcp.port = __constant_htons(Q931_PORT),
1189 .tuple.dst.protonum = IPPROTO_TCP, 1174 .tuple.dst.protonum = IPPROTO_TCP,
1190 .mask.src.l3num = 0xFFFF,
1191 .mask.src.u.tcp.port = __constant_htons(0xFFFF),
1192 .mask.dst.protonum = 0xFF,
1193 .help = q931_help 1175 .help = q931_help
1194 }, 1176 },
1195}; 1177};
@@ -1225,7 +1207,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
1225 tuple.dst.u.tcp.port = port; 1207 tuple.dst.u.tcp.port = port;
1226 tuple.dst.protonum = IPPROTO_TCP; 1208 tuple.dst.protonum = IPPROTO_TCP;
1227 1209
1228 exp = __nf_conntrack_expect_find(&tuple); 1210 exp = __nf_ct_expect_find(&tuple);
1229 if (exp && exp->master == ct) 1211 if (exp && exp->master == ct)
1230 return exp; 1212 return exp;
1231 return NULL; 1213 return NULL;
@@ -1271,14 +1253,13 @@ static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct,
1271 return 0; 1253 return 0;
1272 1254
1273 /* Create expect for Q.931 */ 1255 /* Create expect for Q.931 */
1274 if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) 1256 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1275 return -1; 1257 return -1;
1276 nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1258 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
1277 gkrouted_only ? /* only accept calls from GK? */ 1259 gkrouted_only ? /* only accept calls from GK? */
1278 &ct->tuplehash[!dir].tuple.src.u3 : 1260 &ct->tuplehash[!dir].tuple.src.u3 : NULL,
1279 NULL, 1261 &ct->tuplehash[!dir].tuple.dst.u3,
1280 &ct->tuplehash[!dir].tuple.dst.u3, 1262 IPPROTO_TCP, NULL, &port);
1281 IPPROTO_TCP, NULL, &port);
1282 exp->helper = nf_conntrack_helper_q931; 1263 exp->helper = nf_conntrack_helper_q931;
1283 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */ 1264 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */
1284 1265
@@ -1286,8 +1267,8 @@ static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct,
1286 if (nat_q931 && ct->status & IPS_NAT_MASK) { /* Need NAT */ 1267 if (nat_q931 && ct->status & IPS_NAT_MASK) { /* Need NAT */
1287 ret = nat_q931(pskb, ct, ctinfo, data, taddr, i, port, exp); 1268 ret = nat_q931(pskb, ct, ctinfo, data, taddr, i, port, exp);
1288 } else { /* Conntrack only */ 1269 } else { /* Conntrack only */
1289 if (nf_conntrack_expect_related(exp) == 0) { 1270 if (nf_ct_expect_related(exp) == 0) {
1290 DEBUGP("nf_ct_ras: expect Q.931 "); 1271 pr_debug("nf_ct_ras: expect Q.931 ");
1291 NF_CT_DUMP_TUPLE(&exp->tuple); 1272 NF_CT_DUMP_TUPLE(&exp->tuple);
1292 1273
1293 /* Save port for looking up expect in processing RCF */ 1274 /* Save port for looking up expect in processing RCF */
@@ -1296,7 +1277,7 @@ static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct,
1296 ret = -1; 1277 ret = -1;
1297 } 1278 }
1298 1279
1299 nf_conntrack_expect_put(exp); 1280 nf_ct_expect_put(exp);
1300 1281
1301 return ret; 1282 return ret;
1302} 1283}
@@ -1308,7 +1289,7 @@ static int process_grq(struct sk_buff **pskb, struct nf_conn *ct,
1308{ 1289{
1309 typeof(set_ras_addr_hook) set_ras_addr; 1290 typeof(set_ras_addr_hook) set_ras_addr;
1310 1291
1311 DEBUGP("nf_ct_ras: GRQ\n"); 1292 pr_debug("nf_ct_ras: GRQ\n");
1312 1293
1313 set_ras_addr = rcu_dereference(set_ras_addr_hook); 1294 set_ras_addr = rcu_dereference(set_ras_addr_hook);
1314 if (set_ras_addr && ct->status & IPS_NAT_MASK) /* NATed */ 1295 if (set_ras_addr && ct->status & IPS_NAT_MASK) /* NATed */
@@ -1328,7 +1309,7 @@ static int process_gcf(struct sk_buff **pskb, struct nf_conn *ct,
1328 union nf_conntrack_address addr; 1309 union nf_conntrack_address addr;
1329 struct nf_conntrack_expect *exp; 1310 struct nf_conntrack_expect *exp;
1330 1311
1331 DEBUGP("nf_ct_ras: GCF\n"); 1312 pr_debug("nf_ct_ras: GCF\n");
1332 1313
1333 if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port)) 1314 if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port))
1334 return 0; 1315 return 0;
@@ -1343,20 +1324,20 @@ static int process_gcf(struct sk_buff **pskb, struct nf_conn *ct,
1343 return 0; 1324 return 0;
1344 1325
1345 /* Need new expect */ 1326 /* Need new expect */
1346 if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) 1327 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1347 return -1; 1328 return -1;
1348 nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1329 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
1349 &ct->tuplehash[!dir].tuple.src.u3, &addr, 1330 &ct->tuplehash[!dir].tuple.src.u3, &addr,
1350 IPPROTO_UDP, NULL, &port); 1331 IPPROTO_UDP, NULL, &port);
1351 exp->helper = nf_conntrack_helper_ras; 1332 exp->helper = nf_conntrack_helper_ras;
1352 1333
1353 if (nf_conntrack_expect_related(exp) == 0) { 1334 if (nf_ct_expect_related(exp) == 0) {
1354 DEBUGP("nf_ct_ras: expect RAS "); 1335 pr_debug("nf_ct_ras: expect RAS ");
1355 NF_CT_DUMP_TUPLE(&exp->tuple); 1336 NF_CT_DUMP_TUPLE(&exp->tuple);
1356 } else 1337 } else
1357 ret = -1; 1338 ret = -1;
1358 1339
1359 nf_conntrack_expect_put(exp); 1340 nf_ct_expect_put(exp);
1360 1341
1361 return ret; 1342 return ret;
1362} 1343}
@@ -1370,7 +1351,7 @@ static int process_rrq(struct sk_buff **pskb, struct nf_conn *ct,
1370 int ret; 1351 int ret;
1371 typeof(set_ras_addr_hook) set_ras_addr; 1352 typeof(set_ras_addr_hook) set_ras_addr;
1372 1353
1373 DEBUGP("nf_ct_ras: RRQ\n"); 1354 pr_debug("nf_ct_ras: RRQ\n");
1374 1355
1375 ret = expect_q931(pskb, ct, ctinfo, data, 1356 ret = expect_q931(pskb, ct, ctinfo, data,
1376 rrq->callSignalAddress.item, 1357 rrq->callSignalAddress.item,
@@ -1388,7 +1369,7 @@ static int process_rrq(struct sk_buff **pskb, struct nf_conn *ct,
1388 } 1369 }
1389 1370
1390 if (rrq->options & eRegistrationRequest_timeToLive) { 1371 if (rrq->options & eRegistrationRequest_timeToLive) {
1391 DEBUGP("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive); 1372 pr_debug("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
1392 info->timeout = rrq->timeToLive; 1373 info->timeout = rrq->timeToLive;
1393 } else 1374 } else
1394 info->timeout = default_rrq_ttl; 1375 info->timeout = default_rrq_ttl;
@@ -1407,7 +1388,7 @@ static int process_rcf(struct sk_buff **pskb, struct nf_conn *ct,
1407 struct nf_conntrack_expect *exp; 1388 struct nf_conntrack_expect *exp;
1408 typeof(set_sig_addr_hook) set_sig_addr; 1389 typeof(set_sig_addr_hook) set_sig_addr;
1409 1390
1410 DEBUGP("nf_ct_ras: RCF\n"); 1391 pr_debug("nf_ct_ras: RCF\n");
1411 1392
1412 set_sig_addr = rcu_dereference(set_sig_addr_hook); 1393 set_sig_addr = rcu_dereference(set_sig_addr_hook);
1413 if (set_sig_addr && ct->status & IPS_NAT_MASK) { 1394 if (set_sig_addr && ct->status & IPS_NAT_MASK) {
@@ -1419,14 +1400,13 @@ static int process_rcf(struct sk_buff **pskb, struct nf_conn *ct,
1419 } 1400 }
1420 1401
1421 if (rcf->options & eRegistrationConfirm_timeToLive) { 1402 if (rcf->options & eRegistrationConfirm_timeToLive) {
1422 DEBUGP("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive); 1403 pr_debug("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive);
1423 info->timeout = rcf->timeToLive; 1404 info->timeout = rcf->timeToLive;
1424 } 1405 }
1425 1406
1426 if (info->timeout > 0) { 1407 if (info->timeout > 0) {
1427 DEBUGP 1408 pr_debug("nf_ct_ras: set RAS connection timeout to "
1428 ("nf_ct_ras: set RAS connection timeout to %u seconds\n", 1409 "%u seconds\n", info->timeout);
1429 info->timeout);
1430 nf_ct_refresh(ct, *pskb, info->timeout * HZ); 1410 nf_ct_refresh(ct, *pskb, info->timeout * HZ);
1431 1411
1432 /* Set expect timeout */ 1412 /* Set expect timeout */
@@ -1434,9 +1414,9 @@ static int process_rcf(struct sk_buff **pskb, struct nf_conn *ct,
1434 exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3, 1414 exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
1435 info->sig_port[!dir]); 1415 info->sig_port[!dir]);
1436 if (exp) { 1416 if (exp) {
1437 DEBUGP("nf_ct_ras: set Q.931 expect " 1417 pr_debug("nf_ct_ras: set Q.931 expect "
1438 "timeout to %u seconds for", 1418 "timeout to %u seconds for",
1439 info->timeout); 1419 info->timeout);
1440 NF_CT_DUMP_TUPLE(&exp->tuple); 1420 NF_CT_DUMP_TUPLE(&exp->tuple);
1441 set_expect_timeout(exp, info->timeout); 1421 set_expect_timeout(exp, info->timeout);
1442 } 1422 }
@@ -1456,7 +1436,7 @@ static int process_urq(struct sk_buff **pskb, struct nf_conn *ct,
1456 int ret; 1436 int ret;
1457 typeof(set_sig_addr_hook) set_sig_addr; 1437 typeof(set_sig_addr_hook) set_sig_addr;
1458 1438
1459 DEBUGP("nf_ct_ras: URQ\n"); 1439 pr_debug("nf_ct_ras: URQ\n");
1460 1440
1461 set_sig_addr = rcu_dereference(set_sig_addr_hook); 1441 set_sig_addr = rcu_dereference(set_sig_addr_hook);
1462 if (set_sig_addr && ct->status & IPS_NAT_MASK) { 1442 if (set_sig_addr && ct->status & IPS_NAT_MASK) {
@@ -1489,7 +1469,7 @@ static int process_arq(struct sk_buff **pskb, struct nf_conn *ct,
1489 union nf_conntrack_address addr; 1469 union nf_conntrack_address addr;
1490 typeof(set_h225_addr_hook) set_h225_addr; 1470 typeof(set_h225_addr_hook) set_h225_addr;
1491 1471
1492 DEBUGP("nf_ct_ras: ARQ\n"); 1472 pr_debug("nf_ct_ras: ARQ\n");
1493 1473
1494 set_h225_addr = rcu_dereference(set_h225_addr_hook); 1474 set_h225_addr = rcu_dereference(set_h225_addr_hook);
1495 if ((arq->options & eAdmissionRequest_destCallSignalAddress) && 1475 if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
@@ -1532,7 +1512,7 @@ static int process_acf(struct sk_buff **pskb, struct nf_conn *ct,
1532 struct nf_conntrack_expect *exp; 1512 struct nf_conntrack_expect *exp;
1533 typeof(set_sig_addr_hook) set_sig_addr; 1513 typeof(set_sig_addr_hook) set_sig_addr;
1534 1514
1535 DEBUGP("nf_ct_ras: ACF\n"); 1515 pr_debug("nf_ct_ras: ACF\n");
1536 1516
1537 if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress, 1517 if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress,
1538 &addr, &port)) 1518 &addr, &port))
@@ -1548,21 +1528,21 @@ static int process_acf(struct sk_buff **pskb, struct nf_conn *ct,
1548 } 1528 }
1549 1529
1550 /* Need new expect */ 1530 /* Need new expect */
1551 if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) 1531 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1552 return -1; 1532 return -1;
1553 nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1533 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
1554 &ct->tuplehash[!dir].tuple.src.u3, &addr, 1534 &ct->tuplehash[!dir].tuple.src.u3, &addr,
1555 IPPROTO_TCP, NULL, &port); 1535 IPPROTO_TCP, NULL, &port);
1556 exp->flags = NF_CT_EXPECT_PERMANENT; 1536 exp->flags = NF_CT_EXPECT_PERMANENT;
1557 exp->helper = nf_conntrack_helper_q931; 1537 exp->helper = nf_conntrack_helper_q931;
1558 1538
1559 if (nf_conntrack_expect_related(exp) == 0) { 1539 if (nf_ct_expect_related(exp) == 0) {
1560 DEBUGP("nf_ct_ras: expect Q.931 "); 1540 pr_debug("nf_ct_ras: expect Q.931 ");
1561 NF_CT_DUMP_TUPLE(&exp->tuple); 1541 NF_CT_DUMP_TUPLE(&exp->tuple);
1562 } else 1542 } else
1563 ret = -1; 1543 ret = -1;
1564 1544
1565 nf_conntrack_expect_put(exp); 1545 nf_ct_expect_put(exp);
1566 1546
1567 return ret; 1547 return ret;
1568} 1548}
@@ -1574,7 +1554,7 @@ static int process_lrq(struct sk_buff **pskb, struct nf_conn *ct,
1574{ 1554{
1575 typeof(set_ras_addr_hook) set_ras_addr; 1555 typeof(set_ras_addr_hook) set_ras_addr;
1576 1556
1577 DEBUGP("nf_ct_ras: LRQ\n"); 1557 pr_debug("nf_ct_ras: LRQ\n");
1578 1558
1579 set_ras_addr = rcu_dereference(set_ras_addr_hook); 1559 set_ras_addr = rcu_dereference(set_ras_addr_hook);
1580 if (set_ras_addr && ct->status & IPS_NAT_MASK) 1560 if (set_ras_addr && ct->status & IPS_NAT_MASK)
@@ -1594,28 +1574,28 @@ static int process_lcf(struct sk_buff **pskb, struct nf_conn *ct,
1594 union nf_conntrack_address addr; 1574 union nf_conntrack_address addr;
1595 struct nf_conntrack_expect *exp; 1575 struct nf_conntrack_expect *exp;
1596 1576
1597 DEBUGP("nf_ct_ras: LCF\n"); 1577 pr_debug("nf_ct_ras: LCF\n");
1598 1578
1599 if (!get_h225_addr(ct, *data, &lcf->callSignalAddress, 1579 if (!get_h225_addr(ct, *data, &lcf->callSignalAddress,
1600 &addr, &port)) 1580 &addr, &port))
1601 return 0; 1581 return 0;
1602 1582
1603 /* Need new expect for call signal */ 1583 /* Need new expect for call signal */
1604 if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) 1584 if ((exp = nf_ct_expect_alloc(ct)) == NULL)
1605 return -1; 1585 return -1;
1606 nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, 1586 nf_ct_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
1607 &ct->tuplehash[!dir].tuple.src.u3, &addr, 1587 &ct->tuplehash[!dir].tuple.src.u3, &addr,
1608 IPPROTO_TCP, NULL, &port); 1588 IPPROTO_TCP, NULL, &port);
1609 exp->flags = NF_CT_EXPECT_PERMANENT; 1589 exp->flags = NF_CT_EXPECT_PERMANENT;
1610 exp->helper = nf_conntrack_helper_q931; 1590 exp->helper = nf_conntrack_helper_q931;
1611 1591
1612 if (nf_conntrack_expect_related(exp) == 0) { 1592 if (nf_ct_expect_related(exp) == 0) {
1613 DEBUGP("nf_ct_ras: expect Q.931 "); 1593 pr_debug("nf_ct_ras: expect Q.931 ");
1614 NF_CT_DUMP_TUPLE(&exp->tuple); 1594 NF_CT_DUMP_TUPLE(&exp->tuple);
1615 } else 1595 } else
1616 ret = -1; 1596 ret = -1;
1617 1597
1618 nf_conntrack_expect_put(exp); 1598 nf_ct_expect_put(exp);
1619 1599
1620 /* Ignore rasAddress */ 1600 /* Ignore rasAddress */
1621 1601
@@ -1631,7 +1611,7 @@ static int process_irr(struct sk_buff **pskb, struct nf_conn *ct,
1631 typeof(set_ras_addr_hook) set_ras_addr; 1611 typeof(set_ras_addr_hook) set_ras_addr;
1632 typeof(set_sig_addr_hook) set_sig_addr; 1612 typeof(set_sig_addr_hook) set_sig_addr;
1633 1613
1634 DEBUGP("nf_ct_ras: IRR\n"); 1614 pr_debug("nf_ct_ras: IRR\n");
1635 1615
1636 set_ras_addr = rcu_dereference(set_ras_addr_hook); 1616 set_ras_addr = rcu_dereference(set_ras_addr_hook);
1637 if (set_ras_addr && ct->status & IPS_NAT_MASK) { 1617 if (set_ras_addr && ct->status & IPS_NAT_MASK) {
@@ -1690,7 +1670,7 @@ static int process_ras(struct sk_buff **pskb, struct nf_conn *ct,
1690 return process_irr(pskb, ct, ctinfo, data, 1670 return process_irr(pskb, ct, ctinfo, data,
1691 &ras->infoRequestResponse); 1671 &ras->infoRequestResponse);
1692 default: 1672 default:
1693 DEBUGP("nf_ct_ras: RAS message %d\n", ras->choice); 1673 pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
1694 break; 1674 break;
1695 } 1675 }
1696 1676
@@ -1706,7 +1686,7 @@ static int ras_help(struct sk_buff **pskb, unsigned int protoff,
1706 int datalen = 0; 1686 int datalen = 0;
1707 int ret; 1687 int ret;
1708 1688
1709 DEBUGP("nf_ct_ras: skblen = %u\n", (*pskb)->len); 1689 pr_debug("nf_ct_ras: skblen = %u\n", (*pskb)->len);
1710 1690
1711 spin_lock_bh(&nf_h323_lock); 1691 spin_lock_bh(&nf_h323_lock);
1712 1692
@@ -1714,16 +1694,15 @@ static int ras_help(struct sk_buff **pskb, unsigned int protoff,
1714 data = get_udp_data(pskb, protoff, &datalen); 1694 data = get_udp_data(pskb, protoff, &datalen);
1715 if (data == NULL) 1695 if (data == NULL)
1716 goto accept; 1696 goto accept;
1717 DEBUGP("nf_ct_ras: RAS message len=%d ", datalen); 1697 pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
1718 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); 1698 NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
1719 1699
1720 /* Decode RAS message */ 1700 /* Decode RAS message */
1721 ret = DecodeRasMessage(data, datalen, &ras); 1701 ret = DecodeRasMessage(data, datalen, &ras);
1722 if (ret < 0) { 1702 if (ret < 0) {
1723 if (net_ratelimit()) 1703 pr_debug("nf_ct_ras: decoding error: %s\n",
1724 printk("nf_ct_ras: decoding error: %s\n", 1704 ret == H323_ERROR_BOUND ?
1725 ret == H323_ERROR_BOUND ? 1705 "out of bound" : "out of range");
1726 "out of bound" : "out of range");
1727 goto accept; 1706 goto accept;
1728 } 1707 }
1729 1708
@@ -1752,9 +1731,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
1752 .tuple.src.l3num = AF_INET, 1731 .tuple.src.l3num = AF_INET,
1753 .tuple.src.u.udp.port = __constant_htons(RAS_PORT), 1732 .tuple.src.u.udp.port = __constant_htons(RAS_PORT),
1754 .tuple.dst.protonum = IPPROTO_UDP, 1733 .tuple.dst.protonum = IPPROTO_UDP,
1755 .mask.src.l3num = 0xFFFF,
1756 .mask.src.u.udp.port = __constant_htons(0xFFFF),
1757 .mask.dst.protonum = 0xFF,
1758 .help = ras_help, 1734 .help = ras_help,
1759 }, 1735 },
1760 { 1736 {
@@ -1765,9 +1741,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
1765 .tuple.src.l3num = AF_INET6, 1741 .tuple.src.l3num = AF_INET6,
1766 .tuple.src.u.udp.port = __constant_htons(RAS_PORT), 1742 .tuple.src.u.udp.port = __constant_htons(RAS_PORT),
1767 .tuple.dst.protonum = IPPROTO_UDP, 1743 .tuple.dst.protonum = IPPROTO_UDP,
1768 .mask.src.l3num = 0xFFFF,
1769 .mask.src.u.udp.port = __constant_htons(0xFFFF),
1770 .mask.dst.protonum = 0xFF,
1771 .help = ras_help, 1744 .help = ras_help,
1772 }, 1745 },
1773}; 1746};
@@ -1780,7 +1753,7 @@ static void __exit nf_conntrack_h323_fini(void)
1780 nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]); 1753 nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
1781 nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]); 1754 nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
1782 kfree(h323_buffer); 1755 kfree(h323_buffer);
1783 DEBUGP("nf_ct_h323: fini\n"); 1756 pr_debug("nf_ct_h323: fini\n");
1784} 1757}
1785 1758
1786/****************************************************************************/ 1759/****************************************************************************/
@@ -1803,7 +1776,7 @@ static int __init nf_conntrack_h323_init(void)
1803 ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]); 1776 ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]);
1804 if (ret < 0) 1777 if (ret < 0)
1805 goto err4; 1778 goto err4;
1806 DEBUGP("nf_ct_h323: init success\n"); 1779 pr_debug("nf_ct_h323: init success\n");
1807 return 0; 1780 return 0;
1808 1781
1809err4: 1782err4:
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index f868b7fbd9b4..b1179dd3d8c3 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -26,23 +26,43 @@
26#include <net/netfilter/nf_conntrack_l4proto.h> 26#include <net/netfilter/nf_conntrack_l4proto.h>
27#include <net/netfilter/nf_conntrack_helper.h> 27#include <net/netfilter/nf_conntrack_helper.h>
28#include <net/netfilter/nf_conntrack_core.h> 28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/nf_conntrack_extend.h>
29 30
30static __read_mostly LIST_HEAD(helpers); 31static struct hlist_head *nf_ct_helper_hash __read_mostly;
32static unsigned int nf_ct_helper_hsize __read_mostly;
33static unsigned int nf_ct_helper_count __read_mostly;
34static int nf_ct_helper_vmalloc;
35
36
37/* Stupid hash, but collision free for the default registrations of the
38 * helpers currently in the kernel. */
39static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
40{
41 return (((tuple->src.l3num << 8) | tuple->dst.protonum) ^
42 tuple->src.u.all) % nf_ct_helper_hsize;
43}
31 44
32struct nf_conntrack_helper * 45struct nf_conntrack_helper *
33__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) 46__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
34{ 47{
35 struct nf_conntrack_helper *h; 48 struct nf_conntrack_helper *helper;
49 struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
50 struct hlist_node *n;
51 unsigned int h;
52
53 if (!nf_ct_helper_count)
54 return NULL;
36 55
37 list_for_each_entry(h, &helpers, list) { 56 h = helper_hash(tuple);
38 if (nf_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask)) 57 hlist_for_each_entry(helper, n, &nf_ct_helper_hash[h], hnode) {
39 return h; 58 if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
59 return helper;
40 } 60 }
41 return NULL; 61 return NULL;
42} 62}
43 63
44struct nf_conntrack_helper * 64struct nf_conntrack_helper *
45nf_ct_helper_find_get( const struct nf_conntrack_tuple *tuple) 65nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple)
46{ 66{
47 struct nf_conntrack_helper *helper; 67 struct nf_conntrack_helper *helper;
48 68
@@ -75,16 +95,32 @@ struct nf_conntrack_helper *
75__nf_conntrack_helper_find_byname(const char *name) 95__nf_conntrack_helper_find_byname(const char *name)
76{ 96{
77 struct nf_conntrack_helper *h; 97 struct nf_conntrack_helper *h;
98 struct hlist_node *n;
99 unsigned int i;
78 100
79 list_for_each_entry(h, &helpers, list) { 101 for (i = 0; i < nf_ct_helper_hsize; i++) {
80 if (!strcmp(h->name, name)) 102 hlist_for_each_entry(h, n, &nf_ct_helper_hash[i], hnode) {
81 return h; 103 if (!strcmp(h->name, name))
104 return h;
105 }
82 } 106 }
83
84 return NULL; 107 return NULL;
85} 108}
86EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname); 109EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname);
87 110
111struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
112{
113 struct nf_conn_help *help;
114
115 help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp);
116 if (help)
117 INIT_HLIST_HEAD(&help->expectations);
118 else
119 pr_debug("failed to add helper extension area");
120 return help;
121}
122EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
123
88static inline int unhelp(struct nf_conntrack_tuple_hash *i, 124static inline int unhelp(struct nf_conntrack_tuple_hash *i,
89 const struct nf_conntrack_helper *me) 125 const struct nf_conntrack_helper *me)
90{ 126{
@@ -100,20 +136,13 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
100 136
101int nf_conntrack_helper_register(struct nf_conntrack_helper *me) 137int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
102{ 138{
103 int size, ret; 139 unsigned int h = helper_hash(&me->tuple);
104 140
105 BUG_ON(me->timeout == 0); 141 BUG_ON(me->timeout == 0);
106 142
107 size = ALIGN(sizeof(struct nf_conn), __alignof__(struct nf_conn_help)) +
108 sizeof(struct nf_conn_help);
109 ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
110 size);
111 if (ret < 0) {
112 printk(KERN_ERR "nf_conntrack_helper_register: Unable to create slab cache for conntracks\n");
113 return ret;
114 }
115 write_lock_bh(&nf_conntrack_lock); 143 write_lock_bh(&nf_conntrack_lock);
116 list_add(&me->list, &helpers); 144 hlist_add_head(&me->hnode, &nf_ct_helper_hash[h]);
145 nf_ct_helper_count++;
117 write_unlock_bh(&nf_conntrack_lock); 146 write_unlock_bh(&nf_conntrack_lock);
118 147
119 return 0; 148 return 0;
@@ -122,29 +151,34 @@ EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
122 151
123void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) 152void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
124{ 153{
125 unsigned int i;
126 struct nf_conntrack_tuple_hash *h; 154 struct nf_conntrack_tuple_hash *h;
127 struct nf_conntrack_expect *exp, *tmp; 155 struct nf_conntrack_expect *exp;
156 struct hlist_node *n, *next;
157 unsigned int i;
128 158
129 /* Need write lock here, to delete helper. */ 159 /* Need write lock here, to delete helper. */
130 write_lock_bh(&nf_conntrack_lock); 160 write_lock_bh(&nf_conntrack_lock);
131 list_del(&me->list); 161 hlist_del(&me->hnode);
162 nf_ct_helper_count--;
132 163
133 /* Get rid of expectations */ 164 /* Get rid of expectations */
134 list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) { 165 for (i = 0; i < nf_ct_expect_hsize; i++) {
135 struct nf_conn_help *help = nfct_help(exp->master); 166 hlist_for_each_entry_safe(exp, n, next,
136 if ((help->helper == me || exp->helper == me) && 167 &nf_ct_expect_hash[i], hnode) {
137 del_timer(&exp->timeout)) { 168 struct nf_conn_help *help = nfct_help(exp->master);
138 nf_ct_unlink_expect(exp); 169 if ((help->helper == me || exp->helper == me) &&
139 nf_conntrack_expect_put(exp); 170 del_timer(&exp->timeout)) {
171 nf_ct_unlink_expect(exp);
172 nf_ct_expect_put(exp);
173 }
140 } 174 }
141 } 175 }
142 176
143 /* Get rid of expecteds, set helpers to NULL. */ 177 /* Get rid of expecteds, set helpers to NULL. */
144 list_for_each_entry(h, &unconfirmed, list) 178 hlist_for_each_entry(h, n, &unconfirmed, hnode)
145 unhelp(h, me); 179 unhelp(h, me);
146 for (i = 0; i < nf_conntrack_htable_size; i++) { 180 for (i = 0; i < nf_conntrack_htable_size; i++) {
147 list_for_each_entry(h, &nf_conntrack_hash[i], list) 181 hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode)
148 unhelp(h, me); 182 unhelp(h, me);
149 } 183 }
150 write_unlock_bh(&nf_conntrack_lock); 184 write_unlock_bh(&nf_conntrack_lock);
@@ -153,3 +187,38 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
153 synchronize_net(); 187 synchronize_net();
154} 188}
155EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); 189EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
190
191static struct nf_ct_ext_type helper_extend __read_mostly = {
192 .len = sizeof(struct nf_conn_help),
193 .align = __alignof__(struct nf_conn_help),
194 .id = NF_CT_EXT_HELPER,
195};
196
197int nf_conntrack_helper_init()
198{
199 int err;
200
201 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
202 nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
203 &nf_ct_helper_vmalloc);
204 if (!nf_ct_helper_hash)
205 return -ENOMEM;
206
207 err = nf_ct_extend_register(&helper_extend);
208 if (err < 0)
209 goto err1;
210
211 return 0;
212
213err1:
214 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
215 nf_ct_helper_hsize);
216 return err;
217}
218
219void nf_conntrack_helper_fini()
220{
221 nf_ct_extend_unregister(&helper_extend);
222 nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
223 nf_ct_helper_hsize);
224}
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 43ccd0e2e8ae..1562ca97a349 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -12,6 +12,7 @@
12#include <linux/moduleparam.h> 12#include <linux/moduleparam.h>
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/in.h> 14#include <linux/in.h>
15#include <linux/ip.h>
15#include <linux/tcp.h> 16#include <linux/tcp.h>
16#include <linux/netfilter.h> 17#include <linux/netfilter.h>
17 18
@@ -55,13 +56,6 @@ static const char *dccprotos[] = {
55 56
56#define MINMATCHLEN 5 57#define MINMATCHLEN 5
57 58
58#if 0
59#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \
60 __FILE__, __FUNCTION__ , ## args)
61#else
62#define DEBUGP(format, args...)
63#endif
64
65/* tries to get the ip_addr and port out of a dcc command 59/* tries to get the ip_addr and port out of a dcc command
66 * return value: -1 on failure, 0 on success 60 * return value: -1 on failure, 0 on success
67 * data pointer to first byte of DCC command data 61 * data pointer to first byte of DCC command data
@@ -99,6 +93,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
99 struct nf_conn *ct, enum ip_conntrack_info ctinfo) 93 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
100{ 94{
101 unsigned int dataoff; 95 unsigned int dataoff;
96 struct iphdr *iph;
102 struct tcphdr _tcph, *th; 97 struct tcphdr _tcph, *th;
103 char *data, *data_limit, *ib_ptr; 98 char *data, *data_limit, *ib_ptr;
104 int dir = CTINFO2DIR(ctinfo); 99 int dir = CTINFO2DIR(ctinfo);
@@ -148,9 +143,10 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
148 data += 5; 143 data += 5;
149 /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ 144 /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
150 145
151 DEBUGP("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u...\n", 146 iph = ip_hdr(*pskb);
152 NIPQUAD(iph->saddr), ntohs(th->source), 147 pr_debug("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u\n",
153 NIPQUAD(iph->daddr), ntohs(th->dest)); 148 NIPQUAD(iph->saddr), ntohs(th->source),
149 NIPQUAD(iph->daddr), ntohs(th->dest));
154 150
155 for (i = 0; i < ARRAY_SIZE(dccprotos); i++) { 151 for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
156 if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) { 152 if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
@@ -158,18 +154,18 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
158 continue; 154 continue;
159 } 155 }
160 data += strlen(dccprotos[i]); 156 data += strlen(dccprotos[i]);
161 DEBUGP("DCC %s detected\n", dccprotos[i]); 157 pr_debug("DCC %s detected\n", dccprotos[i]);
162 158
163 /* we have at least 159 /* we have at least
164 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid 160 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
165 * data left (== 14/13 bytes) */ 161 * data left (== 14/13 bytes) */
166 if (parse_dcc((char *)data, data_limit, &dcc_ip, 162 if (parse_dcc((char *)data, data_limit, &dcc_ip,
167 &dcc_port, &addr_beg_p, &addr_end_p)) { 163 &dcc_port, &addr_beg_p, &addr_end_p)) {
168 DEBUGP("unable to parse dcc command\n"); 164 pr_debug("unable to parse dcc command\n");
169 continue; 165 continue;
170 } 166 }
171 DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n", 167 pr_debug("DCC bound ip/port: %u.%u.%u.%u:%u\n",
172 HIPQUAD(dcc_ip), dcc_port); 168 HIPQUAD(dcc_ip), dcc_port);
173 169
174 /* dcc_ip can be the internal OR external (NAT'ed) IP */ 170 /* dcc_ip can be the internal OR external (NAT'ed) IP */
175 tuple = &ct->tuplehash[dir].tuple; 171 tuple = &ct->tuplehash[dir].tuple;
@@ -184,16 +180,16 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
184 continue; 180 continue;
185 } 181 }
186 182
187 exp = nf_conntrack_expect_alloc(ct); 183 exp = nf_ct_expect_alloc(ct);
188 if (exp == NULL) { 184 if (exp == NULL) {
189 ret = NF_DROP; 185 ret = NF_DROP;
190 goto out; 186 goto out;
191 } 187 }
192 tuple = &ct->tuplehash[!dir].tuple; 188 tuple = &ct->tuplehash[!dir].tuple;
193 port = htons(dcc_port); 189 port = htons(dcc_port);
194 nf_conntrack_expect_init(exp, tuple->src.l3num, 190 nf_ct_expect_init(exp, tuple->src.l3num,
195 NULL, &tuple->dst.u3, 191 NULL, &tuple->dst.u3,
196 IPPROTO_TCP, NULL, &port); 192 IPPROTO_TCP, NULL, &port);
197 193
198 nf_nat_irc = rcu_dereference(nf_nat_irc_hook); 194 nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
199 if (nf_nat_irc && ct->status & IPS_NAT_MASK) 195 if (nf_nat_irc && ct->status & IPS_NAT_MASK)
@@ -201,9 +197,9 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
201 addr_beg_p - ib_ptr, 197 addr_beg_p - ib_ptr,
202 addr_end_p - addr_beg_p, 198 addr_end_p - addr_beg_p,
203 exp); 199 exp);
204 else if (nf_conntrack_expect_related(exp) != 0) 200 else if (nf_ct_expect_related(exp) != 0)
205 ret = NF_DROP; 201 ret = NF_DROP;
206 nf_conntrack_expect_put(exp); 202 nf_ct_expect_put(exp);
207 goto out; 203 goto out;
208 } 204 }
209 } 205 }
@@ -239,9 +235,6 @@ static int __init nf_conntrack_irc_init(void)
239 irc[i].tuple.src.l3num = AF_INET; 235 irc[i].tuple.src.l3num = AF_INET;
240 irc[i].tuple.src.u.tcp.port = htons(ports[i]); 236 irc[i].tuple.src.u.tcp.port = htons(ports[i]);
241 irc[i].tuple.dst.protonum = IPPROTO_TCP; 237 irc[i].tuple.dst.protonum = IPPROTO_TCP;
242 irc[i].mask.src.l3num = 0xFFFF;
243 irc[i].mask.src.u.tcp.port = htons(0xFFFF);
244 irc[i].mask.dst.protonum = 0xFF;
245 irc[i].max_expected = max_dcc_channels; 238 irc[i].max_expected = max_dcc_channels;
246 irc[i].timeout = dcc_timeout; 239 irc[i].timeout = dcc_timeout;
247 irc[i].me = THIS_MODULE; 240 irc[i].me = THIS_MODULE;
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
index cbd96f3c1b89..b1bfa207a850 100644
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -31,12 +31,6 @@
31#include <net/netfilter/nf_conntrack_core.h> 31#include <net/netfilter/nf_conntrack_core.h>
32#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 32#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
33 33
34#if 0
35#define DEBUGP printk
36#else
37#define DEBUGP(format, args...)
38#endif
39
40static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 34static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
41 struct nf_conntrack_tuple *tuple) 35 struct nf_conntrack_tuple *tuple)
42{ 36{
@@ -76,12 +70,6 @@ generic_prepare(struct sk_buff **pskb, unsigned int hooknum,
76} 70}
77 71
78 72
79static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple)
80
81{
82 return NF_CT_F_BASIC;
83}
84
85struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = { 73struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = {
86 .l3proto = PF_UNSPEC, 74 .l3proto = PF_UNSPEC,
87 .name = "unknown", 75 .name = "unknown",
@@ -90,6 +78,5 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = {
90 .print_tuple = generic_print_tuple, 78 .print_tuple = generic_print_tuple,
91 .print_conntrack = generic_print_conntrack, 79 .print_conntrack = generic_print_conntrack,
92 .prepare = generic_prepare, 80 .prepare = generic_prepare,
93 .get_features = generic_get_features,
94}; 81};
95EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); 82EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic);
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 1093478cc007..1d59fabeb5f7 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -74,7 +74,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
74 if (mask == 0) 74 if (mask == 0)
75 goto out; 75 goto out;
76 76
77 exp = nf_conntrack_expect_alloc(ct); 77 exp = nf_ct_expect_alloc(ct);
78 if (exp == NULL) 78 if (exp == NULL)
79 goto out; 79 goto out;
80 80
@@ -83,16 +83,13 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
83 83
84 exp->mask.src.u3.ip = mask; 84 exp->mask.src.u3.ip = mask;
85 exp->mask.src.u.udp.port = htons(0xFFFF); 85 exp->mask.src.u.udp.port = htons(0xFFFF);
86 exp->mask.dst.u3.ip = htonl(0xFFFFFFFF);
87 exp->mask.dst.u.udp.port = htons(0xFFFF);
88 exp->mask.dst.protonum = 0xFF;
89 86
90 exp->expectfn = NULL; 87 exp->expectfn = NULL;
91 exp->flags = NF_CT_EXPECT_PERMANENT; 88 exp->flags = NF_CT_EXPECT_PERMANENT;
92 exp->helper = NULL; 89 exp->helper = NULL;
93 90
94 nf_conntrack_expect_related(exp); 91 nf_ct_expect_related(exp);
95 nf_conntrack_expect_put(exp); 92 nf_ct_expect_put(exp);
96 93
97 nf_ct_refresh(ct, *pskb, timeout * HZ); 94 nf_ct_refresh(ct, *pskb, timeout * HZ);
98out: 95out:
@@ -104,9 +101,6 @@ static struct nf_conntrack_helper helper __read_mostly = {
104 .tuple.src.l3num = AF_INET, 101 .tuple.src.l3num = AF_INET,
105 .tuple.src.u.udp.port = __constant_htons(NMBD_PORT), 102 .tuple.src.u.udp.port = __constant_htons(NMBD_PORT),
106 .tuple.dst.protonum = IPPROTO_UDP, 103 .tuple.dst.protonum = IPPROTO_UDP,
107 .mask.src.l3num = 0xFFFF,
108 .mask.src.u.udp.port = __constant_htons(0xFFFF),
109 .mask.dst.protonum = 0xFF,
110 .max_expected = 1, 104 .max_expected = 1,
111 .me = THIS_MODULE, 105 .me = THIS_MODULE,
112 .help = help, 106 .help = help,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d0fe3d769828..6f89b105a205 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -428,7 +428,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
428{ 428{
429 struct nf_conn *ct, *last; 429 struct nf_conn *ct, *last;
430 struct nf_conntrack_tuple_hash *h; 430 struct nf_conntrack_tuple_hash *h;
431 struct list_head *i; 431 struct hlist_node *n;
432 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 432 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
433 u_int8_t l3proto = nfmsg->nfgen_family; 433 u_int8_t l3proto = nfmsg->nfgen_family;
434 434
@@ -436,8 +436,8 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
436 last = (struct nf_conn *)cb->args[1]; 436 last = (struct nf_conn *)cb->args[1];
437 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 437 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
438restart: 438restart:
439 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { 439 hlist_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
440 h = (struct nf_conntrack_tuple_hash *) i; 440 hnode) {
441 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 441 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
442 continue; 442 continue;
443 ct = nf_ct_tuplehash_to_ctrack(h); 443 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -689,7 +689,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
689 if (err < 0) 689 if (err < 0)
690 return err; 690 return err;
691 691
692 h = nf_conntrack_find_get(&tuple, NULL); 692 h = nf_conntrack_find_get(&tuple);
693 if (!h) 693 if (!h)
694 return -ENOENT; 694 return -ENOENT;
695 695
@@ -744,7 +744,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
744 if (err < 0) 744 if (err < 0)
745 return err; 745 return err;
746 746
747 h = nf_conntrack_find_get(&tuple, NULL); 747 h = nf_conntrack_find_get(&tuple);
748 if (!h) 748 if (!h)
749 return -ENOENT; 749 return -ENOENT;
750 750
@@ -856,23 +856,23 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
856 return 0; 856 return 0;
857 } 857 }
858 858
859 if (!help) {
860 /* FIXME: we need to reallocate and rehash */
861 return -EBUSY;
862 }
863
864 helper = __nf_conntrack_helper_find_byname(helpname); 859 helper = __nf_conntrack_helper_find_byname(helpname);
865 if (helper == NULL) 860 if (helper == NULL)
866 return -EINVAL; 861 return -EINVAL;
867 862
868 if (help->helper == helper) 863 if (help) {
869 return 0; 864 if (help->helper == helper)
870 865 return 0;
871 if (help->helper) 866 if (help->helper)
872 return -EBUSY; 867 return -EBUSY;
868 /* need to zero data of old helper */
869 memset(&help->help, 0, sizeof(help->help));
870 } else {
871 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
872 if (help == NULL)
873 return -ENOMEM;
874 }
873 875
874 /* need to zero data of old helper */
875 memset(&help->help, 0, sizeof(help->help));
876 rcu_assign_pointer(help->helper, helper); 876 rcu_assign_pointer(help->helper, helper);
877 877
878 return 0; 878 return 0;
@@ -957,7 +957,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
957 struct nf_conn *ct; 957 struct nf_conn *ct;
958 int err = -EINVAL; 958 int err = -EINVAL;
959 struct nf_conn_help *help; 959 struct nf_conn_help *help;
960 struct nf_conntrack_helper *helper = NULL; 960 struct nf_conntrack_helper *helper;
961 961
962 ct = nf_conntrack_alloc(otuple, rtuple); 962 ct = nf_conntrack_alloc(otuple, rtuple);
963 if (ct == NULL || IS_ERR(ct)) 963 if (ct == NULL || IS_ERR(ct))
@@ -987,9 +987,14 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
987 ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1])); 987 ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
988#endif 988#endif
989 989
990 help = nfct_help(ct); 990 helper = nf_ct_helper_find_get(rtuple);
991 if (help) { 991 if (helper) {
992 helper = nf_ct_helper_find_get(rtuple); 992 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
993 if (help == NULL) {
994 nf_ct_helper_put(helper);
995 err = -ENOMEM;
996 goto err;
997 }
993 /* not in hash table yet so not strictly necessary */ 998 /* not in hash table yet so not strictly necessary */
994 rcu_assign_pointer(help->helper, helper); 999 rcu_assign_pointer(help->helper, helper);
995 } 1000 }
@@ -1089,22 +1094,29 @@ nfattr_failure:
1089static inline int 1094static inline int
1090ctnetlink_exp_dump_mask(struct sk_buff *skb, 1095ctnetlink_exp_dump_mask(struct sk_buff *skb,
1091 const struct nf_conntrack_tuple *tuple, 1096 const struct nf_conntrack_tuple *tuple,
1092 const struct nf_conntrack_tuple *mask) 1097 const struct nf_conntrack_tuple_mask *mask)
1093{ 1098{
1094 int ret; 1099 int ret;
1095 struct nf_conntrack_l3proto *l3proto; 1100 struct nf_conntrack_l3proto *l3proto;
1096 struct nf_conntrack_l4proto *l4proto; 1101 struct nf_conntrack_l4proto *l4proto;
1097 struct nfattr *nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK); 1102 struct nf_conntrack_tuple m;
1103 struct nfattr *nest_parms;
1104
1105 memset(&m, 0xFF, sizeof(m));
1106 m.src.u.all = mask->src.u.all;
1107 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
1108
1109 nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK);
1098 1110
1099 l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); 1111 l3proto = nf_ct_l3proto_find_get(tuple->src.l3num);
1100 ret = ctnetlink_dump_tuples_ip(skb, mask, l3proto); 1112 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
1101 nf_ct_l3proto_put(l3proto); 1113 nf_ct_l3proto_put(l3proto);
1102 1114
1103 if (unlikely(ret < 0)) 1115 if (unlikely(ret < 0))
1104 goto nfattr_failure; 1116 goto nfattr_failure;
1105 1117
1106 l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); 1118 l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum);
1107 ret = ctnetlink_dump_tuples_proto(skb, mask, l4proto); 1119 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
1108 nf_ct_l4proto_put(l4proto); 1120 nf_ct_l4proto_put(l4proto);
1109 if (unlikely(ret < 0)) 1121 if (unlikely(ret < 0))
1110 goto nfattr_failure; 1122 goto nfattr_failure;
@@ -1223,32 +1235,52 @@ nfattr_failure:
1223 return NOTIFY_DONE; 1235 return NOTIFY_DONE;
1224} 1236}
1225#endif 1237#endif
1238static int ctnetlink_exp_done(struct netlink_callback *cb)
1239{
1240 if (cb->args[1])
1241 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
1242 return 0;
1243}
1226 1244
1227static int 1245static int
1228ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 1246ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1229{ 1247{
1230 struct nf_conntrack_expect *exp = NULL; 1248 struct nf_conntrack_expect *exp, *last;
1231 struct list_head *i;
1232 u_int32_t *id = (u_int32_t *) &cb->args[0];
1233 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); 1249 struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
1250 struct hlist_node *n;
1234 u_int8_t l3proto = nfmsg->nfgen_family; 1251 u_int8_t l3proto = nfmsg->nfgen_family;
1235 1252
1236 read_lock_bh(&nf_conntrack_lock); 1253 read_lock_bh(&nf_conntrack_lock);
1237 list_for_each_prev(i, &nf_conntrack_expect_list) { 1254 last = (struct nf_conntrack_expect *)cb->args[1];
1238 exp = (struct nf_conntrack_expect *) i; 1255 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
1239 if (l3proto && exp->tuple.src.l3num != l3proto) 1256restart:
1240 continue; 1257 hlist_for_each_entry(exp, n, &nf_ct_expect_hash[cb->args[0]],
1241 if (exp->id <= *id) 1258 hnode) {
1242 continue; 1259 if (l3proto && exp->tuple.src.l3num != l3proto)
1243 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).pid, 1260 continue;
1244 cb->nlh->nlmsg_seq, 1261 if (cb->args[1]) {
1245 IPCTNL_MSG_EXP_NEW, 1262 if (exp != last)
1246 1, exp) < 0) 1263 continue;
1247 goto out; 1264 cb->args[1] = 0;
1248 *id = exp->id; 1265 }
1266 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).pid,
1267 cb->nlh->nlmsg_seq,
1268 IPCTNL_MSG_EXP_NEW,
1269 1, exp) < 0) {
1270 atomic_inc(&exp->use);
1271 cb->args[1] = (unsigned long)exp;
1272 goto out;
1273 }
1274 }
1275 if (cb->args[1]) {
1276 cb->args[1] = 0;
1277 goto restart;
1278 }
1249 } 1279 }
1250out: 1280out:
1251 read_unlock_bh(&nf_conntrack_lock); 1281 read_unlock_bh(&nf_conntrack_lock);
1282 if (last)
1283 nf_ct_expect_put(last);
1252 1284
1253 return skb->len; 1285 return skb->len;
1254} 1286}
@@ -1275,7 +1307,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1275 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1307 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1276 return netlink_dump_start(ctnl, skb, nlh, 1308 return netlink_dump_start(ctnl, skb, nlh,
1277 ctnetlink_exp_dump_table, 1309 ctnetlink_exp_dump_table,
1278 ctnetlink_done); 1310 ctnetlink_exp_done);
1279 } 1311 }
1280 1312
1281 if (cda[CTA_EXPECT_MASTER-1]) 1313 if (cda[CTA_EXPECT_MASTER-1])
@@ -1286,14 +1318,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1286 if (err < 0) 1318 if (err < 0)
1287 return err; 1319 return err;
1288 1320
1289 exp = nf_conntrack_expect_find_get(&tuple); 1321 exp = nf_ct_expect_find_get(&tuple);
1290 if (!exp) 1322 if (!exp)
1291 return -ENOENT; 1323 return -ENOENT;
1292 1324
1293 if (cda[CTA_EXPECT_ID-1]) { 1325 if (cda[CTA_EXPECT_ID-1]) {
1294 __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); 1326 __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
1295 if (exp->id != ntohl(id)) { 1327 if (exp->id != ntohl(id)) {
1296 nf_conntrack_expect_put(exp); 1328 nf_ct_expect_put(exp);
1297 return -ENOENT; 1329 return -ENOENT;
1298 } 1330 }
1299 } 1331 }
@@ -1309,14 +1341,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1309 if (err <= 0) 1341 if (err <= 0)
1310 goto free; 1342 goto free;
1311 1343
1312 nf_conntrack_expect_put(exp); 1344 nf_ct_expect_put(exp);
1313 1345
1314 return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1346 return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
1315 1347
1316free: 1348free:
1317 kfree_skb(skb2); 1349 kfree_skb(skb2);
1318out: 1350out:
1319 nf_conntrack_expect_put(exp); 1351 nf_ct_expect_put(exp);
1320 return err; 1352 return err;
1321} 1353}
1322 1354
@@ -1324,11 +1356,13 @@ static int
1324ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, 1356ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1325 struct nlmsghdr *nlh, struct nfattr *cda[]) 1357 struct nlmsghdr *nlh, struct nfattr *cda[])
1326{ 1358{
1327 struct nf_conntrack_expect *exp, *tmp; 1359 struct nf_conntrack_expect *exp;
1328 struct nf_conntrack_tuple tuple; 1360 struct nf_conntrack_tuple tuple;
1329 struct nf_conntrack_helper *h; 1361 struct nf_conntrack_helper *h;
1330 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); 1362 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
1363 struct hlist_node *n, *next;
1331 u_int8_t u3 = nfmsg->nfgen_family; 1364 u_int8_t u3 = nfmsg->nfgen_family;
1365 unsigned int i;
1332 int err; 1366 int err;
1333 1367
1334 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) 1368 if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
@@ -1341,25 +1375,26 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1341 return err; 1375 return err;
1342 1376
1343 /* bump usage count to 2 */ 1377 /* bump usage count to 2 */
1344 exp = nf_conntrack_expect_find_get(&tuple); 1378 exp = nf_ct_expect_find_get(&tuple);
1345 if (!exp) 1379 if (!exp)
1346 return -ENOENT; 1380 return -ENOENT;
1347 1381
1348 if (cda[CTA_EXPECT_ID-1]) { 1382 if (cda[CTA_EXPECT_ID-1]) {
1349 __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); 1383 __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
1350 if (exp->id != ntohl(id)) { 1384 if (exp->id != ntohl(id)) {
1351 nf_conntrack_expect_put(exp); 1385 nf_ct_expect_put(exp);
1352 return -ENOENT; 1386 return -ENOENT;
1353 } 1387 }
1354 } 1388 }
1355 1389
1356 /* after list removal, usage count == 1 */ 1390 /* after list removal, usage count == 1 */
1357 nf_conntrack_unexpect_related(exp); 1391 nf_ct_unexpect_related(exp);
1358 /* have to put what we 'get' above. 1392 /* have to put what we 'get' above.
1359 * after this line usage count == 0 */ 1393 * after this line usage count == 0 */
1360 nf_conntrack_expect_put(exp); 1394 nf_ct_expect_put(exp);
1361 } else if (cda[CTA_EXPECT_HELP_NAME-1]) { 1395 } else if (cda[CTA_EXPECT_HELP_NAME-1]) {
1362 char *name = NFA_DATA(cda[CTA_EXPECT_HELP_NAME-1]); 1396 char *name = NFA_DATA(cda[CTA_EXPECT_HELP_NAME-1]);
1397 struct nf_conn_help *m_help;
1363 1398
1364 /* delete all expectations for this helper */ 1399 /* delete all expectations for this helper */
1365 write_lock_bh(&nf_conntrack_lock); 1400 write_lock_bh(&nf_conntrack_lock);
@@ -1368,24 +1403,30 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1368 write_unlock_bh(&nf_conntrack_lock); 1403 write_unlock_bh(&nf_conntrack_lock);
1369 return -EINVAL; 1404 return -EINVAL;
1370 } 1405 }
1371 list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, 1406 for (i = 0; i < nf_ct_expect_hsize; i++) {
1372 list) { 1407 hlist_for_each_entry_safe(exp, n, next,
1373 struct nf_conn_help *m_help = nfct_help(exp->master); 1408 &nf_ct_expect_hash[i],
1374 if (m_help->helper == h 1409 hnode) {
1375 && del_timer(&exp->timeout)) { 1410 m_help = nfct_help(exp->master);
1376 nf_ct_unlink_expect(exp); 1411 if (m_help->helper == h
1377 nf_conntrack_expect_put(exp); 1412 && del_timer(&exp->timeout)) {
1413 nf_ct_unlink_expect(exp);
1414 nf_ct_expect_put(exp);
1415 }
1378 } 1416 }
1379 } 1417 }
1380 write_unlock_bh(&nf_conntrack_lock); 1418 write_unlock_bh(&nf_conntrack_lock);
1381 } else { 1419 } else {
1382 /* This basically means we have to flush everything*/ 1420 /* This basically means we have to flush everything*/
1383 write_lock_bh(&nf_conntrack_lock); 1421 write_lock_bh(&nf_conntrack_lock);
1384 list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, 1422 for (i = 0; i < nf_ct_expect_hsize; i++) {
1385 list) { 1423 hlist_for_each_entry_safe(exp, n, next,
1386 if (del_timer(&exp->timeout)) { 1424 &nf_ct_expect_hash[i],
1387 nf_ct_unlink_expect(exp); 1425 hnode) {
1388 nf_conntrack_expect_put(exp); 1426 if (del_timer(&exp->timeout)) {
1427 nf_ct_unlink_expect(exp);
1428 nf_ct_expect_put(exp);
1429 }
1389 } 1430 }
1390 } 1431 }
1391 write_unlock_bh(&nf_conntrack_lock); 1432 write_unlock_bh(&nf_conntrack_lock);
@@ -1421,7 +1462,7 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3)
1421 return err; 1462 return err;
1422 1463
1423 /* Look for master conntrack of this expectation */ 1464 /* Look for master conntrack of this expectation */
1424 h = nf_conntrack_find_get(&master_tuple, NULL); 1465 h = nf_conntrack_find_get(&master_tuple);
1425 if (!h) 1466 if (!h)
1426 return -ENOENT; 1467 return -ENOENT;
1427 ct = nf_ct_tuplehash_to_ctrack(h); 1468 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1433,7 +1474,7 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3)
1433 goto out; 1474 goto out;
1434 } 1475 }
1435 1476
1436 exp = nf_conntrack_expect_alloc(ct); 1477 exp = nf_ct_expect_alloc(ct);
1437 if (!exp) { 1478 if (!exp) {
1438 err = -ENOMEM; 1479 err = -ENOMEM;
1439 goto out; 1480 goto out;
@@ -1444,10 +1485,11 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3)
1444 exp->master = ct; 1485 exp->master = ct;
1445 exp->helper = NULL; 1486 exp->helper = NULL;
1446 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); 1487 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
1447 memcpy(&exp->mask, &mask, sizeof(struct nf_conntrack_tuple)); 1488 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
1489 exp->mask.src.u.all = mask.src.u.all;
1448 1490
1449 err = nf_conntrack_expect_related(exp); 1491 err = nf_ct_expect_related(exp);
1450 nf_conntrack_expect_put(exp); 1492 nf_ct_expect_put(exp);
1451 1493
1452out: 1494out:
1453 nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); 1495 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
@@ -1477,7 +1519,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1477 return err; 1519 return err;
1478 1520
1479 write_lock_bh(&nf_conntrack_lock); 1521 write_lock_bh(&nf_conntrack_lock);
1480 exp = __nf_conntrack_expect_find(&tuple); 1522 exp = __nf_ct_expect_find(&tuple);
1481 1523
1482 if (!exp) { 1524 if (!exp) {
1483 write_unlock_bh(&nf_conntrack_lock); 1525 write_unlock_bh(&nf_conntrack_lock);
@@ -1567,7 +1609,7 @@ static int __init ctnetlink_init(void)
1567 goto err_unreg_exp_subsys; 1609 goto err_unreg_exp_subsys;
1568 } 1610 }
1569 1611
1570 ret = nf_conntrack_expect_register_notifier(&ctnl_notifier_exp); 1612 ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
1571 if (ret < 0) { 1613 if (ret < 0) {
1572 printk("ctnetlink_init: cannot expect register notifier.\n"); 1614 printk("ctnetlink_init: cannot expect register notifier.\n");
1573 goto err_unreg_notifier; 1615 goto err_unreg_notifier;
@@ -1593,7 +1635,7 @@ static void __exit ctnetlink_exit(void)
1593 printk("ctnetlink: unregistering from nfnetlink.\n"); 1635 printk("ctnetlink: unregistering from nfnetlink.\n");
1594 1636
1595#ifdef CONFIG_NF_CONNTRACK_EVENTS 1637#ifdef CONFIG_NF_CONNTRACK_EVENTS
1596 nf_conntrack_expect_unregister_notifier(&ctnl_notifier_exp); 1638 nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
1597 nf_conntrack_unregister_notifier(&ctnl_notifier); 1639 nf_conntrack_unregister_notifier(&ctnl_notifier);
1598#endif 1640#endif
1599 1641
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 115bcb5d5a7c..b0804199ab59 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -65,7 +65,7 @@ void
65 struct nf_conntrack_expect *exp) __read_mostly; 65 struct nf_conntrack_expect *exp) __read_mostly;
66EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); 66EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);
67 67
68#if 0 68#ifdef DEBUG
69/* PptpControlMessageType names */ 69/* PptpControlMessageType names */
70const char *pptp_msg_name[] = { 70const char *pptp_msg_name[] = {
71 "UNKNOWN_MESSAGE", 71 "UNKNOWN_MESSAGE",
@@ -86,9 +86,6 @@ const char *pptp_msg_name[] = {
86 "SET_LINK_INFO" 86 "SET_LINK_INFO"
87}; 87};
88EXPORT_SYMBOL(pptp_msg_name); 88EXPORT_SYMBOL(pptp_msg_name);
89#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
90#else
91#define DEBUGP(format, args...)
92#endif 89#endif
93 90
94#define SECS *HZ 91#define SECS *HZ
@@ -102,7 +99,7 @@ static void pptp_expectfn(struct nf_conn *ct,
102 struct nf_conntrack_expect *exp) 99 struct nf_conntrack_expect *exp)
103{ 100{
104 typeof(nf_nat_pptp_hook_expectfn) nf_nat_pptp_expectfn; 101 typeof(nf_nat_pptp_hook_expectfn) nf_nat_pptp_expectfn;
105 DEBUGP("increasing timeouts\n"); 102 pr_debug("increasing timeouts\n");
106 103
107 /* increase timeout of GRE data channel conntrack entry */ 104 /* increase timeout of GRE data channel conntrack entry */
108 ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; 105 ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;
@@ -121,17 +118,17 @@ static void pptp_expectfn(struct nf_conn *ct,
121 118
122 /* obviously this tuple inversion only works until you do NAT */ 119 /* obviously this tuple inversion only works until you do NAT */
123 nf_ct_invert_tuplepr(&inv_t, &exp->tuple); 120 nf_ct_invert_tuplepr(&inv_t, &exp->tuple);
124 DEBUGP("trying to unexpect other dir: "); 121 pr_debug("trying to unexpect other dir: ");
125 NF_CT_DUMP_TUPLE(&inv_t); 122 NF_CT_DUMP_TUPLE(&inv_t);
126 123
127 exp_other = nf_conntrack_expect_find_get(&inv_t); 124 exp_other = nf_ct_expect_find_get(&inv_t);
128 if (exp_other) { 125 if (exp_other) {
129 /* delete other expectation. */ 126 /* delete other expectation. */
130 DEBUGP("found\n"); 127 pr_debug("found\n");
131 nf_conntrack_unexpect_related(exp_other); 128 nf_ct_unexpect_related(exp_other);
132 nf_conntrack_expect_put(exp_other); 129 nf_ct_expect_put(exp_other);
133 } else { 130 } else {
134 DEBUGP("not found\n"); 131 pr_debug("not found\n");
135 } 132 }
136 } 133 }
137 rcu_read_unlock(); 134 rcu_read_unlock();
@@ -143,13 +140,13 @@ static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
143 struct nf_conntrack_expect *exp; 140 struct nf_conntrack_expect *exp;
144 struct nf_conn *sibling; 141 struct nf_conn *sibling;
145 142
146 DEBUGP("trying to timeout ct or exp for tuple "); 143 pr_debug("trying to timeout ct or exp for tuple ");
147 NF_CT_DUMP_TUPLE(t); 144 NF_CT_DUMP_TUPLE(t);
148 145
149 h = nf_conntrack_find_get(t, NULL); 146 h = nf_conntrack_find_get(t);
150 if (h) { 147 if (h) {
151 sibling = nf_ct_tuplehash_to_ctrack(h); 148 sibling = nf_ct_tuplehash_to_ctrack(h);
152 DEBUGP("setting timeout of conntrack %p to 0\n", sibling); 149 pr_debug("setting timeout of conntrack %p to 0\n", sibling);
153 sibling->proto.gre.timeout = 0; 150 sibling->proto.gre.timeout = 0;
154 sibling->proto.gre.stream_timeout = 0; 151 sibling->proto.gre.stream_timeout = 0;
155 if (del_timer(&sibling->timeout)) 152 if (del_timer(&sibling->timeout))
@@ -157,11 +154,11 @@ static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
157 nf_ct_put(sibling); 154 nf_ct_put(sibling);
158 return 1; 155 return 1;
159 } else { 156 } else {
160 exp = nf_conntrack_expect_find_get(t); 157 exp = nf_ct_expect_find_get(t);
161 if (exp) { 158 if (exp) {
162 DEBUGP("unexpect_related of expect %p\n", exp); 159 pr_debug("unexpect_related of expect %p\n", exp);
163 nf_conntrack_unexpect_related(exp); 160 nf_ct_unexpect_related(exp);
164 nf_conntrack_expect_put(exp); 161 nf_ct_expect_put(exp);
165 return 1; 162 return 1;
166 } 163 }
167 } 164 }
@@ -182,7 +179,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
182 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; 179 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
183 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; 180 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
184 if (!destroy_sibling_or_exp(&t)) 181 if (!destroy_sibling_or_exp(&t))
185 DEBUGP("failed to timeout original pns->pac ct/exp\n"); 182 pr_debug("failed to timeout original pns->pac ct/exp\n");
186 183
187 /* try reply (pac->pns) tuple */ 184 /* try reply (pac->pns) tuple */
188 memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); 185 memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
@@ -190,7 +187,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
190 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; 187 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
191 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; 188 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
192 if (!destroy_sibling_or_exp(&t)) 189 if (!destroy_sibling_or_exp(&t))
193 DEBUGP("failed to timeout reply pac->pns ct/exp\n"); 190 pr_debug("failed to timeout reply pac->pns ct/exp\n");
194} 191}
195 192
196/* expect GRE connections (PNS->PAC and PAC->PNS direction) */ 193/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
@@ -201,36 +198,36 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
201 int ret = 1; 198 int ret = 1;
202 typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre; 199 typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre;
203 200
204 exp_orig = nf_conntrack_expect_alloc(ct); 201 exp_orig = nf_ct_expect_alloc(ct);
205 if (exp_orig == NULL) 202 if (exp_orig == NULL)
206 goto out; 203 goto out;
207 204
208 exp_reply = nf_conntrack_expect_alloc(ct); 205 exp_reply = nf_ct_expect_alloc(ct);
209 if (exp_reply == NULL) 206 if (exp_reply == NULL)
210 goto out_put_orig; 207 goto out_put_orig;
211 208
212 /* original direction, PNS->PAC */ 209 /* original direction, PNS->PAC */
213 dir = IP_CT_DIR_ORIGINAL; 210 dir = IP_CT_DIR_ORIGINAL;
214 nf_conntrack_expect_init(exp_orig, ct->tuplehash[dir].tuple.src.l3num, 211 nf_ct_expect_init(exp_orig, ct->tuplehash[dir].tuple.src.l3num,
215 &ct->tuplehash[dir].tuple.src.u3, 212 &ct->tuplehash[dir].tuple.src.u3,
216 &ct->tuplehash[dir].tuple.dst.u3, 213 &ct->tuplehash[dir].tuple.dst.u3,
217 IPPROTO_GRE, &peer_callid, &callid); 214 IPPROTO_GRE, &peer_callid, &callid);
218 exp_orig->expectfn = pptp_expectfn; 215 exp_orig->expectfn = pptp_expectfn;
219 216
220 /* reply direction, PAC->PNS */ 217 /* reply direction, PAC->PNS */
221 dir = IP_CT_DIR_REPLY; 218 dir = IP_CT_DIR_REPLY;
222 nf_conntrack_expect_init(exp_reply, ct->tuplehash[dir].tuple.src.l3num, 219 nf_ct_expect_init(exp_reply, ct->tuplehash[dir].tuple.src.l3num,
223 &ct->tuplehash[dir].tuple.src.u3, 220 &ct->tuplehash[dir].tuple.src.u3,
224 &ct->tuplehash[dir].tuple.dst.u3, 221 &ct->tuplehash[dir].tuple.dst.u3,
225 IPPROTO_GRE, &callid, &peer_callid); 222 IPPROTO_GRE, &callid, &peer_callid);
226 exp_reply->expectfn = pptp_expectfn; 223 exp_reply->expectfn = pptp_expectfn;
227 224
228 nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); 225 nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre);
229 if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) 226 if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK)
230 nf_nat_pptp_exp_gre(exp_orig, exp_reply); 227 nf_nat_pptp_exp_gre(exp_orig, exp_reply);
231 if (nf_conntrack_expect_related(exp_orig) != 0) 228 if (nf_ct_expect_related(exp_orig) != 0)
232 goto out_put_both; 229 goto out_put_both;
233 if (nf_conntrack_expect_related(exp_reply) != 0) 230 if (nf_ct_expect_related(exp_reply) != 0)
234 goto out_unexpect_orig; 231 goto out_unexpect_orig;
235 232
236 /* Add GRE keymap entries */ 233 /* Add GRE keymap entries */
@@ -243,16 +240,16 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
243 ret = 0; 240 ret = 0;
244 241
245out_put_both: 242out_put_both:
246 nf_conntrack_expect_put(exp_reply); 243 nf_ct_expect_put(exp_reply);
247out_put_orig: 244out_put_orig:
248 nf_conntrack_expect_put(exp_orig); 245 nf_ct_expect_put(exp_orig);
249out: 246out:
250 return ret; 247 return ret;
251 248
252out_unexpect_both: 249out_unexpect_both:
253 nf_conntrack_unexpect_related(exp_reply); 250 nf_ct_unexpect_related(exp_reply);
254out_unexpect_orig: 251out_unexpect_orig:
255 nf_conntrack_unexpect_related(exp_orig); 252 nf_ct_unexpect_related(exp_orig);
256 goto out_put_both; 253 goto out_put_both;
257} 254}
258 255
@@ -270,7 +267,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
270 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; 267 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
271 268
272 msg = ntohs(ctlh->messageType); 269 msg = ntohs(ctlh->messageType);
273 DEBUGP("inbound control message %s\n", pptp_msg_name[msg]); 270 pr_debug("inbound control message %s\n", pptp_msg_name[msg]);
274 271
275 switch (msg) { 272 switch (msg) {
276 case PPTP_START_SESSION_REPLY: 273 case PPTP_START_SESSION_REPLY:
@@ -305,8 +302,8 @@ pptp_inbound_pkt(struct sk_buff **pskb,
305 pcid = pptpReq->ocack.peersCallID; 302 pcid = pptpReq->ocack.peersCallID;
306 if (info->pns_call_id != pcid) 303 if (info->pns_call_id != pcid)
307 goto invalid; 304 goto invalid;
308 DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], 305 pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
309 ntohs(cid), ntohs(pcid)); 306 ntohs(cid), ntohs(pcid));
310 307
311 if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { 308 if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
312 info->cstate = PPTP_CALL_OUT_CONF; 309 info->cstate = PPTP_CALL_OUT_CONF;
@@ -322,7 +319,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
322 goto invalid; 319 goto invalid;
323 320
324 cid = pptpReq->icreq.callID; 321 cid = pptpReq->icreq.callID;
325 DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 322 pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
326 info->cstate = PPTP_CALL_IN_REQ; 323 info->cstate = PPTP_CALL_IN_REQ;
327 info->pac_call_id = cid; 324 info->pac_call_id = cid;
328 break; 325 break;
@@ -341,7 +338,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
341 if (info->pns_call_id != pcid) 338 if (info->pns_call_id != pcid)
342 goto invalid; 339 goto invalid;
343 340
344 DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); 341 pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
345 info->cstate = PPTP_CALL_IN_CONF; 342 info->cstate = PPTP_CALL_IN_CONF;
346 343
347 /* we expect a GRE connection from PAC to PNS */ 344 /* we expect a GRE connection from PAC to PNS */
@@ -351,7 +348,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
351 case PPTP_CALL_DISCONNECT_NOTIFY: 348 case PPTP_CALL_DISCONNECT_NOTIFY:
352 /* server confirms disconnect */ 349 /* server confirms disconnect */
353 cid = pptpReq->disc.callID; 350 cid = pptpReq->disc.callID;
354 DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 351 pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
355 info->cstate = PPTP_CALL_NONE; 352 info->cstate = PPTP_CALL_NONE;
356 353
357 /* untrack this call id, unexpect GRE packets */ 354 /* untrack this call id, unexpect GRE packets */
@@ -374,11 +371,11 @@ pptp_inbound_pkt(struct sk_buff **pskb,
374 return NF_ACCEPT; 371 return NF_ACCEPT;
375 372
376invalid: 373invalid:
377 DEBUGP("invalid %s: type=%d cid=%u pcid=%u " 374 pr_debug("invalid %s: type=%d cid=%u pcid=%u "
378 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 375 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
379 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], 376 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
380 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 377 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
381 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 378 ntohs(info->pns_call_id), ntohs(info->pac_call_id));
382 return NF_ACCEPT; 379 return NF_ACCEPT;
383} 380}
384 381
@@ -396,7 +393,7 @@ pptp_outbound_pkt(struct sk_buff **pskb,
396 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; 393 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
397 394
398 msg = ntohs(ctlh->messageType); 395 msg = ntohs(ctlh->messageType);
399 DEBUGP("outbound control message %s\n", pptp_msg_name[msg]); 396 pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
400 397
401 switch (msg) { 398 switch (msg) {
402 case PPTP_START_SESSION_REQUEST: 399 case PPTP_START_SESSION_REQUEST:
@@ -418,7 +415,7 @@ pptp_outbound_pkt(struct sk_buff **pskb,
418 info->cstate = PPTP_CALL_OUT_REQ; 415 info->cstate = PPTP_CALL_OUT_REQ;
419 /* track PNS call id */ 416 /* track PNS call id */
420 cid = pptpReq->ocreq.callID; 417 cid = pptpReq->ocreq.callID;
421 DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 418 pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
422 info->pns_call_id = cid; 419 info->pns_call_id = cid;
423 break; 420 break;
424 421
@@ -432,8 +429,8 @@ pptp_outbound_pkt(struct sk_buff **pskb,
432 pcid = pptpReq->icack.peersCallID; 429 pcid = pptpReq->icack.peersCallID;
433 if (info->pac_call_id != pcid) 430 if (info->pac_call_id != pcid)
434 goto invalid; 431 goto invalid;
435 DEBUGP("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], 432 pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
436 ntohs(cid), ntohs(pcid)); 433 ntohs(cid), ntohs(pcid));
437 434
438 if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { 435 if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
439 /* part two of the three-way handshake */ 436 /* part two of the three-way handshake */
@@ -469,11 +466,11 @@ pptp_outbound_pkt(struct sk_buff **pskb,
469 return NF_ACCEPT; 466 return NF_ACCEPT;
470 467
471invalid: 468invalid:
472 DEBUGP("invalid %s: type=%d cid=%u pcid=%u " 469 pr_debug("invalid %s: type=%d cid=%u pcid=%u "
473 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 470 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
474 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], 471 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
475 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 472 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
476 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 473 ntohs(info->pns_call_id), ntohs(info->pac_call_id));
477 return NF_ACCEPT; 474 return NF_ACCEPT;
478} 475}
479 476
@@ -524,7 +521,7 @@ conntrack_pptp_help(struct sk_buff **pskb, unsigned int protoff,
524 521
525 pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); 522 pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph);
526 if (!pptph) { 523 if (!pptph) {
527 DEBUGP("no full PPTP header, can't track\n"); 524 pr_debug("no full PPTP header, can't track\n");
528 return NF_ACCEPT; 525 return NF_ACCEPT;
529 } 526 }
530 nexthdr_off += sizeof(_pptph); 527 nexthdr_off += sizeof(_pptph);
@@ -533,7 +530,7 @@ conntrack_pptp_help(struct sk_buff **pskb, unsigned int protoff,
533 /* if it's not a control message we can't do anything with it */ 530 /* if it's not a control message we can't do anything with it */
534 if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL || 531 if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
535 ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) { 532 ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
536 DEBUGP("not a control packet\n"); 533 pr_debug("not a control packet\n");
537 return NF_ACCEPT; 534 return NF_ACCEPT;
538 } 535 }
539 536
@@ -569,8 +566,8 @@ conntrack_pptp_help(struct sk_buff **pskb, unsigned int protoff,
569 /* server -> client (PAC -> PNS) */ 566 /* server -> client (PAC -> PNS) */
570 ret = pptp_inbound_pkt(pskb, ctlh, pptpReq, reqlen, ct, 567 ret = pptp_inbound_pkt(pskb, ctlh, pptpReq, reqlen, ct,
571 ctinfo); 568 ctinfo);
572 DEBUGP("sstate: %d->%d, cstate: %d->%d\n", 569 pr_debug("sstate: %d->%d, cstate: %d->%d\n",
573 oldsstate, info->sstate, oldcstate, info->cstate); 570 oldsstate, info->sstate, oldcstate, info->cstate);
574 spin_unlock_bh(&nf_pptp_lock); 571 spin_unlock_bh(&nf_pptp_lock);
575 572
576 return ret; 573 return ret;
@@ -585,9 +582,6 @@ static struct nf_conntrack_helper pptp __read_mostly = {
585 .tuple.src.l3num = AF_INET, 582 .tuple.src.l3num = AF_INET,
586 .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT), 583 .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT),
587 .tuple.dst.protonum = IPPROTO_TCP, 584 .tuple.dst.protonum = IPPROTO_TCP,
588 .mask.src.l3num = 0xffff,
589 .mask.src.u.tcp.port = __constant_htons(0xffff),
590 .mask.dst.protonum = 0xff,
591 .help = conntrack_pptp_help, 585 .help = conntrack_pptp_help,
592 .destroy = pptp_destroy_siblings, 586 .destroy = pptp_destroy_siblings,
593}; 587};
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 339c397d1b5f..771c4c29936e 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -40,12 +40,6 @@
40#define GRE_TIMEOUT (30 * HZ) 40#define GRE_TIMEOUT (30 * HZ)
41#define GRE_STREAM_TIMEOUT (180 * HZ) 41#define GRE_STREAM_TIMEOUT (180 * HZ)
42 42
43#if 0
44#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
45#else
46#define DEBUGP(x, args...)
47#endif
48
49static DEFINE_RWLOCK(nf_ct_gre_lock); 43static DEFINE_RWLOCK(nf_ct_gre_lock);
50static LIST_HEAD(gre_keymap_list); 44static LIST_HEAD(gre_keymap_list);
51 45
@@ -87,7 +81,7 @@ static __be16 gre_keymap_lookup(struct nf_conntrack_tuple *t)
87 } 81 }
88 read_unlock_bh(&nf_ct_gre_lock); 82 read_unlock_bh(&nf_ct_gre_lock);
89 83
90 DEBUGP("lookup src key 0x%x for ", key); 84 pr_debug("lookup src key 0x%x for ", key);
91 NF_CT_DUMP_TUPLE(t); 85 NF_CT_DUMP_TUPLE(t);
92 86
93 return key; 87 return key;
@@ -107,8 +101,8 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
107 if (gre_key_cmpfn(km, t) && km == *kmp) 101 if (gre_key_cmpfn(km, t) && km == *kmp)
108 return 0; 102 return 0;
109 } 103 }
110 DEBUGP("trying to override keymap_%s for ct %p\n", 104 pr_debug("trying to override keymap_%s for ct %p\n",
111 dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); 105 dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
112 return -EEXIST; 106 return -EEXIST;
113 } 107 }
114 108
@@ -118,7 +112,7 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
118 memcpy(&km->tuple, t, sizeof(*t)); 112 memcpy(&km->tuple, t, sizeof(*t));
119 *kmp = km; 113 *kmp = km;
120 114
121 DEBUGP("adding new entry %p: ", km); 115 pr_debug("adding new entry %p: ", km);
122 NF_CT_DUMP_TUPLE(&km->tuple); 116 NF_CT_DUMP_TUPLE(&km->tuple);
123 117
124 write_lock_bh(&nf_ct_gre_lock); 118 write_lock_bh(&nf_ct_gre_lock);
@@ -135,13 +129,13 @@ void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
135 struct nf_conn_help *help = nfct_help(ct); 129 struct nf_conn_help *help = nfct_help(ct);
136 enum ip_conntrack_dir dir; 130 enum ip_conntrack_dir dir;
137 131
138 DEBUGP("entering for ct %p\n", ct); 132 pr_debug("entering for ct %p\n", ct);
139 133
140 write_lock_bh(&nf_ct_gre_lock); 134 write_lock_bh(&nf_ct_gre_lock);
141 for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) { 135 for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
142 if (help->help.ct_pptp_info.keymap[dir]) { 136 if (help->help.ct_pptp_info.keymap[dir]) {
143 DEBUGP("removing %p from list\n", 137 pr_debug("removing %p from list\n",
144 help->help.ct_pptp_info.keymap[dir]); 138 help->help.ct_pptp_info.keymap[dir]);
145 list_del(&help->help.ct_pptp_info.keymap[dir]->list); 139 list_del(&help->help.ct_pptp_info.keymap[dir]->list);
146 kfree(help->help.ct_pptp_info.keymap[dir]); 140 kfree(help->help.ct_pptp_info.keymap[dir]);
147 help->help.ct_pptp_info.keymap[dir] = NULL; 141 help->help.ct_pptp_info.keymap[dir] = NULL;
@@ -186,7 +180,7 @@ static int gre_pkt_to_tuple(const struct sk_buff *skb,
186 return 1; 180 return 1;
187 181
188 if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) { 182 if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
189 DEBUGP("GRE_VERSION_PPTP but unknown proto\n"); 183 pr_debug("GRE_VERSION_PPTP but unknown proto\n");
190 return 0; 184 return 0;
191 } 185 }
192 186
@@ -242,7 +236,7 @@ static int gre_packet(struct nf_conn *ct,
242static int gre_new(struct nf_conn *ct, const struct sk_buff *skb, 236static int gre_new(struct nf_conn *ct, const struct sk_buff *skb,
243 unsigned int dataoff) 237 unsigned int dataoff)
244{ 238{
245 DEBUGP(": "); 239 pr_debug(": ");
246 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 240 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
247 241
248 /* initialize to sane value. Ideally a conntrack helper 242 /* initialize to sane value. Ideally a conntrack helper
@@ -258,10 +252,10 @@ static int gre_new(struct nf_conn *ct, const struct sk_buff *skb,
258static void gre_destroy(struct nf_conn *ct) 252static void gre_destroy(struct nf_conn *ct)
259{ 253{
260 struct nf_conn *master = ct->master; 254 struct nf_conn *master = ct->master;
261 DEBUGP(" entering\n"); 255 pr_debug(" entering\n");
262 256
263 if (!master) 257 if (!master)
264 DEBUGP("no master !?!\n"); 258 pr_debug("no master !?!\n");
265 else 259 else
266 nf_ct_gre_keymap_destroy(master); 260 nf_ct_gre_keymap_destroy(master);
267} 261}
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 0d3254b974c5..debfe61378a1 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -25,12 +25,6 @@
25#include <net/netfilter/nf_conntrack_l4proto.h> 25#include <net/netfilter/nf_conntrack_l4proto.h>
26#include <net/netfilter/nf_conntrack_ecache.h> 26#include <net/netfilter/nf_conntrack_ecache.h>
27 27
28#if 0
29#define DEBUGP(format, ...) printk(format, ## __VA_ARGS__)
30#else
31#define DEBUGP(format, args...)
32#endif
33
34/* Protects conntrack->proto.sctp */ 28/* Protects conntrack->proto.sctp */
35static DEFINE_RWLOCK(sctp_lock); 29static DEFINE_RWLOCK(sctp_lock);
36 30
@@ -151,9 +145,6 @@ static int sctp_pkt_to_tuple(const struct sk_buff *skb,
151{ 145{
152 sctp_sctphdr_t _hdr, *hp; 146 sctp_sctphdr_t _hdr, *hp;
153 147
154 DEBUGP(__FUNCTION__);
155 DEBUGP("\n");
156
157 /* Actually only need first 8 bytes. */ 148 /* Actually only need first 8 bytes. */
158 hp = skb_header_pointer(skb, dataoff, 8, &_hdr); 149 hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
159 if (hp == NULL) 150 if (hp == NULL)
@@ -167,9 +158,6 @@ static int sctp_pkt_to_tuple(const struct sk_buff *skb,
167static int sctp_invert_tuple(struct nf_conntrack_tuple *tuple, 158static int sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
168 const struct nf_conntrack_tuple *orig) 159 const struct nf_conntrack_tuple *orig)
169{ 160{
170 DEBUGP(__FUNCTION__);
171 DEBUGP("\n");
172
173 tuple->src.u.sctp.port = orig->dst.u.sctp.port; 161 tuple->src.u.sctp.port = orig->dst.u.sctp.port;
174 tuple->dst.u.sctp.port = orig->src.u.sctp.port; 162 tuple->dst.u.sctp.port = orig->src.u.sctp.port;
175 return 1; 163 return 1;
@@ -179,9 +167,6 @@ static int sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
179static int sctp_print_tuple(struct seq_file *s, 167static int sctp_print_tuple(struct seq_file *s,
180 const struct nf_conntrack_tuple *tuple) 168 const struct nf_conntrack_tuple *tuple)
181{ 169{
182 DEBUGP(__FUNCTION__);
183 DEBUGP("\n");
184
185 return seq_printf(s, "sport=%hu dport=%hu ", 170 return seq_printf(s, "sport=%hu dport=%hu ",
186 ntohs(tuple->src.u.sctp.port), 171 ntohs(tuple->src.u.sctp.port),
187 ntohs(tuple->dst.u.sctp.port)); 172 ntohs(tuple->dst.u.sctp.port));
@@ -193,9 +178,6 @@ static int sctp_print_conntrack(struct seq_file *s,
193{ 178{
194 enum sctp_conntrack state; 179 enum sctp_conntrack state;
195 180
196 DEBUGP(__FUNCTION__);
197 DEBUGP("\n");
198
199 read_lock_bh(&sctp_lock); 181 read_lock_bh(&sctp_lock);
200 state = conntrack->proto.sctp.state; 182 state = conntrack->proto.sctp.state;
201 read_unlock_bh(&sctp_lock); 183 read_unlock_bh(&sctp_lock);
@@ -219,13 +201,10 @@ static int do_basic_checks(struct nf_conn *conntrack,
219 sctp_chunkhdr_t _sch, *sch; 201 sctp_chunkhdr_t _sch, *sch;
220 int flag; 202 int flag;
221 203
222 DEBUGP(__FUNCTION__);
223 DEBUGP("\n");
224
225 flag = 0; 204 flag = 0;
226 205
227 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { 206 for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
228 DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type); 207 pr_debug("Chunk Num: %d Type: %d\n", count, sch->type);
229 208
230 if (sch->type == SCTP_CID_INIT 209 if (sch->type == SCTP_CID_INIT
231 || sch->type == SCTP_CID_INIT_ACK 210 || sch->type == SCTP_CID_INIT_ACK
@@ -242,7 +221,7 @@ static int do_basic_checks(struct nf_conn *conntrack,
242 || sch->type == SCTP_CID_COOKIE_ECHO 221 || sch->type == SCTP_CID_COOKIE_ECHO
243 || flag) 222 || flag)
244 && count !=0) || !sch->length) { 223 && count !=0) || !sch->length) {
245 DEBUGP("Basic checks failed\n"); 224 pr_debug("Basic checks failed\n");
246 return 1; 225 return 1;
247 } 226 }
248 227
@@ -251,7 +230,7 @@ static int do_basic_checks(struct nf_conn *conntrack,
251 } 230 }
252 } 231 }
253 232
254 DEBUGP("Basic checks passed\n"); 233 pr_debug("Basic checks passed\n");
255 return count == 0; 234 return count == 0;
256} 235}
257 236
@@ -261,50 +240,47 @@ static int new_state(enum ip_conntrack_dir dir,
261{ 240{
262 int i; 241 int i;
263 242
264 DEBUGP(__FUNCTION__); 243 pr_debug("Chunk type: %d\n", chunk_type);
265 DEBUGP("\n");
266
267 DEBUGP("Chunk type: %d\n", chunk_type);
268 244
269 switch (chunk_type) { 245 switch (chunk_type) {
270 case SCTP_CID_INIT: 246 case SCTP_CID_INIT:
271 DEBUGP("SCTP_CID_INIT\n"); 247 pr_debug("SCTP_CID_INIT\n");
272 i = 0; break; 248 i = 0; break;
273 case SCTP_CID_INIT_ACK: 249 case SCTP_CID_INIT_ACK:
274 DEBUGP("SCTP_CID_INIT_ACK\n"); 250 pr_debug("SCTP_CID_INIT_ACK\n");
275 i = 1; break; 251 i = 1; break;
276 case SCTP_CID_ABORT: 252 case SCTP_CID_ABORT:
277 DEBUGP("SCTP_CID_ABORT\n"); 253 pr_debug("SCTP_CID_ABORT\n");
278 i = 2; break; 254 i = 2; break;
279 case SCTP_CID_SHUTDOWN: 255 case SCTP_CID_SHUTDOWN:
280 DEBUGP("SCTP_CID_SHUTDOWN\n"); 256 pr_debug("SCTP_CID_SHUTDOWN\n");
281 i = 3; break; 257 i = 3; break;
282 case SCTP_CID_SHUTDOWN_ACK: 258 case SCTP_CID_SHUTDOWN_ACK:
283 DEBUGP("SCTP_CID_SHUTDOWN_ACK\n"); 259 pr_debug("SCTP_CID_SHUTDOWN_ACK\n");
284 i = 4; break; 260 i = 4; break;
285 case SCTP_CID_ERROR: 261 case SCTP_CID_ERROR:
286 DEBUGP("SCTP_CID_ERROR\n"); 262 pr_debug("SCTP_CID_ERROR\n");
287 i = 5; break; 263 i = 5; break;
288 case SCTP_CID_COOKIE_ECHO: 264 case SCTP_CID_COOKIE_ECHO:
289 DEBUGP("SCTP_CID_COOKIE_ECHO\n"); 265 pr_debug("SCTP_CID_COOKIE_ECHO\n");
290 i = 6; break; 266 i = 6; break;
291 case SCTP_CID_COOKIE_ACK: 267 case SCTP_CID_COOKIE_ACK:
292 DEBUGP("SCTP_CID_COOKIE_ACK\n"); 268 pr_debug("SCTP_CID_COOKIE_ACK\n");
293 i = 7; break; 269 i = 7; break;
294 case SCTP_CID_SHUTDOWN_COMPLETE: 270 case SCTP_CID_SHUTDOWN_COMPLETE:
295 DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n"); 271 pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
296 i = 8; break; 272 i = 8; break;
297 default: 273 default:
298 /* Other chunks like DATA, SACK, HEARTBEAT and 274 /* Other chunks like DATA, SACK, HEARTBEAT and
299 its ACK do not cause a change in state */ 275 its ACK do not cause a change in state */
300 DEBUGP("Unknown chunk type, Will stay in %s\n", 276 pr_debug("Unknown chunk type, Will stay in %s\n",
301 sctp_conntrack_names[cur_state]); 277 sctp_conntrack_names[cur_state]);
302 return cur_state; 278 return cur_state;
303 } 279 }
304 280
305 DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n", 281 pr_debug("dir: %d cur_state: %s chunk_type: %d new_state: %s\n",
306 dir, sctp_conntrack_names[cur_state], chunk_type, 282 dir, sctp_conntrack_names[cur_state], chunk_type,
307 sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]); 283 sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);
308 284
309 return sctp_conntracks[dir][i][cur_state]; 285 return sctp_conntracks[dir][i][cur_state];
310} 286}
@@ -323,9 +299,6 @@ static int sctp_packet(struct nf_conn *conntrack,
323 u_int32_t offset, count; 299 u_int32_t offset, count;
324 char map[256 / sizeof (char)] = {0}; 300 char map[256 / sizeof (char)] = {0};
325 301
326 DEBUGP(__FUNCTION__);
327 DEBUGP("\n");
328
329 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 302 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
330 if (sh == NULL) 303 if (sh == NULL)
331 return -1; 304 return -1;
@@ -340,7 +313,7 @@ static int sctp_packet(struct nf_conn *conntrack,
340 && !test_bit(SCTP_CID_ABORT, (void *)map) 313 && !test_bit(SCTP_CID_ABORT, (void *)map)
341 && !test_bit(SCTP_CID_SHUTDOWN_ACK, (void *)map) 314 && !test_bit(SCTP_CID_SHUTDOWN_ACK, (void *)map)
342 && (sh->vtag != conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) { 315 && (sh->vtag != conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) {
343 DEBUGP("Verification tag check failed\n"); 316 pr_debug("Verification tag check failed\n");
344 return -1; 317 return -1;
345 } 318 }
346 319
@@ -385,8 +358,9 @@ static int sctp_packet(struct nf_conn *conntrack,
385 358
386 /* Invalid */ 359 /* Invalid */
387 if (newconntrack == SCTP_CONNTRACK_MAX) { 360 if (newconntrack == SCTP_CONNTRACK_MAX) {
388 DEBUGP("nf_conntrack_sctp: Invalid dir=%i ctype=%u conntrack=%u\n", 361 pr_debug("nf_conntrack_sctp: Invalid dir=%i ctype=%u "
389 CTINFO2DIR(ctinfo), sch->type, oldsctpstate); 362 "conntrack=%u\n",
363 CTINFO2DIR(ctinfo), sch->type, oldsctpstate);
390 write_unlock_bh(&sctp_lock); 364 write_unlock_bh(&sctp_lock);
391 return -1; 365 return -1;
392 } 366 }
@@ -402,8 +376,8 @@ static int sctp_packet(struct nf_conn *conntrack,
402 write_unlock_bh(&sctp_lock); 376 write_unlock_bh(&sctp_lock);
403 return -1; 377 return -1;
404 } 378 }
405 DEBUGP("Setting vtag %x for dir %d\n", 379 pr_debug("Setting vtag %x for dir %d\n",
406 ih->init_tag, !CTINFO2DIR(ctinfo)); 380 ih->init_tag, !CTINFO2DIR(ctinfo));
407 conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag; 381 conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag;
408 } 382 }
409 383
@@ -418,7 +392,7 @@ static int sctp_packet(struct nf_conn *conntrack,
418 if (oldsctpstate == SCTP_CONNTRACK_COOKIE_ECHOED 392 if (oldsctpstate == SCTP_CONNTRACK_COOKIE_ECHOED
419 && CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY 393 && CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY
420 && newconntrack == SCTP_CONNTRACK_ESTABLISHED) { 394 && newconntrack == SCTP_CONNTRACK_ESTABLISHED) {
421 DEBUGP("Setting assured bit\n"); 395 pr_debug("Setting assured bit\n");
422 set_bit(IPS_ASSURED_BIT, &conntrack->status); 396 set_bit(IPS_ASSURED_BIT, &conntrack->status);
423 nf_conntrack_event_cache(IPCT_STATUS, skb); 397 nf_conntrack_event_cache(IPCT_STATUS, skb);
424 } 398 }
@@ -436,9 +410,6 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
436 u_int32_t offset, count; 410 u_int32_t offset, count;
437 char map[256 / sizeof (char)] = {0}; 411 char map[256 / sizeof (char)] = {0};
438 412
439 DEBUGP(__FUNCTION__);
440 DEBUGP("\n");
441
442 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 413 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
443 if (sh == NULL) 414 if (sh == NULL)
444 return 0; 415 return 0;
@@ -460,8 +431,9 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
460 SCTP_CONNTRACK_NONE, sch->type); 431 SCTP_CONNTRACK_NONE, sch->type);
461 432
462 /* Invalid: delete conntrack */ 433 /* Invalid: delete conntrack */
463 if (newconntrack == SCTP_CONNTRACK_MAX) { 434 if (newconntrack == SCTP_CONNTRACK_NONE ||
464 DEBUGP("nf_conntrack_sctp: invalid new deleting.\n"); 435 newconntrack == SCTP_CONNTRACK_MAX) {
436 pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
465 return 0; 437 return 0;
466 } 438 }
467 439
@@ -475,8 +447,8 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
475 if (ih == NULL) 447 if (ih == NULL)
476 return 0; 448 return 0;
477 449
478 DEBUGP("Setting vtag %x for new conn\n", 450 pr_debug("Setting vtag %x for new conn\n",
479 ih->init_tag); 451 ih->init_tag);
480 452
481 conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = 453 conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] =
482 ih->init_tag; 454 ih->init_tag;
@@ -488,8 +460,8 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
488 /* If it is a shutdown ack OOTB packet, we expect a return 460 /* If it is a shutdown ack OOTB packet, we expect a return
489 shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ 461 shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
490 else { 462 else {
491 DEBUGP("Setting vtag %x for new conn OOTB\n", 463 pr_debug("Setting vtag %x for new conn OOTB\n",
492 sh->vtag); 464 sh->vtag);
493 conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; 465 conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
494 } 466 }
495 467
@@ -688,8 +660,6 @@ int __init nf_conntrack_proto_sctp_init(void)
688 cleanup_sctp4: 660 cleanup_sctp4:
689 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); 661 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
690 out: 662 out:
691 DEBUGP("SCTP conntrack module loading %s\n",
692 ret ? "failed": "succeeded");
693 return ret; 663 return ret;
694} 664}
695 665
@@ -697,7 +667,6 @@ void __exit nf_conntrack_proto_sctp_fini(void)
697{ 667{
698 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6); 668 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
699 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); 669 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
700 DEBUGP("SCTP conntrack module unloaded\n");
701} 670}
702 671
703module_init(nf_conntrack_proto_sctp_init); 672module_init(nf_conntrack_proto_sctp_init);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index ccdd5d231e0d..1c8206e6560a 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -26,13 +26,6 @@
26#include <net/netfilter/nf_conntrack_l4proto.h> 26#include <net/netfilter/nf_conntrack_l4proto.h>
27#include <net/netfilter/nf_conntrack_ecache.h> 27#include <net/netfilter/nf_conntrack_ecache.h>
28 28
29#if 0
30#define DEBUGP printk
31#define DEBUGP_VARS
32#else
33#define DEBUGP(format, args...)
34#endif
35
36/* Protects conntrack->proto.tcp */ 29/* Protects conntrack->proto.tcp */
37static DEFINE_RWLOCK(tcp_lock); 30static DEFINE_RWLOCK(tcp_lock);
38 31
@@ -496,7 +489,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
496 } 489 }
497} 490}
498 491
499static int tcp_in_window(struct ip_ct_tcp *state, 492static int tcp_in_window(struct nf_conn *ct,
493 struct ip_ct_tcp *state,
500 enum ip_conntrack_dir dir, 494 enum ip_conntrack_dir dir,
501 unsigned int index, 495 unsigned int index,
502 const struct sk_buff *skb, 496 const struct sk_buff *skb,
@@ -506,6 +500,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
506{ 500{
507 struct ip_ct_tcp_state *sender = &state->seen[dir]; 501 struct ip_ct_tcp_state *sender = &state->seen[dir];
508 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 502 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
503 struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
509 __u32 seq, ack, sack, end, win, swin; 504 __u32 seq, ack, sack, end, win, swin;
510 int res; 505 int res;
511 506
@@ -520,18 +515,17 @@ static int tcp_in_window(struct ip_ct_tcp *state,
520 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) 515 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
521 tcp_sack(skb, dataoff, tcph, &sack); 516 tcp_sack(skb, dataoff, tcph, &sack);
522 517
523 DEBUGP("tcp_in_window: START\n"); 518 pr_debug("tcp_in_window: START\n");
524 DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " 519 pr_debug("tcp_in_window: ");
525 "seq=%u ack=%u sack=%u win=%u end=%u\n", 520 NF_CT_DUMP_TUPLE(tuple);
526 NIPQUAD(iph->saddr), ntohs(tcph->source), 521 pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n",
527 NIPQUAD(iph->daddr), ntohs(tcph->dest), 522 seq, ack, sack, win, end);
528 seq, ack, sack, win, end); 523 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
529 DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 524 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
530 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 525 sender->td_end, sender->td_maxend, sender->td_maxwin,
531 sender->td_end, sender->td_maxend, sender->td_maxwin, 526 sender->td_scale,
532 sender->td_scale, 527 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
533 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 528 receiver->td_scale);
534 receiver->td_scale);
535 529
536 if (sender->td_end == 0) { 530 if (sender->td_end == 0) {
537 /* 531 /*
@@ -609,23 +603,22 @@ static int tcp_in_window(struct ip_ct_tcp *state,
609 */ 603 */
610 seq = end = sender->td_end; 604 seq = end = sender->td_end;
611 605
612 DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " 606 pr_debug("tcp_in_window: ");
613 "seq=%u ack=%u sack =%u win=%u end=%u\n", 607 NF_CT_DUMP_TUPLE(tuple);
614 NIPQUAD(iph->saddr), ntohs(tcph->source), 608 pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n",
615 NIPQUAD(iph->daddr), ntohs(tcph->dest), 609 seq, ack, sack, win, end);
616 seq, ack, sack, win, end); 610 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
617 DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 611 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
618 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 612 sender->td_end, sender->td_maxend, sender->td_maxwin,
619 sender->td_end, sender->td_maxend, sender->td_maxwin, 613 sender->td_scale,
620 sender->td_scale, 614 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
621 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 615 receiver->td_scale);
622 receiver->td_scale); 616
623 617 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
624 DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n", 618 before(seq, sender->td_maxend + 1),
625 before(seq, sender->td_maxend + 1), 619 after(end, sender->td_end - receiver->td_maxwin - 1),
626 after(end, sender->td_end - receiver->td_maxwin - 1), 620 before(sack, receiver->td_end + 1),
627 before(sack, receiver->td_end + 1), 621 after(ack, receiver->td_end - MAXACKWINDOW(sender)));
628 after(ack, receiver->td_end - MAXACKWINDOW(sender)));
629 622
630 if (before(seq, sender->td_maxend + 1) && 623 if (before(seq, sender->td_maxend + 1) &&
631 after(end, sender->td_end - receiver->td_maxwin - 1) && 624 after(end, sender->td_end - receiver->td_maxwin - 1) &&
@@ -694,10 +687,10 @@ static int tcp_in_window(struct ip_ct_tcp *state,
694 : "SEQ is over the upper bound (over the window of the receiver)"); 687 : "SEQ is over the upper bound (over the window of the receiver)");
695 } 688 }
696 689
697 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " 690 pr_debug("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
698 "receiver end=%u maxend=%u maxwin=%u\n", 691 "receiver end=%u maxend=%u maxwin=%u\n",
699 res, sender->td_end, sender->td_maxend, sender->td_maxwin, 692 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
700 receiver->td_end, receiver->td_maxend, receiver->td_maxwin); 693 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
701 694
702 return res; 695 return res;
703} 696}
@@ -711,11 +704,9 @@ void nf_conntrack_tcp_update(struct sk_buff *skb,
711 int dir) 704 int dir)
712{ 705{
713 struct tcphdr *tcph = (void *)skb->data + dataoff; 706 struct tcphdr *tcph = (void *)skb->data + dataoff;
714 __u32 end;
715#ifdef DEBUGP_VARS
716 struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir]; 707 struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir];
717 struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir]; 708 struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir];
718#endif 709 __u32 end;
719 710
720 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph); 711 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
721 712
@@ -727,12 +718,12 @@ void nf_conntrack_tcp_update(struct sk_buff *skb,
727 conntrack->proto.tcp.seen[dir].td_end = end; 718 conntrack->proto.tcp.seen[dir].td_end = end;
728 conntrack->proto.tcp.last_end = end; 719 conntrack->proto.tcp.last_end = end;
729 write_unlock_bh(&tcp_lock); 720 write_unlock_bh(&tcp_lock);
730 DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " 721 pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
731 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 722 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
732 sender->td_end, sender->td_maxend, sender->td_maxwin, 723 sender->td_end, sender->td_maxend, sender->td_maxwin,
733 sender->td_scale, 724 sender->td_scale,
734 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 725 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
735 receiver->td_scale); 726 receiver->td_scale);
736} 727}
737EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update); 728EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
738#endif 729#endif
@@ -823,6 +814,7 @@ static int tcp_packet(struct nf_conn *conntrack,
823 int pf, 814 int pf,
824 unsigned int hooknum) 815 unsigned int hooknum)
825{ 816{
817 struct nf_conntrack_tuple *tuple;
826 enum tcp_conntrack new_state, old_state; 818 enum tcp_conntrack new_state, old_state;
827 enum ip_conntrack_dir dir; 819 enum ip_conntrack_dir dir;
828 struct tcphdr *th, _tcph; 820 struct tcphdr *th, _tcph;
@@ -837,6 +829,7 @@ static int tcp_packet(struct nf_conn *conntrack,
837 dir = CTINFO2DIR(ctinfo); 829 dir = CTINFO2DIR(ctinfo);
838 index = get_conntrack_index(th); 830 index = get_conntrack_index(th);
839 new_state = tcp_conntracks[dir][index][old_state]; 831 new_state = tcp_conntracks[dir][index][old_state];
832 tuple = &conntrack->tuplehash[dir].tuple;
840 833
841 switch (new_state) { 834 switch (new_state) {
842 case TCP_CONNTRACK_IGNORE: 835 case TCP_CONNTRACK_IGNORE:
@@ -880,9 +873,8 @@ static int tcp_packet(struct nf_conn *conntrack,
880 return NF_ACCEPT; 873 return NF_ACCEPT;
881 case TCP_CONNTRACK_MAX: 874 case TCP_CONNTRACK_MAX:
882 /* Invalid packet */ 875 /* Invalid packet */
883 DEBUGP("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n", 876 pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
884 dir, get_conntrack_index(th), 877 dir, get_conntrack_index(th), old_state);
885 old_state);
886 write_unlock_bh(&tcp_lock); 878 write_unlock_bh(&tcp_lock);
887 if (LOG_INVALID(IPPROTO_TCP)) 879 if (LOG_INVALID(IPPROTO_TCP))
888 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 880 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
@@ -933,7 +925,7 @@ static int tcp_packet(struct nf_conn *conntrack,
933 break; 925 break;
934 } 926 }
935 927
936 if (!tcp_in_window(&conntrack->proto.tcp, dir, index, 928 if (!tcp_in_window(conntrack, &conntrack->proto.tcp, dir, index,
937 skb, dataoff, th, pf)) { 929 skb, dataoff, th, pf)) {
938 write_unlock_bh(&tcp_lock); 930 write_unlock_bh(&tcp_lock);
939 return -NF_ACCEPT; 931 return -NF_ACCEPT;
@@ -942,13 +934,12 @@ static int tcp_packet(struct nf_conn *conntrack,
942 /* From now on we have got in-window packets */ 934 /* From now on we have got in-window packets */
943 conntrack->proto.tcp.last_index = index; 935 conntrack->proto.tcp.last_index = index;
944 936
945 DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " 937 pr_debug("tcp_conntracks: ");
946 "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n", 938 NF_CT_DUMP_TUPLE(tuple);
947 NIPQUAD(iph->saddr), ntohs(th->source), 939 pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
948 NIPQUAD(iph->daddr), ntohs(th->dest), 940 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
949 (th->syn ? 1 : 0), (th->ack ? 1 : 0), 941 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
950 (th->fin ? 1 : 0), (th->rst ? 1 : 0), 942 old_state, new_state);
951 old_state, new_state);
952 943
953 conntrack->proto.tcp.state = new_state; 944 conntrack->proto.tcp.state = new_state;
954 if (old_state != new_state 945 if (old_state != new_state
@@ -997,10 +988,8 @@ static int tcp_new(struct nf_conn *conntrack,
997{ 988{
998 enum tcp_conntrack new_state; 989 enum tcp_conntrack new_state;
999 struct tcphdr *th, _tcph; 990 struct tcphdr *th, _tcph;
1000#ifdef DEBUGP_VARS
1001 struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0]; 991 struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0];
1002 struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[1]; 992 struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[1];
1003#endif
1004 993
1005 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); 994 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
1006 BUG_ON(th == NULL); 995 BUG_ON(th == NULL);
@@ -1012,7 +1001,7 @@ static int tcp_new(struct nf_conn *conntrack,
1012 1001
1013 /* Invalid: delete conntrack */ 1002 /* Invalid: delete conntrack */
1014 if (new_state >= TCP_CONNTRACK_MAX) { 1003 if (new_state >= TCP_CONNTRACK_MAX) {
1015 DEBUGP("nf_ct_tcp: invalid new deleting.\n"); 1004 pr_debug("nf_ct_tcp: invalid new deleting.\n");
1016 return 0; 1005 return 0;
1017 } 1006 }
1018 1007
@@ -1065,12 +1054,12 @@ static int tcp_new(struct nf_conn *conntrack,
1065 conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; 1054 conntrack->proto.tcp.state = TCP_CONNTRACK_NONE;
1066 conntrack->proto.tcp.last_index = TCP_NONE_SET; 1055 conntrack->proto.tcp.last_index = TCP_NONE_SET;
1067 1056
1068 DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " 1057 pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
1069 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 1058 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
1070 sender->td_end, sender->td_maxend, sender->td_maxwin, 1059 sender->td_end, sender->td_maxend, sender->td_maxwin,
1071 sender->td_scale, 1060 sender->td_scale,
1072 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 1061 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
1073 receiver->td_scale); 1062 receiver->td_scale);
1074 return 1; 1063 return 1;
1075} 1064}
1076 1065
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index eb2d1dc46d45..355d371bac93 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -40,12 +40,6 @@ static u_int16_t ports[MAX_PORTS];
40static unsigned int ports_c; 40static unsigned int ports_c;
41module_param_array(ports, ushort, &ports_c, 0400); 41module_param_array(ports, ushort, &ports_c, 0400);
42 42
43#if 0
44#define DEBUGP printk
45#else
46#define DEBUGP(format, args...)
47#endif
48
49struct sane_request { 43struct sane_request {
50 __be32 RPC_code; 44 __be32 RPC_code;
51#define SANE_NET_START 7 /* RPC code */ 45#define SANE_NET_START 7 /* RPC code */
@@ -125,15 +119,15 @@ static int help(struct sk_buff **pskb,
125 ct_sane_info->state = SANE_STATE_NORMAL; 119 ct_sane_info->state = SANE_STATE_NORMAL;
126 120
127 if (datalen < sizeof(struct sane_reply_net_start)) { 121 if (datalen < sizeof(struct sane_reply_net_start)) {
128 DEBUGP("nf_ct_sane: NET_START reply too short\n"); 122 pr_debug("nf_ct_sane: NET_START reply too short\n");
129 goto out; 123 goto out;
130 } 124 }
131 125
132 reply = (struct sane_reply_net_start *)sb_ptr; 126 reply = (struct sane_reply_net_start *)sb_ptr;
133 if (reply->status != htonl(SANE_STATUS_SUCCESS)) { 127 if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
134 /* saned refused the command */ 128 /* saned refused the command */
135 DEBUGP("nf_ct_sane: unsuccessful SANE_STATUS = %u\n", 129 pr_debug("nf_ct_sane: unsuccessful SANE_STATUS = %u\n",
136 ntohl(reply->status)); 130 ntohl(reply->status));
137 goto out; 131 goto out;
138 } 132 }
139 133
@@ -141,35 +135,32 @@ static int help(struct sk_buff **pskb,
141 if (reply->zero != 0) 135 if (reply->zero != 0)
142 goto out; 136 goto out;
143 137
144 exp = nf_conntrack_expect_alloc(ct); 138 exp = nf_ct_expect_alloc(ct);
145 if (exp == NULL) { 139 if (exp == NULL) {
146 ret = NF_DROP; 140 ret = NF_DROP;
147 goto out; 141 goto out;
148 } 142 }
149 143
150 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 144 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
151 nf_conntrack_expect_init(exp, family, 145 nf_ct_expect_init(exp, family, &tuple->src.u3, &tuple->dst.u3,
152 &tuple->src.u3, &tuple->dst.u3, 146 IPPROTO_TCP, NULL, &reply->port);
153 IPPROTO_TCP,
154 NULL, &reply->port);
155 147
156 DEBUGP("nf_ct_sane: expect: "); 148 pr_debug("nf_ct_sane: expect: ");
157 NF_CT_DUMP_TUPLE(&exp->tuple); 149 NF_CT_DUMP_TUPLE(&exp->tuple);
158 NF_CT_DUMP_TUPLE(&exp->mask);
159 150
160 /* Can't expect this? Best to drop packet now. */ 151 /* Can't expect this? Best to drop packet now. */
161 if (nf_conntrack_expect_related(exp) != 0) 152 if (nf_ct_expect_related(exp) != 0)
162 ret = NF_DROP; 153 ret = NF_DROP;
163 154
164 nf_conntrack_expect_put(exp); 155 nf_ct_expect_put(exp);
165 156
166out: 157out:
167 spin_unlock_bh(&nf_sane_lock); 158 spin_unlock_bh(&nf_sane_lock);
168 return ret; 159 return ret;
169} 160}
170 161
171static struct nf_conntrack_helper sane[MAX_PORTS][2]; 162static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly;
172static char sane_names[MAX_PORTS][2][sizeof("sane-65535")]; 163static char sane_names[MAX_PORTS][2][sizeof("sane-65535")] __read_mostly;
173 164
174/* don't make this __exit, since it's called from __init ! */ 165/* don't make this __exit, since it's called from __init ! */
175static void nf_conntrack_sane_fini(void) 166static void nf_conntrack_sane_fini(void)
@@ -178,9 +169,9 @@ static void nf_conntrack_sane_fini(void)
178 169
179 for (i = 0; i < ports_c; i++) { 170 for (i = 0; i < ports_c; i++) {
180 for (j = 0; j < 2; j++) { 171 for (j = 0; j < 2; j++) {
181 DEBUGP("nf_ct_sane: unregistering helper for pf: %d " 172 pr_debug("nf_ct_sane: unregistering helper for pf: %d "
182 "port: %d\n", 173 "port: %d\n",
183 sane[i][j].tuple.src.l3num, ports[i]); 174 sane[i][j].tuple.src.l3num, ports[i]);
184 nf_conntrack_helper_unregister(&sane[i][j]); 175 nf_conntrack_helper_unregister(&sane[i][j]);
185 } 176 }
186 } 177 }
@@ -208,8 +199,6 @@ static int __init nf_conntrack_sane_init(void)
208 for (j = 0; j < 2; j++) { 199 for (j = 0; j < 2; j++) {
209 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]); 200 sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
210 sane[i][j].tuple.dst.protonum = IPPROTO_TCP; 201 sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
211 sane[i][j].mask.src.u.tcp.port = 0xFFFF;
212 sane[i][j].mask.dst.protonum = 0xFF;
213 sane[i][j].max_expected = 1; 202 sane[i][j].max_expected = 1;
214 sane[i][j].timeout = 5 * 60; /* 5 Minutes */ 203 sane[i][j].timeout = 5 * 60; /* 5 Minutes */
215 sane[i][j].me = THIS_MODULE; 204 sane[i][j].me = THIS_MODULE;
@@ -221,9 +210,9 @@ static int __init nf_conntrack_sane_init(void)
221 sprintf(tmpname, "sane-%d", ports[i]); 210 sprintf(tmpname, "sane-%d", ports[i]);
222 sane[i][j].name = tmpname; 211 sane[i][j].name = tmpname;
223 212
224 DEBUGP("nf_ct_sane: registering helper for pf: %d " 213 pr_debug("nf_ct_sane: registering helper for pf: %d "
225 "port: %d\n", 214 "port: %d\n",
226 sane[i][j].tuple.src.l3num, ports[i]); 215 sane[i][j].tuple.src.l3num, ports[i]);
227 ret = nf_conntrack_helper_register(&sane[i][j]); 216 ret = nf_conntrack_helper_register(&sane[i][j]);
228 if (ret) { 217 if (ret) {
229 printk(KERN_ERR "nf_ct_sane: failed to " 218 printk(KERN_ERR "nf_ct_sane: failed to "
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 1b5c6c1055f7..1276a442f10c 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -21,12 +21,6 @@
21#include <net/netfilter/nf_conntrack_helper.h> 21#include <net/netfilter/nf_conntrack_helper.h>
22#include <linux/netfilter/nf_conntrack_sip.h> 22#include <linux/netfilter/nf_conntrack_sip.h>
23 23
24#if 0
25#define DEBUGP printk
26#else
27#define DEBUGP(format, args...)
28#endif
29
30MODULE_LICENSE("GPL"); 24MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>"); 25MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
32MODULE_DESCRIPTION("SIP connection tracking helper"); 26MODULE_DESCRIPTION("SIP connection tracking helper");
@@ -285,7 +279,7 @@ static int epaddr_len(struct nf_conn *ct, const char *dptr,
285 const char *aux = dptr; 279 const char *aux = dptr;
286 280
287 if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { 281 if (!parse_addr(ct, dptr, &dptr, &addr, limit)) {
288 DEBUGP("ip: %s parse failed.!\n", dptr); 282 pr_debug("ip: %s parse failed.!\n", dptr);
289 return 0; 283 return 0;
290 } 284 }
291 285
@@ -344,8 +338,8 @@ int ct_sip_get_info(struct nf_conn *ct,
344 ct_sip_lnlen(dptr, limit), 338 ct_sip_lnlen(dptr, limit),
345 hnfo->case_sensitive); 339 hnfo->case_sensitive);
346 if (!aux) { 340 if (!aux) {
347 DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, 341 pr_debug("'%s' not found in '%s'.\n", hnfo->ln_str,
348 hnfo->lname); 342 hnfo->lname);
349 return -1; 343 return -1;
350 } 344 }
351 aux += hnfo->ln_strlen; 345 aux += hnfo->ln_strlen;
@@ -356,11 +350,11 @@ int ct_sip_get_info(struct nf_conn *ct,
356 350
357 *matchoff = (aux - k) + shift; 351 *matchoff = (aux - k) + shift;
358 352
359 DEBUGP("%s match succeeded! - len: %u\n", hnfo->lname, 353 pr_debug("%s match succeeded! - len: %u\n", hnfo->lname,
360 *matchlen); 354 *matchlen);
361 return 1; 355 return 1;
362 } 356 }
363 DEBUGP("%s header not found.\n", hnfo->lname); 357 pr_debug("%s header not found.\n", hnfo->lname);
364 return 0; 358 return 0;
365} 359}
366EXPORT_SYMBOL_GPL(ct_sip_get_info); 360EXPORT_SYMBOL_GPL(ct_sip_get_info);
@@ -378,23 +372,23 @@ static int set_expected_rtp(struct sk_buff **pskb,
378 int ret; 372 int ret;
379 typeof(nf_nat_sdp_hook) nf_nat_sdp; 373 typeof(nf_nat_sdp_hook) nf_nat_sdp;
380 374
381 exp = nf_conntrack_expect_alloc(ct); 375 exp = nf_ct_expect_alloc(ct);
382 if (exp == NULL) 376 if (exp == NULL)
383 return NF_DROP; 377 return NF_DROP;
384 nf_conntrack_expect_init(exp, family, 378 nf_ct_expect_init(exp, family,
385 &ct->tuplehash[!dir].tuple.src.u3, addr, 379 &ct->tuplehash[!dir].tuple.src.u3, addr,
386 IPPROTO_UDP, NULL, &port); 380 IPPROTO_UDP, NULL, &port);
387 381
388 nf_nat_sdp = rcu_dereference(nf_nat_sdp_hook); 382 nf_nat_sdp = rcu_dereference(nf_nat_sdp_hook);
389 if (nf_nat_sdp && ct->status & IPS_NAT_MASK) 383 if (nf_nat_sdp && ct->status & IPS_NAT_MASK)
390 ret = nf_nat_sdp(pskb, ctinfo, exp, dptr); 384 ret = nf_nat_sdp(pskb, ctinfo, exp, dptr);
391 else { 385 else {
392 if (nf_conntrack_expect_related(exp) != 0) 386 if (nf_ct_expect_related(exp) != 0)
393 ret = NF_DROP; 387 ret = NF_DROP;
394 else 388 else
395 ret = NF_ACCEPT; 389 ret = NF_ACCEPT;
396 } 390 }
397 nf_conntrack_expect_put(exp); 391 nf_ct_expect_put(exp);
398 392
399 return ret; 393 return ret;
400} 394}
@@ -424,7 +418,7 @@ static int sip_help(struct sk_buff **pskb,
424 if (!skb_is_nonlinear(*pskb)) 418 if (!skb_is_nonlinear(*pskb))
425 dptr = (*pskb)->data + dataoff; 419 dptr = (*pskb)->data + dataoff;
426 else { 420 else {
427 DEBUGP("Copy of skbuff not supported yet.\n"); 421 pr_debug("Copy of skbuff not supported yet.\n");
428 goto out; 422 goto out;
429 } 423 }
430 424
@@ -506,9 +500,6 @@ static int __init nf_conntrack_sip_init(void)
506 for (j = 0; j < 2; j++) { 500 for (j = 0; j < 2; j++) {
507 sip[i][j].tuple.dst.protonum = IPPROTO_UDP; 501 sip[i][j].tuple.dst.protonum = IPPROTO_UDP;
508 sip[i][j].tuple.src.u.udp.port = htons(ports[i]); 502 sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
509 sip[i][j].mask.src.l3num = 0xFFFF;
510 sip[i][j].mask.src.u.udp.port = htons(0xFFFF);
511 sip[i][j].mask.dst.protonum = 0xFF;
512 sip[i][j].max_expected = 2; 503 sip[i][j].max_expected = 2;
513 sip[i][j].timeout = 3 * 60; /* 3 minutes */ 504 sip[i][j].timeout = 3 * 60; /* 3 minutes */
514 sip[i][j].me = THIS_MODULE; 505 sip[i][j].me = THIS_MODULE;
@@ -521,7 +512,7 @@ static int __init nf_conntrack_sip_init(void)
521 sprintf(tmpname, "sip-%u", i); 512 sprintf(tmpname, "sip-%u", i);
522 sip[i][j].name = tmpname; 513 sip[i][j].name = tmpname;
523 514
524 DEBUGP("port #%u: %u\n", i, ports[i]); 515 pr_debug("port #%u: %u\n", i, ports[i]);
525 516
526 ret = nf_conntrack_helper_register(&sip[i][j]); 517 ret = nf_conntrack_helper_register(&sip[i][j]);
527 if (ret) { 518 if (ret) {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 45baeb0e30f9..ffb6ff8c3528 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -25,12 +25,6 @@
25#include <net/netfilter/nf_conntrack_expect.h> 25#include <net/netfilter/nf_conntrack_expect.h>
26#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
27 27
28#if 0
29#define DEBUGP printk
30#else
31#define DEBUGP(format, args...)
32#endif
33
34MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
35 29
36#ifdef CONFIG_PROC_FS 30#ifdef CONFIG_PROC_FS
@@ -60,35 +54,36 @@ struct ct_iter_state {
60 unsigned int bucket; 54 unsigned int bucket;
61}; 55};
62 56
63static struct list_head *ct_get_first(struct seq_file *seq) 57static struct hlist_node *ct_get_first(struct seq_file *seq)
64{ 58{
65 struct ct_iter_state *st = seq->private; 59 struct ct_iter_state *st = seq->private;
66 60
67 for (st->bucket = 0; 61 for (st->bucket = 0;
68 st->bucket < nf_conntrack_htable_size; 62 st->bucket < nf_conntrack_htable_size;
69 st->bucket++) { 63 st->bucket++) {
70 if (!list_empty(&nf_conntrack_hash[st->bucket])) 64 if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
71 return nf_conntrack_hash[st->bucket].next; 65 return nf_conntrack_hash[st->bucket].first;
72 } 66 }
73 return NULL; 67 return NULL;
74} 68}
75 69
76static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head) 70static struct hlist_node *ct_get_next(struct seq_file *seq,
71 struct hlist_node *head)
77{ 72{
78 struct ct_iter_state *st = seq->private; 73 struct ct_iter_state *st = seq->private;
79 74
80 head = head->next; 75 head = head->next;
81 while (head == &nf_conntrack_hash[st->bucket]) { 76 while (head == NULL) {
82 if (++st->bucket >= nf_conntrack_htable_size) 77 if (++st->bucket >= nf_conntrack_htable_size)
83 return NULL; 78 return NULL;
84 head = nf_conntrack_hash[st->bucket].next; 79 head = nf_conntrack_hash[st->bucket].first;
85 } 80 }
86 return head; 81 return head;
87} 82}
88 83
89static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos) 84static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
90{ 85{
91 struct list_head *head = ct_get_first(seq); 86 struct hlist_node *head = ct_get_first(seq);
92 87
93 if (head) 88 if (head)
94 while (pos && (head = ct_get_next(seq, head))) 89 while (pos && (head = ct_get_next(seq, head)))
@@ -190,7 +185,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
190 return 0; 185 return 0;
191} 186}
192 187
193static struct seq_operations ct_seq_ops = { 188static const struct seq_operations ct_seq_ops = {
194 .start = ct_seq_start, 189 .start = ct_seq_start,
195 .next = ct_seq_next, 190 .next = ct_seq_next,
196 .stop = ct_seq_stop, 191 .stop = ct_seq_stop,
@@ -294,7 +289,7 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
294 return 0; 289 return 0;
295} 290}
296 291
297static struct seq_operations ct_cpu_seq_ops = { 292static const struct seq_operations ct_cpu_seq_ops = {
298 .start = ct_cpu_seq_start, 293 .start = ct_cpu_seq_start,
299 .next = ct_cpu_seq_next, 294 .next = ct_cpu_seq_next,
300 .stop = ct_cpu_seq_stop, 295 .stop = ct_cpu_seq_stop,
@@ -371,7 +366,14 @@ static ctl_table nf_ct_sysctl_table[] = {
371 .extra1 = &log_invalid_proto_min, 366 .extra1 = &log_invalid_proto_min,
372 .extra2 = &log_invalid_proto_max, 367 .extra2 = &log_invalid_proto_max,
373 }, 368 },
374 369 {
370 .ctl_name = CTL_UNNUMBERED,
371 .procname = "nf_conntrack_expect_max",
372 .data = &nf_ct_expect_max,
373 .maxlen = sizeof(int),
374 .mode = 0644,
375 .proc_handler = &proc_dointvec,
376 },
375 { .ctl_name = 0 } 377 { .ctl_name = 0 }
376}; 378};
377 379
@@ -410,7 +412,7 @@ EXPORT_SYMBOL_GPL(nf_ct_log_invalid);
410static int __init nf_conntrack_standalone_init(void) 412static int __init nf_conntrack_standalone_init(void)
411{ 413{
412#ifdef CONFIG_PROC_FS 414#ifdef CONFIG_PROC_FS
413 struct proc_dir_entry *proc, *proc_exp, *proc_stat; 415 struct proc_dir_entry *proc, *proc_stat;
414#endif 416#endif
415 int ret = 0; 417 int ret = 0;
416 418
@@ -422,13 +424,9 @@ static int __init nf_conntrack_standalone_init(void)
422 proc = proc_net_fops_create("nf_conntrack", 0440, &ct_file_ops); 424 proc = proc_net_fops_create("nf_conntrack", 0440, &ct_file_ops);
423 if (!proc) goto cleanup_init; 425 if (!proc) goto cleanup_init;
424 426
425 proc_exp = proc_net_fops_create("nf_conntrack_expect", 0440,
426 &exp_file_ops);
427 if (!proc_exp) goto cleanup_proc;
428
429 proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, proc_net_stat); 427 proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, proc_net_stat);
430 if (!proc_stat) 428 if (!proc_stat)
431 goto cleanup_proc_exp; 429 goto cleanup_proc;
432 430
433 proc_stat->proc_fops = &ct_cpu_seq_fops; 431 proc_stat->proc_fops = &ct_cpu_seq_fops;
434 proc_stat->owner = THIS_MODULE; 432 proc_stat->owner = THIS_MODULE;
@@ -448,8 +446,6 @@ static int __init nf_conntrack_standalone_init(void)
448#endif 446#endif
449#ifdef CONFIG_PROC_FS 447#ifdef CONFIG_PROC_FS
450 remove_proc_entry("nf_conntrack", proc_net_stat); 448 remove_proc_entry("nf_conntrack", proc_net_stat);
451 cleanup_proc_exp:
452 proc_net_remove("nf_conntrack_expect");
453 cleanup_proc: 449 cleanup_proc:
454 proc_net_remove("nf_conntrack"); 450 proc_net_remove("nf_conntrack");
455 cleanup_init: 451 cleanup_init:
@@ -465,7 +461,6 @@ static void __exit nf_conntrack_standalone_fini(void)
465#endif 461#endif
466#ifdef CONFIG_PROC_FS 462#ifdef CONFIG_PROC_FS
467 remove_proc_entry("nf_conntrack", proc_net_stat); 463 remove_proc_entry("nf_conntrack", proc_net_stat);
468 proc_net_remove("nf_conntrack_expect");
469 proc_net_remove("nf_conntrack"); 464 proc_net_remove("nf_conntrack");
470#endif /* CNFIG_PROC_FS */ 465#endif /* CNFIG_PROC_FS */
471 nf_conntrack_cleanup(); 466 nf_conntrack_cleanup();
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 37c4542e3112..cc19506cf2f8 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -29,13 +29,6 @@ static int ports_c;
29module_param_array(ports, ushort, &ports_c, 0400); 29module_param_array(ports, ushort, &ports_c, 0400);
30MODULE_PARM_DESC(ports, "Port numbers of TFTP servers"); 30MODULE_PARM_DESC(ports, "Port numbers of TFTP servers");
31 31
32#if 0
33#define DEBUGP(format, args...) printk("%s:%s:" format, \
34 __FILE__, __FUNCTION__ , ## args)
35#else
36#define DEBUGP(format, args...)
37#endif
38
39unsigned int (*nf_nat_tftp_hook)(struct sk_buff **pskb, 32unsigned int (*nf_nat_tftp_hook)(struct sk_buff **pskb,
40 enum ip_conntrack_info ctinfo, 33 enum ip_conntrack_info ctinfo,
41 struct nf_conntrack_expect *exp) __read_mostly; 34 struct nf_conntrack_expect *exp) __read_mostly;
@@ -62,39 +55,35 @@ static int tftp_help(struct sk_buff **pskb,
62 case TFTP_OPCODE_READ: 55 case TFTP_OPCODE_READ:
63 case TFTP_OPCODE_WRITE: 56 case TFTP_OPCODE_WRITE:
64 /* RRQ and WRQ works the same way */ 57 /* RRQ and WRQ works the same way */
65 DEBUGP("");
66 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 58 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
67 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 59 NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
68 60
69 exp = nf_conntrack_expect_alloc(ct); 61 exp = nf_ct_expect_alloc(ct);
70 if (exp == NULL) 62 if (exp == NULL)
71 return NF_DROP; 63 return NF_DROP;
72 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; 64 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
73 nf_conntrack_expect_init(exp, family, 65 nf_ct_expect_init(exp, family, &tuple->src.u3, &tuple->dst.u3,
74 &tuple->src.u3, &tuple->dst.u3, 66 IPPROTO_UDP, NULL, &tuple->dst.u.udp.port);
75 IPPROTO_UDP,
76 NULL, &tuple->dst.u.udp.port);
77 67
78 DEBUGP("expect: "); 68 pr_debug("expect: ");
79 NF_CT_DUMP_TUPLE(&exp->tuple); 69 NF_CT_DUMP_TUPLE(&exp->tuple);
80 NF_CT_DUMP_TUPLE(&exp->mask);
81 70
82 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); 71 nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
83 if (nf_nat_tftp && ct->status & IPS_NAT_MASK) 72 if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
84 ret = nf_nat_tftp(pskb, ctinfo, exp); 73 ret = nf_nat_tftp(pskb, ctinfo, exp);
85 else if (nf_conntrack_expect_related(exp) != 0) 74 else if (nf_ct_expect_related(exp) != 0)
86 ret = NF_DROP; 75 ret = NF_DROP;
87 nf_conntrack_expect_put(exp); 76 nf_ct_expect_put(exp);
88 break; 77 break;
89 case TFTP_OPCODE_DATA: 78 case TFTP_OPCODE_DATA:
90 case TFTP_OPCODE_ACK: 79 case TFTP_OPCODE_ACK:
91 DEBUGP("Data/ACK opcode\n"); 80 pr_debug("Data/ACK opcode\n");
92 break; 81 break;
93 case TFTP_OPCODE_ERROR: 82 case TFTP_OPCODE_ERROR:
94 DEBUGP("Error opcode\n"); 83 pr_debug("Error opcode\n");
95 break; 84 break;
96 default: 85 default:
97 DEBUGP("Unknown opcode\n"); 86 pr_debug("Unknown opcode\n");
98 } 87 }
99 return ret; 88 return ret;
100} 89}
@@ -128,9 +117,6 @@ static int __init nf_conntrack_tftp_init(void)
128 for (j = 0; j < 2; j++) { 117 for (j = 0; j < 2; j++) {
129 tftp[i][j].tuple.dst.protonum = IPPROTO_UDP; 118 tftp[i][j].tuple.dst.protonum = IPPROTO_UDP;
130 tftp[i][j].tuple.src.u.udp.port = htons(ports[i]); 119 tftp[i][j].tuple.src.u.udp.port = htons(ports[i]);
131 tftp[i][j].mask.src.l3num = 0xFFFF;
132 tftp[i][j].mask.dst.protonum = 0xFF;
133 tftp[i][j].mask.src.u.udp.port = htons(0xFFFF);
134 tftp[i][j].max_expected = 1; 120 tftp[i][j].max_expected = 1;
135 tftp[i][j].timeout = 5 * 60; /* 5 minutes */ 121 tftp[i][j].timeout = 5 * 60; /* 5 minutes */
136 tftp[i][j].me = THIS_MODULE; 122 tftp[i][j].me = THIS_MODULE;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 91b220cf5a1f..94985792b79a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -140,7 +140,7 @@ static int seq_show(struct seq_file *s, void *v)
140 return seq_printf(s, "%2lld %s\n", *pos, logger->name); 140 return seq_printf(s, "%2lld %s\n", *pos, logger->name);
141} 141}
142 142
143static struct seq_operations nflog_seq_ops = { 143static const struct seq_operations nflog_seq_ops = {
144 .start = seq_start, 144 .start = seq_start,
145 .next = seq_next, 145 .next = seq_next,
146 .stop = seq_stop, 146 .stop = seq_stop,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b1f2ace96f6d..a481a349f7bf 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -17,7 +17,7 @@
17 */ 17 */
18static struct nf_queue_handler *queue_handler[NPROTO]; 18static struct nf_queue_handler *queue_handler[NPROTO];
19 19
20static DEFINE_RWLOCK(queue_handler_lock); 20static DEFINE_MUTEX(queue_handler_mutex);
21 21
22/* return EBUSY when somebody else is registered, return EEXIST if the 22/* return EBUSY when somebody else is registered, return EEXIST if the
23 * same handler is registered, return 0 in case of success. */ 23 * same handler is registered, return 0 in case of success. */
@@ -28,30 +28,37 @@ int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
28 if (pf >= NPROTO) 28 if (pf >= NPROTO)
29 return -EINVAL; 29 return -EINVAL;
30 30
31 write_lock_bh(&queue_handler_lock); 31 mutex_lock(&queue_handler_mutex);
32 if (queue_handler[pf] == qh) 32 if (queue_handler[pf] == qh)
33 ret = -EEXIST; 33 ret = -EEXIST;
34 else if (queue_handler[pf]) 34 else if (queue_handler[pf])
35 ret = -EBUSY; 35 ret = -EBUSY;
36 else { 36 else {
37 queue_handler[pf] = qh; 37 rcu_assign_pointer(queue_handler[pf], qh);
38 ret = 0; 38 ret = 0;
39 } 39 }
40 write_unlock_bh(&queue_handler_lock); 40 mutex_unlock(&queue_handler_mutex);
41 41
42 return ret; 42 return ret;
43} 43}
44EXPORT_SYMBOL(nf_register_queue_handler); 44EXPORT_SYMBOL(nf_register_queue_handler);
45 45
46/* The caller must flush their queue before this */ 46/* The caller must flush their queue before this */
47int nf_unregister_queue_handler(int pf) 47int nf_unregister_queue_handler(int pf, struct nf_queue_handler *qh)
48{ 48{
49 if (pf >= NPROTO) 49 if (pf >= NPROTO)
50 return -EINVAL; 50 return -EINVAL;
51 51
52 write_lock_bh(&queue_handler_lock); 52 mutex_lock(&queue_handler_mutex);
53 queue_handler[pf] = NULL; 53 if (queue_handler[pf] != qh) {
54 write_unlock_bh(&queue_handler_lock); 54 mutex_unlock(&queue_handler_mutex);
55 return -EINVAL;
56 }
57
58 rcu_assign_pointer(queue_handler[pf], NULL);
59 mutex_unlock(&queue_handler_mutex);
60
61 synchronize_rcu();
55 62
56 return 0; 63 return 0;
57} 64}
@@ -61,12 +68,14 @@ void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
61{ 68{
62 int pf; 69 int pf;
63 70
64 write_lock_bh(&queue_handler_lock); 71 mutex_lock(&queue_handler_mutex);
65 for (pf = 0; pf < NPROTO; pf++) { 72 for (pf = 0; pf < NPROTO; pf++) {
66 if (queue_handler[pf] == qh) 73 if (queue_handler[pf] == qh)
67 queue_handler[pf] = NULL; 74 rcu_assign_pointer(queue_handler[pf], NULL);
68 } 75 }
69 write_unlock_bh(&queue_handler_lock); 76 mutex_unlock(&queue_handler_mutex);
77
78 synchronize_rcu();
70} 79}
71EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); 80EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
72 81
@@ -89,18 +98,21 @@ static int __nf_queue(struct sk_buff *skb,
89 struct net_device *physoutdev = NULL; 98 struct net_device *physoutdev = NULL;
90#endif 99#endif
91 struct nf_afinfo *afinfo; 100 struct nf_afinfo *afinfo;
101 struct nf_queue_handler *qh;
92 102
93 /* QUEUE == DROP if noone is waiting, to be safe. */ 103 /* QUEUE == DROP if noone is waiting, to be safe. */
94 read_lock(&queue_handler_lock); 104 rcu_read_lock();
95 if (!queue_handler[pf]) { 105
96 read_unlock(&queue_handler_lock); 106 qh = rcu_dereference(queue_handler[pf]);
107 if (!qh) {
108 rcu_read_unlock();
97 kfree_skb(skb); 109 kfree_skb(skb);
98 return 1; 110 return 1;
99 } 111 }
100 112
101 afinfo = nf_get_afinfo(pf); 113 afinfo = nf_get_afinfo(pf);
102 if (!afinfo) { 114 if (!afinfo) {
103 read_unlock(&queue_handler_lock); 115 rcu_read_unlock();
104 kfree_skb(skb); 116 kfree_skb(skb);
105 return 1; 117 return 1;
106 } 118 }
@@ -110,7 +122,7 @@ static int __nf_queue(struct sk_buff *skb,
110 if (net_ratelimit()) 122 if (net_ratelimit())
111 printk(KERN_ERR "OOM queueing packet %p\n", 123 printk(KERN_ERR "OOM queueing packet %p\n",
112 skb); 124 skb);
113 read_unlock(&queue_handler_lock); 125 rcu_read_unlock();
114 kfree_skb(skb); 126 kfree_skb(skb);
115 return 1; 127 return 1;
116 } 128 }
@@ -120,7 +132,7 @@ static int __nf_queue(struct sk_buff *skb,
120 132
121 /* If it's going away, ignore hook. */ 133 /* If it's going away, ignore hook. */
122 if (!try_module_get(info->elem->owner)) { 134 if (!try_module_get(info->elem->owner)) {
123 read_unlock(&queue_handler_lock); 135 rcu_read_unlock();
124 kfree(info); 136 kfree(info);
125 return 0; 137 return 0;
126 } 138 }
@@ -138,10 +150,9 @@ static int __nf_queue(struct sk_buff *skb,
138 } 150 }
139#endif 151#endif
140 afinfo->saveroute(skb, info); 152 afinfo->saveroute(skb, info);
141 status = queue_handler[pf]->outfn(skb, info, queuenum, 153 status = qh->outfn(skb, info, queuenum, qh->data);
142 queue_handler[pf]->data);
143 154
144 read_unlock(&queue_handler_lock); 155 rcu_read_unlock();
145 156
146 if (status < 0) { 157 if (status < 0) {
147 /* James M doesn't say fuck enough. */ 158 /* James M doesn't say fuck enough. */
@@ -308,18 +319,18 @@ static int seq_show(struct seq_file *s, void *v)
308 loff_t *pos = v; 319 loff_t *pos = v;
309 struct nf_queue_handler *qh; 320 struct nf_queue_handler *qh;
310 321
311 read_lock_bh(&queue_handler_lock); 322 rcu_read_lock();
312 qh = queue_handler[*pos]; 323 qh = rcu_dereference(queue_handler[*pos]);
313 if (!qh) 324 if (!qh)
314 ret = seq_printf(s, "%2lld NONE\n", *pos); 325 ret = seq_printf(s, "%2lld NONE\n", *pos);
315 else 326 else
316 ret = seq_printf(s, "%2lld %s\n", *pos, qh->name); 327 ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
317 read_unlock_bh(&queue_handler_lock); 328 rcu_read_unlock();
318 329
319 return ret; 330 return ret;
320} 331}
321 332
322static struct seq_operations nfqueue_seq_ops = { 333static const struct seq_operations nfqueue_seq_ops = {
323 .start = seq_start, 334 .start = seq_start,
324 .next = seq_next, 335 .next = seq_next,
325 .stop = seq_stop, 336 .stop = seq_stop,
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e32e30e7a17c..e185a5b55913 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -962,7 +962,7 @@ static int seq_show(struct seq_file *s, void *v)
962 inst->flushtimeout, atomic_read(&inst->use)); 962 inst->flushtimeout, atomic_read(&inst->use));
963} 963}
964 964
965static struct seq_operations nful_seq_ops = { 965static const struct seq_operations nful_seq_ops = {
966 .start = seq_start, 966 .start = seq_start,
967 .next = seq_next, 967 .next = seq_next,
968 .stop = seq_stop, 968 .stop = seq_stop,
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7a97bec67729..bb65a38c816c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -913,9 +913,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
913 case NFQNL_CFG_CMD_PF_UNBIND: 913 case NFQNL_CFG_CMD_PF_UNBIND:
914 QDEBUG("unregistering queue handler for pf=%u\n", 914 QDEBUG("unregistering queue handler for pf=%u\n",
915 ntohs(cmd->pf)); 915 ntohs(cmd->pf));
916 /* This is a bug and a feature. We can unregister 916 ret = nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh);
917 * other handlers(!) */
918 ret = nf_unregister_queue_handler(ntohs(cmd->pf));
919 break; 917 break;
920 default: 918 default:
921 ret = -EINVAL; 919 ret = -EINVAL;
@@ -1050,7 +1048,7 @@ static int seq_show(struct seq_file *s, void *v)
1050 atomic_read(&inst->use)); 1048 atomic_read(&inst->use));
1051} 1049}
1052 1050
1053static struct seq_operations nfqnl_seq_ops = { 1051static const struct seq_operations nfqnl_seq_ops = {
1054 .start = seq_start, 1052 .start = seq_start,
1055 .next = seq_next, 1053 .next = seq_next,
1056 .stop = seq_stop, 1054 .stop = seq_stop,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 0eb2504b89b5..cc2baa6d5a7a 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -320,8 +320,8 @@ int xt_check_match(const struct xt_match *match, unsigned short family,
320 return -EINVAL; 320 return -EINVAL;
321 } 321 }
322 if (match->hooks && (hook_mask & ~match->hooks) != 0) { 322 if (match->hooks && (hook_mask & ~match->hooks) != 0) {
323 printk("%s_tables: %s match: bad hook_mask %u\n", 323 printk("%s_tables: %s match: bad hook_mask %u/%u\n",
324 xt_prefix[family], match->name, hook_mask); 324 xt_prefix[family], match->name, hook_mask, match->hooks);
325 return -EINVAL; 325 return -EINVAL;
326 } 326 }
327 if (match->proto && (match->proto != proto || inv_proto)) { 327 if (match->proto && (match->proto != proto || inv_proto)) {
@@ -410,8 +410,9 @@ int xt_check_target(const struct xt_target *target, unsigned short family,
410 return -EINVAL; 410 return -EINVAL;
411 } 411 }
412 if (target->hooks && (hook_mask & ~target->hooks) != 0) { 412 if (target->hooks && (hook_mask & ~target->hooks) != 0) {
413 printk("%s_tables: %s target: bad hook_mask %u\n", 413 printk("%s_tables: %s target: bad hook_mask %u/%u\n",
414 xt_prefix[family], target->name, hook_mask); 414 xt_prefix[family], target->name, hook_mask,
415 target->hooks);
415 return -EINVAL; 416 return -EINVAL;
416 } 417 }
417 if (target->proto && (target->proto != proto || inv_proto)) { 418 if (target->proto && (target->proto != proto || inv_proto)) {
@@ -744,7 +745,7 @@ static int xt_name_seq_show(struct seq_file *seq, void *v)
744 return 0; 745 return 0;
745} 746}
746 747
747static struct seq_operations xt_tgt_seq_ops = { 748static const struct seq_operations xt_tgt_seq_ops = {
748 .start = xt_tgt_seq_start, 749 .start = xt_tgt_seq_start,
749 .next = xt_tgt_seq_next, 750 .next = xt_tgt_seq_next,
750 .stop = xt_tgt_seq_stop, 751 .stop = xt_tgt_seq_stop,
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index 30884833e665..519428566829 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -39,7 +39,7 @@ target(struct sk_buff **pskb,
39 return XT_CONTINUE; 39 return XT_CONTINUE;
40} 40}
41 41
42static struct xt_target xt_classify_target[] = { 42static struct xt_target xt_classify_target[] __read_mostly = {
43 { 43 {
44 .family = AF_INET, 44 .family = AF_INET,
45 .name = "CLASSIFY", 45 .name = "CLASSIFY",
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
index b03ce009d0bf..5a00c5444334 100644
--- a/net/netfilter/xt_CONNMARK.c
+++ b/net/netfilter/xt_CONNMARK.c
@@ -76,33 +76,33 @@ target(struct sk_buff **pskb,
76 return XT_CONTINUE; 76 return XT_CONTINUE;
77} 77}
78 78
79static int 79static bool
80checkentry(const char *tablename, 80checkentry(const char *tablename,
81 const void *entry, 81 const void *entry,
82 const struct xt_target *target, 82 const struct xt_target *target,
83 void *targinfo, 83 void *targinfo,
84 unsigned int hook_mask) 84 unsigned int hook_mask)
85{ 85{
86 struct xt_connmark_target_info *matchinfo = targinfo; 86 const struct xt_connmark_target_info *matchinfo = targinfo;
87 87
88 if (nf_ct_l3proto_try_module_get(target->family) < 0) { 88 if (nf_ct_l3proto_try_module_get(target->family) < 0) {
89 printk(KERN_WARNING "can't load conntrack support for " 89 printk(KERN_WARNING "can't load conntrack support for "
90 "proto=%d\n", target->family); 90 "proto=%d\n", target->family);
91 return 0; 91 return false;
92 } 92 }
93 if (matchinfo->mode == XT_CONNMARK_RESTORE) { 93 if (matchinfo->mode == XT_CONNMARK_RESTORE) {
94 if (strcmp(tablename, "mangle") != 0) { 94 if (strcmp(tablename, "mangle") != 0) {
95 printk(KERN_WARNING "CONNMARK: restore can only be " 95 printk(KERN_WARNING "CONNMARK: restore can only be "
96 "called from \"mangle\" table, not \"%s\"\n", 96 "called from \"mangle\" table, not \"%s\"\n",
97 tablename); 97 tablename);
98 return 0; 98 return false;
99 } 99 }
100 } 100 }
101 if (matchinfo->mark > 0xffffffff || matchinfo->mask > 0xffffffff) { 101 if (matchinfo->mark > 0xffffffff || matchinfo->mask > 0xffffffff) {
102 printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n"); 102 printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n");
103 return 0; 103 return false;
104 } 104 }
105 return 1; 105 return true;
106} 106}
107 107
108static void 108static void
@@ -121,7 +121,7 @@ struct compat_xt_connmark_target_info {
121 121
122static void compat_from_user(void *dst, void *src) 122static void compat_from_user(void *dst, void *src)
123{ 123{
124 struct compat_xt_connmark_target_info *cm = src; 124 const struct compat_xt_connmark_target_info *cm = src;
125 struct xt_connmark_target_info m = { 125 struct xt_connmark_target_info m = {
126 .mark = cm->mark, 126 .mark = cm->mark,
127 .mask = cm->mask, 127 .mask = cm->mask,
@@ -132,7 +132,7 @@ static void compat_from_user(void *dst, void *src)
132 132
133static int compat_to_user(void __user *dst, void *src) 133static int compat_to_user(void __user *dst, void *src)
134{ 134{
135 struct xt_connmark_target_info *m = src; 135 const struct xt_connmark_target_info *m = src;
136 struct compat_xt_connmark_target_info cm = { 136 struct compat_xt_connmark_target_info cm = {
137 .mark = m->mark, 137 .mark = m->mark,
138 .mask = m->mask, 138 .mask = m->mask,
@@ -142,7 +142,7 @@ static int compat_to_user(void __user *dst, void *src)
142} 142}
143#endif /* CONFIG_COMPAT */ 143#endif /* CONFIG_COMPAT */
144 144
145static struct xt_target xt_connmark_target[] = { 145static struct xt_target xt_connmark_target[] __read_mostly = {
146 { 146 {
147 .name = "CONNMARK", 147 .name = "CONNMARK",
148 .family = AF_INET, 148 .family = AF_INET,
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index 81c0c58bab47..63d73138c1b9 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -33,7 +33,7 @@ MODULE_ALIAS("ip6t_CONNSECMARK");
33 * If the packet has a security mark and the connection does not, copy 33 * If the packet has a security mark and the connection does not, copy
34 * the security mark from the packet to the connection. 34 * the security mark from the packet to the connection.
35 */ 35 */
36static void secmark_save(struct sk_buff *skb) 36static void secmark_save(const struct sk_buff *skb)
37{ 37{
38 if (skb->secmark) { 38 if (skb->secmark) {
39 struct nf_conn *ct; 39 struct nf_conn *ct;
@@ -85,16 +85,16 @@ static unsigned int target(struct sk_buff **pskb, const struct net_device *in,
85 return XT_CONTINUE; 85 return XT_CONTINUE;
86} 86}
87 87
88static int checkentry(const char *tablename, const void *entry, 88static bool checkentry(const char *tablename, const void *entry,
89 const struct xt_target *target, void *targinfo, 89 const struct xt_target *target, void *targinfo,
90 unsigned int hook_mask) 90 unsigned int hook_mask)
91{ 91{
92 struct xt_connsecmark_target_info *info = targinfo; 92 const struct xt_connsecmark_target_info *info = targinfo;
93 93
94 if (nf_ct_l3proto_try_module_get(target->family) < 0) { 94 if (nf_ct_l3proto_try_module_get(target->family) < 0) {
95 printk(KERN_WARNING "can't load conntrack support for " 95 printk(KERN_WARNING "can't load conntrack support for "
96 "proto=%d\n", target->family); 96 "proto=%d\n", target->family);
97 return 0; 97 return false;
98 } 98 }
99 switch (info->mode) { 99 switch (info->mode) {
100 case CONNSECMARK_SAVE: 100 case CONNSECMARK_SAVE:
@@ -103,10 +103,10 @@ static int checkentry(const char *tablename, const void *entry,
103 103
104 default: 104 default:
105 printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode); 105 printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode);
106 return 0; 106 return false;
107 } 107 }
108 108
109 return 1; 109 return true;
110} 110}
111 111
112static void 112static void
@@ -115,7 +115,7 @@ destroy(const struct xt_target *target, void *targinfo)
115 nf_ct_l3proto_module_put(target->family); 115 nf_ct_l3proto_module_put(target->family);
116} 116}
117 117
118static struct xt_target xt_connsecmark_target[] = { 118static struct xt_target xt_connsecmark_target[] __read_mostly = {
119 { 119 {
120 .name = "CONNSECMARK", 120 .name = "CONNSECMARK",
121 .family = AF_INET, 121 .family = AF_INET,
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 9f2f2201f6ae..798ab731009d 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -66,22 +66,22 @@ static unsigned int target6(struct sk_buff **pskb,
66 return XT_CONTINUE; 66 return XT_CONTINUE;
67} 67}
68 68
69static int checkentry(const char *tablename, 69static bool checkentry(const char *tablename,
70 const void *e_void, 70 const void *e_void,
71 const struct xt_target *target, 71 const struct xt_target *target,
72 void *targinfo, 72 void *targinfo,
73 unsigned int hook_mask) 73 unsigned int hook_mask)
74{ 74{
75 const u_int8_t dscp = ((struct xt_DSCP_info *)targinfo)->dscp; 75 const u_int8_t dscp = ((struct xt_DSCP_info *)targinfo)->dscp;
76 76
77 if ((dscp > XT_DSCP_MAX)) { 77 if (dscp > XT_DSCP_MAX) {
78 printk(KERN_WARNING "DSCP: dscp %x out of range\n", dscp); 78 printk(KERN_WARNING "DSCP: dscp %x out of range\n", dscp);
79 return 0; 79 return false;
80 } 80 }
81 return 1; 81 return true;
82} 82}
83 83
84static struct xt_target xt_dscp_target[] = { 84static struct xt_target xt_dscp_target[] __read_mostly = {
85 { 85 {
86 .name = "DSCP", 86 .name = "DSCP",
87 .family = AF_INET, 87 .family = AF_INET,
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
index 43817808d865..f30fe0baf7de 100644
--- a/net/netfilter/xt_MARK.c
+++ b/net/netfilter/xt_MARK.c
@@ -65,43 +65,43 @@ target_v1(struct sk_buff **pskb,
65} 65}
66 66
67 67
68static int 68static bool
69checkentry_v0(const char *tablename, 69checkentry_v0(const char *tablename,
70 const void *entry, 70 const void *entry,
71 const struct xt_target *target, 71 const struct xt_target *target,
72 void *targinfo, 72 void *targinfo,
73 unsigned int hook_mask) 73 unsigned int hook_mask)
74{ 74{
75 struct xt_mark_target_info *markinfo = targinfo; 75 const struct xt_mark_target_info *markinfo = targinfo;
76 76
77 if (markinfo->mark > 0xffffffff) { 77 if (markinfo->mark > 0xffffffff) {
78 printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n"); 78 printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
79 return 0; 79 return false;
80 } 80 }
81 return 1; 81 return true;
82} 82}
83 83
84static int 84static bool
85checkentry_v1(const char *tablename, 85checkentry_v1(const char *tablename,
86 const void *entry, 86 const void *entry,
87 const struct xt_target *target, 87 const struct xt_target *target,
88 void *targinfo, 88 void *targinfo,
89 unsigned int hook_mask) 89 unsigned int hook_mask)
90{ 90{
91 struct xt_mark_target_info_v1 *markinfo = targinfo; 91 const struct xt_mark_target_info_v1 *markinfo = targinfo;
92 92
93 if (markinfo->mode != XT_MARK_SET 93 if (markinfo->mode != XT_MARK_SET
94 && markinfo->mode != XT_MARK_AND 94 && markinfo->mode != XT_MARK_AND
95 && markinfo->mode != XT_MARK_OR) { 95 && markinfo->mode != XT_MARK_OR) {
96 printk(KERN_WARNING "MARK: unknown mode %u\n", 96 printk(KERN_WARNING "MARK: unknown mode %u\n",
97 markinfo->mode); 97 markinfo->mode);
98 return 0; 98 return false;
99 } 99 }
100 if (markinfo->mark > 0xffffffff) { 100 if (markinfo->mark > 0xffffffff) {
101 printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n"); 101 printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
102 return 0; 102 return false;
103 } 103 }
104 return 1; 104 return true;
105} 105}
106 106
107#ifdef CONFIG_COMPAT 107#ifdef CONFIG_COMPAT
@@ -114,7 +114,7 @@ struct compat_xt_mark_target_info_v1 {
114 114
115static void compat_from_user_v1(void *dst, void *src) 115static void compat_from_user_v1(void *dst, void *src)
116{ 116{
117 struct compat_xt_mark_target_info_v1 *cm = src; 117 const struct compat_xt_mark_target_info_v1 *cm = src;
118 struct xt_mark_target_info_v1 m = { 118 struct xt_mark_target_info_v1 m = {
119 .mark = cm->mark, 119 .mark = cm->mark,
120 .mode = cm->mode, 120 .mode = cm->mode,
@@ -124,7 +124,7 @@ static void compat_from_user_v1(void *dst, void *src)
124 124
125static int compat_to_user_v1(void __user *dst, void *src) 125static int compat_to_user_v1(void __user *dst, void *src)
126{ 126{
127 struct xt_mark_target_info_v1 *m = src; 127 const struct xt_mark_target_info_v1 *m = src;
128 struct compat_xt_mark_target_info_v1 cm = { 128 struct compat_xt_mark_target_info_v1 cm = {
129 .mark = m->mark, 129 .mark = m->mark,
130 .mode = m->mode, 130 .mode = m->mode,
@@ -133,7 +133,7 @@ static int compat_to_user_v1(void __user *dst, void *src)
133} 133}
134#endif /* CONFIG_COMPAT */ 134#endif /* CONFIG_COMPAT */
135 135
136static struct xt_target xt_mark_target[] = { 136static struct xt_target xt_mark_target[] __read_mostly = {
137 { 137 {
138 .name = "MARK", 138 .name = "MARK",
139 .family = AF_INET, 139 .family = AF_INET,
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index 901ed7abaa1b..d3594c7ccb26 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -38,21 +38,21 @@ nflog_target(struct sk_buff **pskb,
38 return XT_CONTINUE; 38 return XT_CONTINUE;
39} 39}
40 40
41static int 41static bool
42nflog_checkentry(const char *tablename, const void *entry, 42nflog_checkentry(const char *tablename, const void *entry,
43 const struct xt_target *target, void *targetinfo, 43 const struct xt_target *target, void *targetinfo,
44 unsigned int hookmask) 44 unsigned int hookmask)
45{ 45{
46 struct xt_nflog_info *info = targetinfo; 46 const struct xt_nflog_info *info = targetinfo;
47 47
48 if (info->flags & ~XT_NFLOG_MASK) 48 if (info->flags & ~XT_NFLOG_MASK)
49 return 0; 49 return false;
50 if (info->prefix[sizeof(info->prefix) - 1] != '\0') 50 if (info->prefix[sizeof(info->prefix) - 1] != '\0')
51 return 0; 51 return false;
52 return 1; 52 return true;
53} 53}
54 54
55static struct xt_target xt_nflog_target[] = { 55static struct xt_target xt_nflog_target[] __read_mostly = {
56 { 56 {
57 .name = "NFLOG", 57 .name = "NFLOG",
58 .family = AF_INET, 58 .family = AF_INET,
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 201155b316e0..13f59f3e8c38 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -36,7 +36,7 @@ target(struct sk_buff **pskb,
36 return NF_QUEUE_NR(tinfo->queuenum); 36 return NF_QUEUE_NR(tinfo->queuenum);
37} 37}
38 38
39static struct xt_target xt_nfqueue_target[] = { 39static struct xt_target xt_nfqueue_target[] __read_mostly = {
40 { 40 {
41 .name = "NFQUEUE", 41 .name = "NFQUEUE",
42 .family = AF_INET, 42 .family = AF_INET,
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index 5085fb3d1e2d..b7d6312fccc7 100644
--- a/net/netfilter/xt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -33,7 +33,7 @@ target(struct sk_buff **pskb,
33 return XT_CONTINUE; 33 return XT_CONTINUE;
34} 34}
35 35
36static struct xt_target xt_notrack_target[] = { 36static struct xt_target xt_notrack_target[] __read_mostly = {
37 { 37 {
38 .name = "NOTRACK", 38 .name = "NOTRACK",
39 .family = AF_INET, 39 .family = AF_INET,
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 705f0e830a79..c83779a941a1 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -51,7 +51,7 @@ static unsigned int target(struct sk_buff **pskb, const struct net_device *in,
51 return XT_CONTINUE; 51 return XT_CONTINUE;
52} 52}
53 53
54static int checkentry_selinux(struct xt_secmark_target_info *info) 54static bool checkentry_selinux(struct xt_secmark_target_info *info)
55{ 55{
56 int err; 56 int err;
57 struct xt_secmark_target_selinux_info *sel = &info->u.sel; 57 struct xt_secmark_target_selinux_info *sel = &info->u.sel;
@@ -63,53 +63,53 @@ static int checkentry_selinux(struct xt_secmark_target_info *info)
63 if (err == -EINVAL) 63 if (err == -EINVAL)
64 printk(KERN_INFO PFX "invalid SELinux context \'%s\'\n", 64 printk(KERN_INFO PFX "invalid SELinux context \'%s\'\n",
65 sel->selctx); 65 sel->selctx);
66 return 0; 66 return false;
67 } 67 }
68 68
69 if (!sel->selsid) { 69 if (!sel->selsid) {
70 printk(KERN_INFO PFX "unable to map SELinux context \'%s\'\n", 70 printk(KERN_INFO PFX "unable to map SELinux context \'%s\'\n",
71 sel->selctx); 71 sel->selctx);
72 return 0; 72 return false;
73 } 73 }
74 74
75 err = selinux_relabel_packet_permission(sel->selsid); 75 err = selinux_relabel_packet_permission(sel->selsid);
76 if (err) { 76 if (err) {
77 printk(KERN_INFO PFX "unable to obtain relabeling permission\n"); 77 printk(KERN_INFO PFX "unable to obtain relabeling permission\n");
78 return 0; 78 return false;
79 } 79 }
80 80
81 return 1; 81 return true;
82} 82}
83 83
84static int checkentry(const char *tablename, const void *entry, 84static bool checkentry(const char *tablename, const void *entry,
85 const struct xt_target *target, void *targinfo, 85 const struct xt_target *target, void *targinfo,
86 unsigned int hook_mask) 86 unsigned int hook_mask)
87{ 87{
88 struct xt_secmark_target_info *info = targinfo; 88 struct xt_secmark_target_info *info = targinfo;
89 89
90 if (mode && mode != info->mode) { 90 if (mode && mode != info->mode) {
91 printk(KERN_INFO PFX "mode already set to %hu cannot mix with " 91 printk(KERN_INFO PFX "mode already set to %hu cannot mix with "
92 "rules for mode %hu\n", mode, info->mode); 92 "rules for mode %hu\n", mode, info->mode);
93 return 0; 93 return false;
94 } 94 }
95 95
96 switch (info->mode) { 96 switch (info->mode) {
97 case SECMARK_MODE_SEL: 97 case SECMARK_MODE_SEL:
98 if (!checkentry_selinux(info)) 98 if (!checkentry_selinux(info))
99 return 0; 99 return false;
100 break; 100 break;
101 101
102 default: 102 default:
103 printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode); 103 printk(KERN_INFO PFX "invalid mode: %hu\n", info->mode);
104 return 0; 104 return false;
105 } 105 }
106 106
107 if (!mode) 107 if (!mode)
108 mode = info->mode; 108 mode = info->mode;
109 return 1; 109 return true;
110} 110}
111 111
112static struct xt_target xt_secmark_target[] = { 112static struct xt_target xt_secmark_target[] __read_mostly = {
113 { 113 {
114 .name = "SECMARK", 114 .name = "SECMARK",
115 .family = AF_INET, 115 .family = AF_INET,
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 15fe8f649510..d40f7e4b1289 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -93,7 +93,7 @@ tcpmss_mangle_packet(struct sk_buff **pskb,
93 return 0; 93 return 0;
94 94
95 opt[i+2] = (newmss & 0xff00) >> 8; 95 opt[i+2] = (newmss & 0xff00) >> 8;
96 opt[i+3] = (newmss & 0x00ff); 96 opt[i+3] = newmss & 0x00ff;
97 97
98 nf_proto_csum_replace2(&tcph->check, *pskb, 98 nf_proto_csum_replace2(&tcph->check, *pskb,
99 htons(oldmss), htons(newmss), 0); 99 htons(oldmss), htons(newmss), 0);
@@ -126,7 +126,7 @@ tcpmss_mangle_packet(struct sk_buff **pskb,
126 opt[0] = TCPOPT_MSS; 126 opt[0] = TCPOPT_MSS;
127 opt[1] = TCPOLEN_MSS; 127 opt[1] = TCPOLEN_MSS;
128 opt[2] = (newmss & 0xff00) >> 8; 128 opt[2] = (newmss & 0xff00) >> 8;
129 opt[3] = (newmss & 0x00ff); 129 opt[3] = newmss & 0x00ff;
130 130
131 nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0); 131 nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
132 132
@@ -197,19 +197,19 @@ xt_tcpmss_target6(struct sk_buff **pskb,
197#define TH_SYN 0x02 197#define TH_SYN 0x02
198 198
199/* Must specify -p tcp --syn */ 199/* Must specify -p tcp --syn */
200static inline int find_syn_match(const struct xt_entry_match *m) 200static inline bool find_syn_match(const struct xt_entry_match *m)
201{ 201{
202 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data; 202 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
203 203
204 if (strcmp(m->u.kernel.match->name, "tcp") == 0 && 204 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
205 tcpinfo->flg_cmp & TH_SYN && 205 tcpinfo->flg_cmp & TH_SYN &&
206 !(tcpinfo->invflags & XT_TCP_INV_FLAGS)) 206 !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
207 return 1; 207 return true;
208 208
209 return 0; 209 return false;
210} 210}
211 211
212static int 212static bool
213xt_tcpmss_checkentry4(const char *tablename, 213xt_tcpmss_checkentry4(const char *tablename,
214 const void *entry, 214 const void *entry,
215 const struct xt_target *target, 215 const struct xt_target *target,
@@ -225,16 +225,16 @@ xt_tcpmss_checkentry4(const char *tablename,
225 (1 << NF_IP_POST_ROUTING))) != 0) { 225 (1 << NF_IP_POST_ROUTING))) != 0) {
226 printk("xt_TCPMSS: path-MTU clamping only supported in " 226 printk("xt_TCPMSS: path-MTU clamping only supported in "
227 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 227 "FORWARD, OUTPUT and POSTROUTING hooks\n");
228 return 0; 228 return false;
229 } 229 }
230 if (IPT_MATCH_ITERATE(e, find_syn_match)) 230 if (IPT_MATCH_ITERATE(e, find_syn_match))
231 return 1; 231 return true;
232 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 232 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
233 return 0; 233 return false;
234} 234}
235 235
236#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) 236#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
237static int 237static bool
238xt_tcpmss_checkentry6(const char *tablename, 238xt_tcpmss_checkentry6(const char *tablename,
239 const void *entry, 239 const void *entry,
240 const struct xt_target *target, 240 const struct xt_target *target,
@@ -250,16 +250,16 @@ xt_tcpmss_checkentry6(const char *tablename,
250 (1 << NF_IP6_POST_ROUTING))) != 0) { 250 (1 << NF_IP6_POST_ROUTING))) != 0) {
251 printk("xt_TCPMSS: path-MTU clamping only supported in " 251 printk("xt_TCPMSS: path-MTU clamping only supported in "
252 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 252 "FORWARD, OUTPUT and POSTROUTING hooks\n");
253 return 0; 253 return false;
254 } 254 }
255 if (IP6T_MATCH_ITERATE(e, find_syn_match)) 255 if (IP6T_MATCH_ITERATE(e, find_syn_match))
256 return 1; 256 return true;
257 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 257 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
258 return 0; 258 return false;
259} 259}
260#endif 260#endif
261 261
262static struct xt_target xt_tcpmss_reg[] = { 262static struct xt_target xt_tcpmss_reg[] __read_mostly = {
263 { 263 {
264 .family = AF_INET, 264 .family = AF_INET,
265 .name = "TCPMSS", 265 .name = "TCPMSS",
diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c
new file mode 100644
index 000000000000..4df2dedcc0b5
--- /dev/null
+++ b/net/netfilter/xt_TRACE.c
@@ -0,0 +1,53 @@
1/* This is a module which is used to mark packets for tracing.
2 */
3#include <linux/module.h>
4#include <linux/skbuff.h>
5
6#include <linux/netfilter/x_tables.h>
7
8MODULE_LICENSE("GPL");
9MODULE_ALIAS("ipt_TRACE");
10MODULE_ALIAS("ip6t_TRACE");
11
12static unsigned int
13target(struct sk_buff **pskb,
14 const struct net_device *in,
15 const struct net_device *out,
16 unsigned int hooknum,
17 const struct xt_target *target,
18 const void *targinfo)
19{
20 (*pskb)->nf_trace = 1;
21 return XT_CONTINUE;
22}
23
24static struct xt_target xt_trace_target[] __read_mostly = {
25 {
26 .name = "TRACE",
27 .family = AF_INET,
28 .target = target,
29 .table = "raw",
30 .me = THIS_MODULE,
31 },
32 {
33 .name = "TRACE",
34 .family = AF_INET6,
35 .target = target,
36 .table = "raw",
37 .me = THIS_MODULE,
38 },
39};
40
41static int __init xt_trace_init(void)
42{
43 return xt_register_targets(xt_trace_target,
44 ARRAY_SIZE(xt_trace_target));
45}
46
47static void __exit xt_trace_fini(void)
48{
49 xt_unregister_targets(xt_trace_target, ARRAY_SIZE(xt_trace_target));
50}
51
52module_init(xt_trace_init);
53module_exit(xt_trace_fini);
diff --git a/net/netfilter/xt_comment.c b/net/netfilter/xt_comment.c
index 7db492d65220..64bcdb0fe1e6 100644
--- a/net/netfilter/xt_comment.c
+++ b/net/netfilter/xt_comment.c
@@ -15,7 +15,7 @@ MODULE_LICENSE("GPL");
15MODULE_ALIAS("ipt_comment"); 15MODULE_ALIAS("ipt_comment");
16MODULE_ALIAS("ip6t_comment"); 16MODULE_ALIAS("ip6t_comment");
17 17
18static int 18static bool
19match(const struct sk_buff *skb, 19match(const struct sk_buff *skb,
20 const struct net_device *in, 20 const struct net_device *in,
21 const struct net_device *out, 21 const struct net_device *out,
@@ -23,13 +23,13 @@ match(const struct sk_buff *skb,
23 const void *matchinfo, 23 const void *matchinfo,
24 int offset, 24 int offset,
25 unsigned int protooff, 25 unsigned int protooff,
26 int *hotdrop) 26 bool *hotdrop)
27{ 27{
28 /* We always match */ 28 /* We always match */
29 return 1; 29 return true;
30} 30}
31 31
32static struct xt_match xt_comment_match[] = { 32static struct xt_match xt_comment_match[] __read_mostly = {
33 { 33 {
34 .name = "comment", 34 .name = "comment",
35 .family = AF_INET, 35 .family = AF_INET,
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 804afe55e141..dd4d79b8fc9d 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -15,7 +15,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
15MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection"); 15MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection");
16MODULE_ALIAS("ipt_connbytes"); 16MODULE_ALIAS("ipt_connbytes");
17 17
18static int 18static bool
19match(const struct sk_buff *skb, 19match(const struct sk_buff *skb,
20 const struct net_device *in, 20 const struct net_device *in,
21 const struct net_device *out, 21 const struct net_device *out,
@@ -23,10 +23,10 @@ match(const struct sk_buff *skb,
23 const void *matchinfo, 23 const void *matchinfo,
24 int offset, 24 int offset,
25 unsigned int protoff, 25 unsigned int protoff,
26 int *hotdrop) 26 bool *hotdrop)
27{ 27{
28 const struct xt_connbytes_info *sinfo = matchinfo; 28 const struct xt_connbytes_info *sinfo = matchinfo;
29 struct nf_conn *ct; 29 const struct nf_conn *ct;
30 enum ip_conntrack_info ctinfo; 30 enum ip_conntrack_info ctinfo;
31 u_int64_t what = 0; /* initialize to make gcc happy */ 31 u_int64_t what = 0; /* initialize to make gcc happy */
32 u_int64_t bytes = 0; 32 u_int64_t bytes = 0;
@@ -35,7 +35,7 @@ match(const struct sk_buff *skb,
35 35
36 ct = nf_ct_get(skb, &ctinfo); 36 ct = nf_ct_get(skb, &ctinfo);
37 if (!ct) 37 if (!ct)
38 return 0; 38 return false;
39 counters = ct->counters; 39 counters = ct->counters;
40 40
41 switch (sinfo->what) { 41 switch (sinfo->what) {
@@ -90,36 +90,36 @@ match(const struct sk_buff *skb,
90 } 90 }
91 91
92 if (sinfo->count.to) 92 if (sinfo->count.to)
93 return (what <= sinfo->count.to && what >= sinfo->count.from); 93 return what <= sinfo->count.to && what >= sinfo->count.from;
94 else 94 else
95 return (what >= sinfo->count.from); 95 return what >= sinfo->count.from;
96} 96}
97 97
98static int check(const char *tablename, 98static bool check(const char *tablename,
99 const void *ip, 99 const void *ip,
100 const struct xt_match *match, 100 const struct xt_match *match,
101 void *matchinfo, 101 void *matchinfo,
102 unsigned int hook_mask) 102 unsigned int hook_mask)
103{ 103{
104 const struct xt_connbytes_info *sinfo = matchinfo; 104 const struct xt_connbytes_info *sinfo = matchinfo;
105 105
106 if (sinfo->what != XT_CONNBYTES_PKTS && 106 if (sinfo->what != XT_CONNBYTES_PKTS &&
107 sinfo->what != XT_CONNBYTES_BYTES && 107 sinfo->what != XT_CONNBYTES_BYTES &&
108 sinfo->what != XT_CONNBYTES_AVGPKT) 108 sinfo->what != XT_CONNBYTES_AVGPKT)
109 return 0; 109 return false;
110 110
111 if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL && 111 if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL &&
112 sinfo->direction != XT_CONNBYTES_DIR_REPLY && 112 sinfo->direction != XT_CONNBYTES_DIR_REPLY &&
113 sinfo->direction != XT_CONNBYTES_DIR_BOTH) 113 sinfo->direction != XT_CONNBYTES_DIR_BOTH)
114 return 0; 114 return false;
115 115
116 if (nf_ct_l3proto_try_module_get(match->family) < 0) { 116 if (nf_ct_l3proto_try_module_get(match->family) < 0) {
117 printk(KERN_WARNING "can't load conntrack support for " 117 printk(KERN_WARNING "can't load conntrack support for "
118 "proto=%d\n", match->family); 118 "proto=%d\n", match->family);
119 return 0; 119 return false;
120 } 120 }
121 121
122 return 1; 122 return true;
123} 123}
124 124
125static void 125static void
@@ -128,7 +128,7 @@ destroy(const struct xt_match *match, void *matchinfo)
128 nf_ct_l3proto_module_put(match->family); 128 nf_ct_l3proto_module_put(match->family);
129} 129}
130 130
131static struct xt_match xt_connbytes_match[] = { 131static struct xt_match xt_connbytes_match[] __read_mostly = {
132 { 132 {
133 .name = "connbytes", 133 .name = "connbytes",
134 .family = AF_INET, 134 .family = AF_INET,
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index e1803256c792..e73fa9b46cf7 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -30,7 +30,7 @@ MODULE_DESCRIPTION("IP tables connmark match module");
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31MODULE_ALIAS("ipt_connmark"); 31MODULE_ALIAS("ipt_connmark");
32 32
33static int 33static bool
34match(const struct sk_buff *skb, 34match(const struct sk_buff *skb,
35 const struct net_device *in, 35 const struct net_device *in,
36 const struct net_device *out, 36 const struct net_device *out,
@@ -38,38 +38,38 @@ match(const struct sk_buff *skb,
38 const void *matchinfo, 38 const void *matchinfo,
39 int offset, 39 int offset,
40 unsigned int protoff, 40 unsigned int protoff,
41 int *hotdrop) 41 bool *hotdrop)
42{ 42{
43 const struct xt_connmark_info *info = matchinfo; 43 const struct xt_connmark_info *info = matchinfo;
44 struct nf_conn *ct; 44 const struct nf_conn *ct;
45 enum ip_conntrack_info ctinfo; 45 enum ip_conntrack_info ctinfo;
46 46
47 ct = nf_ct_get(skb, &ctinfo); 47 ct = nf_ct_get(skb, &ctinfo);
48 if (!ct) 48 if (!ct)
49 return 0; 49 return false;
50 50
51 return (((ct->mark) & info->mask) == info->mark) ^ info->invert; 51 return ((ct->mark & info->mask) == info->mark) ^ info->invert;
52} 52}
53 53
54static int 54static bool
55checkentry(const char *tablename, 55checkentry(const char *tablename,
56 const void *ip, 56 const void *ip,
57 const struct xt_match *match, 57 const struct xt_match *match,
58 void *matchinfo, 58 void *matchinfo,
59 unsigned int hook_mask) 59 unsigned int hook_mask)
60{ 60{
61 struct xt_connmark_info *cm = matchinfo; 61 const struct xt_connmark_info *cm = matchinfo;
62 62
63 if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) { 63 if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) {
64 printk(KERN_WARNING "connmark: only support 32bit mark\n"); 64 printk(KERN_WARNING "connmark: only support 32bit mark\n");
65 return 0; 65 return false;
66 } 66 }
67 if (nf_ct_l3proto_try_module_get(match->family) < 0) { 67 if (nf_ct_l3proto_try_module_get(match->family) < 0) {
68 printk(KERN_WARNING "can't load conntrack support for " 68 printk(KERN_WARNING "can't load conntrack support for "
69 "proto=%d\n", match->family); 69 "proto=%d\n", match->family);
70 return 0; 70 return false;
71 } 71 }
72 return 1; 72 return true;
73} 73}
74 74
75static void 75static void
@@ -88,7 +88,7 @@ struct compat_xt_connmark_info {
88 88
89static void compat_from_user(void *dst, void *src) 89static void compat_from_user(void *dst, void *src)
90{ 90{
91 struct compat_xt_connmark_info *cm = src; 91 const struct compat_xt_connmark_info *cm = src;
92 struct xt_connmark_info m = { 92 struct xt_connmark_info m = {
93 .mark = cm->mark, 93 .mark = cm->mark,
94 .mask = cm->mask, 94 .mask = cm->mask,
@@ -99,7 +99,7 @@ static void compat_from_user(void *dst, void *src)
99 99
100static int compat_to_user(void __user *dst, void *src) 100static int compat_to_user(void __user *dst, void *src)
101{ 101{
102 struct xt_connmark_info *m = src; 102 const struct xt_connmark_info *m = src;
103 struct compat_xt_connmark_info cm = { 103 struct compat_xt_connmark_info cm = {
104 .mark = m->mark, 104 .mark = m->mark,
105 .mask = m->mask, 105 .mask = m->mask,
@@ -109,7 +109,7 @@ static int compat_to_user(void __user *dst, void *src)
109} 109}
110#endif /* CONFIG_COMPAT */ 110#endif /* CONFIG_COMPAT */
111 111
112static struct xt_match xt_connmark_match[] = { 112static struct xt_match xt_connmark_match[] __read_mostly = {
113 { 113 {
114 .name = "connmark", 114 .name = "connmark",
115 .family = AF_INET, 115 .family = AF_INET,
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 189ded5f378b..ca4b69f020a8 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -19,7 +19,7 @@ MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
19MODULE_DESCRIPTION("iptables connection tracking match module"); 19MODULE_DESCRIPTION("iptables connection tracking match module");
20MODULE_ALIAS("ipt_conntrack"); 20MODULE_ALIAS("ipt_conntrack");
21 21
22static int 22static bool
23match(const struct sk_buff *skb, 23match(const struct sk_buff *skb,
24 const struct net_device *in, 24 const struct net_device *in,
25 const struct net_device *out, 25 const struct net_device *out,
@@ -27,14 +27,14 @@ match(const struct sk_buff *skb,
27 const void *matchinfo, 27 const void *matchinfo,
28 int offset, 28 int offset,
29 unsigned int protoff, 29 unsigned int protoff,
30 int *hotdrop) 30 bool *hotdrop)
31{ 31{
32 const struct xt_conntrack_info *sinfo = matchinfo; 32 const struct xt_conntrack_info *sinfo = matchinfo;
33 struct nf_conn *ct; 33 const struct nf_conn *ct;
34 enum ip_conntrack_info ctinfo; 34 enum ip_conntrack_info ctinfo;
35 unsigned int statebit; 35 unsigned int statebit;
36 36
37 ct = nf_ct_get((struct sk_buff *)skb, &ctinfo); 37 ct = nf_ct_get(skb, &ctinfo);
38 38
39#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg)) 39#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
40 40
@@ -54,53 +54,53 @@ match(const struct sk_buff *skb,
54 } 54 }
55 if (FWINV((statebit & sinfo->statemask) == 0, 55 if (FWINV((statebit & sinfo->statemask) == 0,
56 XT_CONNTRACK_STATE)) 56 XT_CONNTRACK_STATE))
57 return 0; 57 return false;
58 } 58 }
59 59
60 if (ct == NULL) { 60 if (ct == NULL) {
61 if (sinfo->flags & ~XT_CONNTRACK_STATE) 61 if (sinfo->flags & ~XT_CONNTRACK_STATE)
62 return 0; 62 return false;
63 return 1; 63 return true;
64 } 64 }
65 65
66 if (sinfo->flags & XT_CONNTRACK_PROTO && 66 if (sinfo->flags & XT_CONNTRACK_PROTO &&
67 FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != 67 FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum !=
68 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, 68 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
69 XT_CONNTRACK_PROTO)) 69 XT_CONNTRACK_PROTO))
70 return 0; 70 return false;
71 71
72 if (sinfo->flags & XT_CONNTRACK_ORIGSRC && 72 if (sinfo->flags & XT_CONNTRACK_ORIGSRC &&
73 FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip & 73 FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip &
74 sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != 74 sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
75 sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, 75 sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip,
76 XT_CONNTRACK_ORIGSRC)) 76 XT_CONNTRACK_ORIGSRC))
77 return 0; 77 return false;
78 78
79 if (sinfo->flags & XT_CONNTRACK_ORIGDST && 79 if (sinfo->flags & XT_CONNTRACK_ORIGDST &&
80 FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip & 80 FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip &
81 sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != 81 sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
82 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, 82 sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip,
83 XT_CONNTRACK_ORIGDST)) 83 XT_CONNTRACK_ORIGDST))
84 return 0; 84 return false;
85 85
86 if (sinfo->flags & XT_CONNTRACK_REPLSRC && 86 if (sinfo->flags & XT_CONNTRACK_REPLSRC &&
87 FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip & 87 FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip &
88 sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != 88 sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) !=
89 sinfo->tuple[IP_CT_DIR_REPLY].src.ip, 89 sinfo->tuple[IP_CT_DIR_REPLY].src.ip,
90 XT_CONNTRACK_REPLSRC)) 90 XT_CONNTRACK_REPLSRC))
91 return 0; 91 return false;
92 92
93 if (sinfo->flags & XT_CONNTRACK_REPLDST && 93 if (sinfo->flags & XT_CONNTRACK_REPLDST &&
94 FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip & 94 FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip &
95 sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != 95 sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) !=
96 sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, 96 sinfo->tuple[IP_CT_DIR_REPLY].dst.ip,
97 XT_CONNTRACK_REPLDST)) 97 XT_CONNTRACK_REPLDST))
98 return 0; 98 return false;
99 99
100 if (sinfo->flags & XT_CONNTRACK_STATUS && 100 if (sinfo->flags & XT_CONNTRACK_STATUS &&
101 FWINV((ct->status & sinfo->statusmask) == 0, 101 FWINV((ct->status & sinfo->statusmask) == 0,
102 XT_CONNTRACK_STATUS)) 102 XT_CONNTRACK_STATUS))
103 return 0; 103 return false;
104 104
105 if(sinfo->flags & XT_CONNTRACK_EXPIRES) { 105 if(sinfo->flags & XT_CONNTRACK_EXPIRES) {
106 unsigned long expires = timer_pending(&ct->timeout) ? 106 unsigned long expires = timer_pending(&ct->timeout) ?
@@ -109,12 +109,12 @@ match(const struct sk_buff *skb,
109 if (FWINV(!(expires >= sinfo->expires_min && 109 if (FWINV(!(expires >= sinfo->expires_min &&
110 expires <= sinfo->expires_max), 110 expires <= sinfo->expires_max),
111 XT_CONNTRACK_EXPIRES)) 111 XT_CONNTRACK_EXPIRES))
112 return 0; 112 return false;
113 } 113 }
114 return 1; 114 return true;
115} 115}
116 116
117static int 117static bool
118checkentry(const char *tablename, 118checkentry(const char *tablename,
119 const void *ip, 119 const void *ip,
120 const struct xt_match *match, 120 const struct xt_match *match,
@@ -124,9 +124,9 @@ checkentry(const char *tablename,
124 if (nf_ct_l3proto_try_module_get(match->family) < 0) { 124 if (nf_ct_l3proto_try_module_get(match->family) < 0) {
125 printk(KERN_WARNING "can't load conntrack support for " 125 printk(KERN_WARNING "can't load conntrack support for "
126 "proto=%d\n", match->family); 126 "proto=%d\n", match->family);
127 return 0; 127 return false;
128 } 128 }
129 return 1; 129 return true;
130} 130}
131 131
132static void destroy(const struct xt_match *match, void *matchinfo) 132static void destroy(const struct xt_match *match, void *matchinfo)
@@ -150,7 +150,7 @@ struct compat_xt_conntrack_info
150 150
151static void compat_from_user(void *dst, void *src) 151static void compat_from_user(void *dst, void *src)
152{ 152{
153 struct compat_xt_conntrack_info *cm = src; 153 const struct compat_xt_conntrack_info *cm = src;
154 struct xt_conntrack_info m = { 154 struct xt_conntrack_info m = {
155 .statemask = cm->statemask, 155 .statemask = cm->statemask,
156 .statusmask = cm->statusmask, 156 .statusmask = cm->statusmask,
@@ -167,7 +167,7 @@ static void compat_from_user(void *dst, void *src)
167 167
168static int compat_to_user(void __user *dst, void *src) 168static int compat_to_user(void __user *dst, void *src)
169{ 169{
170 struct xt_conntrack_info *m = src; 170 const struct xt_conntrack_info *m = src;
171 struct compat_xt_conntrack_info cm = { 171 struct compat_xt_conntrack_info cm = {
172 .statemask = m->statemask, 172 .statemask = m->statemask,
173 .statusmask = m->statusmask, 173 .statusmask = m->statusmask,
@@ -183,7 +183,7 @@ static int compat_to_user(void __user *dst, void *src)
183} 183}
184#endif 184#endif
185 185
186static struct xt_match conntrack_match = { 186static struct xt_match conntrack_match __read_mostly = {
187 .name = "conntrack", 187 .name = "conntrack",
188 .match = match, 188 .match = match,
189 .checkentry = checkentry, 189 .checkentry = checkentry,
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
index 2c9c0dee8aaf..83224ec89cc0 100644
--- a/net/netfilter/xt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -31,40 +31,40 @@ MODULE_ALIAS("ipt_dccp");
31static unsigned char *dccp_optbuf; 31static unsigned char *dccp_optbuf;
32static DEFINE_SPINLOCK(dccp_buflock); 32static DEFINE_SPINLOCK(dccp_buflock);
33 33
34static inline int 34static inline bool
35dccp_find_option(u_int8_t option, 35dccp_find_option(u_int8_t option,
36 const struct sk_buff *skb, 36 const struct sk_buff *skb,
37 unsigned int protoff, 37 unsigned int protoff,
38 const struct dccp_hdr *dh, 38 const struct dccp_hdr *dh,
39 int *hotdrop) 39 bool *hotdrop)
40{ 40{
41 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */ 41 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
42 unsigned char *op; 42 const unsigned char *op;
43 unsigned int optoff = __dccp_hdr_len(dh); 43 unsigned int optoff = __dccp_hdr_len(dh);
44 unsigned int optlen = dh->dccph_doff*4 - __dccp_hdr_len(dh); 44 unsigned int optlen = dh->dccph_doff*4 - __dccp_hdr_len(dh);
45 unsigned int i; 45 unsigned int i;
46 46
47 if (dh->dccph_doff * 4 < __dccp_hdr_len(dh)) { 47 if (dh->dccph_doff * 4 < __dccp_hdr_len(dh)) {
48 *hotdrop = 1; 48 *hotdrop = true;
49 return 0; 49 return false;
50 } 50 }
51 51
52 if (!optlen) 52 if (!optlen)
53 return 0; 53 return false;
54 54
55 spin_lock_bh(&dccp_buflock); 55 spin_lock_bh(&dccp_buflock);
56 op = skb_header_pointer(skb, protoff + optoff, optlen, dccp_optbuf); 56 op = skb_header_pointer(skb, protoff + optoff, optlen, dccp_optbuf);
57 if (op == NULL) { 57 if (op == NULL) {
58 /* If we don't have the whole header, drop packet. */ 58 /* If we don't have the whole header, drop packet. */
59 spin_unlock_bh(&dccp_buflock); 59 spin_unlock_bh(&dccp_buflock);
60 *hotdrop = 1; 60 *hotdrop = true;
61 return 0; 61 return false;
62 } 62 }
63 63
64 for (i = 0; i < optlen; ) { 64 for (i = 0; i < optlen; ) {
65 if (op[i] == option) { 65 if (op[i] == option) {
66 spin_unlock_bh(&dccp_buflock); 66 spin_unlock_bh(&dccp_buflock);
67 return 1; 67 return true;
68 } 68 }
69 69
70 if (op[i] < 2) 70 if (op[i] < 2)
@@ -74,24 +74,24 @@ dccp_find_option(u_int8_t option,
74 } 74 }
75 75
76 spin_unlock_bh(&dccp_buflock); 76 spin_unlock_bh(&dccp_buflock);
77 return 0; 77 return false;
78} 78}
79 79
80 80
81static inline int 81static inline bool
82match_types(const struct dccp_hdr *dh, u_int16_t typemask) 82match_types(const struct dccp_hdr *dh, u_int16_t typemask)
83{ 83{
84 return (typemask & (1 << dh->dccph_type)); 84 return typemask & (1 << dh->dccph_type);
85} 85}
86 86
87static inline int 87static inline bool
88match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff, 88match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff,
89 const struct dccp_hdr *dh, int *hotdrop) 89 const struct dccp_hdr *dh, bool *hotdrop)
90{ 90{
91 return dccp_find_option(option, skb, protoff, dh, hotdrop); 91 return dccp_find_option(option, skb, protoff, dh, hotdrop);
92} 92}
93 93
94static int 94static bool
95match(const struct sk_buff *skb, 95match(const struct sk_buff *skb,
96 const struct net_device *in, 96 const struct net_device *in,
97 const struct net_device *out, 97 const struct net_device *out,
@@ -99,25 +99,25 @@ match(const struct sk_buff *skb,
99 const void *matchinfo, 99 const void *matchinfo,
100 int offset, 100 int offset,
101 unsigned int protoff, 101 unsigned int protoff,
102 int *hotdrop) 102 bool *hotdrop)
103{ 103{
104 const struct xt_dccp_info *info = matchinfo; 104 const struct xt_dccp_info *info = matchinfo;
105 struct dccp_hdr _dh, *dh; 105 struct dccp_hdr _dh, *dh;
106 106
107 if (offset) 107 if (offset)
108 return 0; 108 return false;
109 109
110 dh = skb_header_pointer(skb, protoff, sizeof(_dh), &_dh); 110 dh = skb_header_pointer(skb, protoff, sizeof(_dh), &_dh);
111 if (dh == NULL) { 111 if (dh == NULL) {
112 *hotdrop = 1; 112 *hotdrop = true;
113 return 0; 113 return false;
114 } 114 }
115 115
116 return DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0]) 116 return DCCHECK(ntohs(dh->dccph_sport) >= info->spts[0]
117 && (ntohs(dh->dccph_sport) <= info->spts[1])), 117 && ntohs(dh->dccph_sport) <= info->spts[1],
118 XT_DCCP_SRC_PORTS, info->flags, info->invflags) 118 XT_DCCP_SRC_PORTS, info->flags, info->invflags)
119 && DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0]) 119 && DCCHECK(ntohs(dh->dccph_dport) >= info->dpts[0]
120 && (ntohs(dh->dccph_dport) <= info->dpts[1])), 120 && ntohs(dh->dccph_dport) <= info->dpts[1],
121 XT_DCCP_DEST_PORTS, info->flags, info->invflags) 121 XT_DCCP_DEST_PORTS, info->flags, info->invflags)
122 && DCCHECK(match_types(dh, info->typemask), 122 && DCCHECK(match_types(dh, info->typemask),
123 XT_DCCP_TYPE, info->flags, info->invflags) 123 XT_DCCP_TYPE, info->flags, info->invflags)
@@ -126,7 +126,7 @@ match(const struct sk_buff *skb,
126 XT_DCCP_OPTION, info->flags, info->invflags); 126 XT_DCCP_OPTION, info->flags, info->invflags);
127} 127}
128 128
129static int 129static bool
130checkentry(const char *tablename, 130checkentry(const char *tablename,
131 const void *inf, 131 const void *inf,
132 const struct xt_match *match, 132 const struct xt_match *match,
@@ -140,7 +140,7 @@ checkentry(const char *tablename,
140 && !(info->invflags & ~info->flags); 140 && !(info->invflags & ~info->flags);
141} 141}
142 142
143static struct xt_match xt_dccp_match[] = { 143static struct xt_match xt_dccp_match[] __read_mostly = {
144 { 144 {
145 .name = "dccp", 145 .name = "dccp",
146 .family = AF_INET, 146 .family = AF_INET,
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c
index 56b247ecc283..dde6d66e0d33 100644
--- a/net/netfilter/xt_dscp.c
+++ b/net/netfilter/xt_dscp.c
@@ -22,14 +22,14 @@ MODULE_LICENSE("GPL");
22MODULE_ALIAS("ipt_dscp"); 22MODULE_ALIAS("ipt_dscp");
23MODULE_ALIAS("ip6t_dscp"); 23MODULE_ALIAS("ip6t_dscp");
24 24
25static int match(const struct sk_buff *skb, 25static bool match(const struct sk_buff *skb,
26 const struct net_device *in, 26 const struct net_device *in,
27 const struct net_device *out, 27 const struct net_device *out,
28 const struct xt_match *match, 28 const struct xt_match *match,
29 const void *matchinfo, 29 const void *matchinfo,
30 int offset, 30 int offset,
31 unsigned int protoff, 31 unsigned int protoff,
32 int *hotdrop) 32 bool *hotdrop)
33{ 33{
34 const struct xt_dscp_info *info = matchinfo; 34 const struct xt_dscp_info *info = matchinfo;
35 u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; 35 u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
@@ -37,14 +37,14 @@ static int match(const struct sk_buff *skb,
37 return (dscp == info->dscp) ^ !!info->invert; 37 return (dscp == info->dscp) ^ !!info->invert;
38} 38}
39 39
40static int match6(const struct sk_buff *skb, 40static bool match6(const struct sk_buff *skb,
41 const struct net_device *in, 41 const struct net_device *in,
42 const struct net_device *out, 42 const struct net_device *out,
43 const struct xt_match *match, 43 const struct xt_match *match,
44 const void *matchinfo, 44 const void *matchinfo,
45 int offset, 45 int offset,
46 unsigned int protoff, 46 unsigned int protoff,
47 int *hotdrop) 47 bool *hotdrop)
48{ 48{
49 const struct xt_dscp_info *info = matchinfo; 49 const struct xt_dscp_info *info = matchinfo;
50 u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; 50 u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
@@ -52,23 +52,23 @@ static int match6(const struct sk_buff *skb,
52 return (dscp == info->dscp) ^ !!info->invert; 52 return (dscp == info->dscp) ^ !!info->invert;
53} 53}
54 54
55static int checkentry(const char *tablename, 55static bool checkentry(const char *tablename,
56 const void *info, 56 const void *info,
57 const struct xt_match *match, 57 const struct xt_match *match,
58 void *matchinfo, 58 void *matchinfo,
59 unsigned int hook_mask) 59 unsigned int hook_mask)
60{ 60{
61 const u_int8_t dscp = ((struct xt_dscp_info *)matchinfo)->dscp; 61 const u_int8_t dscp = ((struct xt_dscp_info *)matchinfo)->dscp;
62 62
63 if (dscp > XT_DSCP_MAX) { 63 if (dscp > XT_DSCP_MAX) {
64 printk(KERN_ERR "xt_dscp: dscp %x out of range\n", dscp); 64 printk(KERN_ERR "xt_dscp: dscp %x out of range\n", dscp);
65 return 0; 65 return false;
66 } 66 }
67 67
68 return 1; 68 return true;
69} 69}
70 70
71static struct xt_match xt_dscp_match[] = { 71static struct xt_match xt_dscp_match[] __read_mostly = {
72 { 72 {
73 .name = "dscp", 73 .name = "dscp",
74 .family = AF_INET, 74 .family = AF_INET,
diff --git a/net/netfilter/xt_esp.c b/net/netfilter/xt_esp.c
index 7c95f149d942..b11378e001b6 100644
--- a/net/netfilter/xt_esp.c
+++ b/net/netfilter/xt_esp.c
@@ -31,10 +31,10 @@ MODULE_ALIAS("ip6t_esp");
31#endif 31#endif
32 32
33/* Returns 1 if the spi is matched by the range, 0 otherwise */ 33/* Returns 1 if the spi is matched by the range, 0 otherwise */
34static inline int 34static inline bool
35spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) 35spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
36{ 36{
37 int r = 0; 37 bool r;
38 duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ', 38 duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
39 min, spi, max); 39 min, spi, max);
40 r = (spi >= min && spi <= max) ^ invert; 40 r = (spi >= min && spi <= max) ^ invert;
@@ -42,7 +42,7 @@ spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
42 return r; 42 return r;
43} 43}
44 44
45static int 45static bool
46match(const struct sk_buff *skb, 46match(const struct sk_buff *skb,
47 const struct net_device *in, 47 const struct net_device *in,
48 const struct net_device *out, 48 const struct net_device *out,
@@ -50,14 +50,14 @@ match(const struct sk_buff *skb,
50 const void *matchinfo, 50 const void *matchinfo,
51 int offset, 51 int offset,
52 unsigned int protoff, 52 unsigned int protoff,
53 int *hotdrop) 53 bool *hotdrop)
54{ 54{
55 struct ip_esp_hdr _esp, *eh; 55 struct ip_esp_hdr _esp, *eh;
56 const struct xt_esp *espinfo = matchinfo; 56 const struct xt_esp *espinfo = matchinfo;
57 57
58 /* Must not be a fragment. */ 58 /* Must not be a fragment. */
59 if (offset) 59 if (offset)
60 return 0; 60 return false;
61 61
62 eh = skb_header_pointer(skb, protoff, sizeof(_esp), &_esp); 62 eh = skb_header_pointer(skb, protoff, sizeof(_esp), &_esp);
63 if (eh == NULL) { 63 if (eh == NULL) {
@@ -65,8 +65,8 @@ match(const struct sk_buff *skb,
65 * can't. Hence, no choice but to drop. 65 * can't. Hence, no choice but to drop.
66 */ 66 */
67 duprintf("Dropping evil ESP tinygram.\n"); 67 duprintf("Dropping evil ESP tinygram.\n");
68 *hotdrop = 1; 68 *hotdrop = true;
69 return 0; 69 return false;
70 } 70 }
71 71
72 return spi_match(espinfo->spis[0], espinfo->spis[1], ntohl(eh->spi), 72 return spi_match(espinfo->spis[0], espinfo->spis[1], ntohl(eh->spi),
@@ -74,7 +74,7 @@ match(const struct sk_buff *skb,
74} 74}
75 75
76/* Called when user tries to insert an entry of this type. */ 76/* Called when user tries to insert an entry of this type. */
77static int 77static bool
78checkentry(const char *tablename, 78checkentry(const char *tablename,
79 const void *ip_void, 79 const void *ip_void,
80 const struct xt_match *match, 80 const struct xt_match *match,
@@ -85,13 +85,13 @@ checkentry(const char *tablename,
85 85
86 if (espinfo->invflags & ~XT_ESP_INV_MASK) { 86 if (espinfo->invflags & ~XT_ESP_INV_MASK) {
87 duprintf("xt_esp: unknown flags %X\n", espinfo->invflags); 87 duprintf("xt_esp: unknown flags %X\n", espinfo->invflags);
88 return 0; 88 return false;
89 } 89 }
90 90
91 return 1; 91 return true;
92} 92}
93 93
94static struct xt_match xt_esp_match[] = { 94static struct xt_match xt_esp_match[] __read_mostly = {
95 { 95 {
96 .name = "esp", 96 .name = "esp",
97 .family = AF_INET, 97 .family = AF_INET,
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index d3043fa32ebc..d6b3d01975b6 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -94,7 +94,8 @@ static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
94static HLIST_HEAD(hashlimit_htables); 94static HLIST_HEAD(hashlimit_htables);
95static struct kmem_cache *hashlimit_cachep __read_mostly; 95static struct kmem_cache *hashlimit_cachep __read_mostly;
96 96
97static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) 97static inline bool dst_cmp(const struct dsthash_ent *ent,
98 const struct dsthash_dst *b)
98{ 99{
99 return !memcmp(&ent->dst, b, sizeof(ent->dst)); 100 return !memcmp(&ent->dst, b, sizeof(ent->dst));
100} 101}
@@ -106,7 +107,8 @@ hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
106} 107}
107 108
108static struct dsthash_ent * 109static struct dsthash_ent *
109dsthash_find(const struct xt_hashlimit_htable *ht, struct dsthash_dst *dst) 110dsthash_find(const struct xt_hashlimit_htable *ht,
111 const struct dsthash_dst *dst)
110{ 112{
111 struct dsthash_ent *ent; 113 struct dsthash_ent *ent;
112 struct hlist_node *pos; 114 struct hlist_node *pos;
@@ -122,7 +124,8 @@ dsthash_find(const struct xt_hashlimit_htable *ht, struct dsthash_dst *dst)
122 124
123/* allocate dsthash_ent, initialize dst, put in htable and lock it */ 125/* allocate dsthash_ent, initialize dst, put in htable and lock it */
124static struct dsthash_ent * 126static struct dsthash_ent *
125dsthash_alloc_init(struct xt_hashlimit_htable *ht, struct dsthash_dst *dst) 127dsthash_alloc_init(struct xt_hashlimit_htable *ht,
128 const struct dsthash_dst *dst)
126{ 129{
127 struct dsthash_ent *ent; 130 struct dsthash_ent *ent;
128 131
@@ -227,19 +230,21 @@ static int htable_create(struct xt_hashlimit_info *minfo, int family)
227 return 0; 230 return 0;
228} 231}
229 232
230static int select_all(struct xt_hashlimit_htable *ht, struct dsthash_ent *he) 233static bool select_all(const struct xt_hashlimit_htable *ht,
234 const struct dsthash_ent *he)
231{ 235{
232 return 1; 236 return 1;
233} 237}
234 238
235static int select_gc(struct xt_hashlimit_htable *ht, struct dsthash_ent *he) 239static bool select_gc(const struct xt_hashlimit_htable *ht,
240 const struct dsthash_ent *he)
236{ 241{
237 return (jiffies >= he->expires); 242 return jiffies >= he->expires;
238} 243}
239 244
240static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, 245static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
241 int (*select)(struct xt_hashlimit_htable *ht, 246 bool (*select)(const struct xt_hashlimit_htable *ht,
242 struct dsthash_ent *he)) 247 const struct dsthash_ent *he))
243{ 248{
244 unsigned int i; 249 unsigned int i;
245 250
@@ -282,7 +287,8 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
282 vfree(hinfo); 287 vfree(hinfo);
283} 288}
284 289
285static struct xt_hashlimit_htable *htable_find_get(char *name, int family) 290static struct xt_hashlimit_htable *htable_find_get(const char *name,
291 int family)
286{ 292{
287 struct xt_hashlimit_htable *hinfo; 293 struct xt_hashlimit_htable *hinfo;
288 struct hlist_node *pos; 294 struct hlist_node *pos;
@@ -367,7 +373,8 @@ static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
367} 373}
368 374
369static int 375static int
370hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst, 376hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
377 struct dsthash_dst *dst,
371 const struct sk_buff *skb, unsigned int protoff) 378 const struct sk_buff *skb, unsigned int protoff)
372{ 379{
373 __be16 _ports[2], *ports; 380 __be16 _ports[2], *ports;
@@ -432,7 +439,7 @@ hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
432 return 0; 439 return 0;
433} 440}
434 441
435static int 442static bool
436hashlimit_match(const struct sk_buff *skb, 443hashlimit_match(const struct sk_buff *skb,
437 const struct net_device *in, 444 const struct net_device *in,
438 const struct net_device *out, 445 const struct net_device *out,
@@ -440,10 +447,10 @@ hashlimit_match(const struct sk_buff *skb,
440 const void *matchinfo, 447 const void *matchinfo,
441 int offset, 448 int offset,
442 unsigned int protoff, 449 unsigned int protoff,
443 int *hotdrop) 450 bool *hotdrop)
444{ 451{
445 struct xt_hashlimit_info *r = 452 const struct xt_hashlimit_info *r =
446 ((struct xt_hashlimit_info *)matchinfo)->u.master; 453 ((const struct xt_hashlimit_info *)matchinfo)->u.master;
447 struct xt_hashlimit_htable *hinfo = r->hinfo; 454 struct xt_hashlimit_htable *hinfo = r->hinfo;
448 unsigned long now = jiffies; 455 unsigned long now = jiffies;
449 struct dsthash_ent *dh; 456 struct dsthash_ent *dh;
@@ -478,20 +485,20 @@ hashlimit_match(const struct sk_buff *skb,
478 /* We're underlimit. */ 485 /* We're underlimit. */
479 dh->rateinfo.credit -= dh->rateinfo.cost; 486 dh->rateinfo.credit -= dh->rateinfo.cost;
480 spin_unlock_bh(&hinfo->lock); 487 spin_unlock_bh(&hinfo->lock);
481 return 1; 488 return true;
482 } 489 }
483 490
484 spin_unlock_bh(&hinfo->lock); 491 spin_unlock_bh(&hinfo->lock);
485 492
486 /* default case: we're overlimit, thus don't match */ 493 /* default case: we're overlimit, thus don't match */
487 return 0; 494 return false;
488 495
489hotdrop: 496hotdrop:
490 *hotdrop = 1; 497 *hotdrop = true;
491 return 0; 498 return false;
492} 499}
493 500
494static int 501static bool
495hashlimit_checkentry(const char *tablename, 502hashlimit_checkentry(const char *tablename,
496 const void *inf, 503 const void *inf,
497 const struct xt_match *match, 504 const struct xt_match *match,
@@ -505,20 +512,20 @@ hashlimit_checkentry(const char *tablename,
505 user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) { 512 user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
506 printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n", 513 printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
507 r->cfg.avg, r->cfg.burst); 514 r->cfg.avg, r->cfg.burst);
508 return 0; 515 return false;
509 } 516 }
510 if (r->cfg.mode == 0 || 517 if (r->cfg.mode == 0 ||
511 r->cfg.mode > (XT_HASHLIMIT_HASH_DPT | 518 r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
512 XT_HASHLIMIT_HASH_DIP | 519 XT_HASHLIMIT_HASH_DIP |
513 XT_HASHLIMIT_HASH_SIP | 520 XT_HASHLIMIT_HASH_SIP |
514 XT_HASHLIMIT_HASH_SPT)) 521 XT_HASHLIMIT_HASH_SPT))
515 return 0; 522 return false;
516 if (!r->cfg.gc_interval) 523 if (!r->cfg.gc_interval)
517 return 0; 524 return false;
518 if (!r->cfg.expire) 525 if (!r->cfg.expire)
519 return 0; 526 return false;
520 if (r->name[sizeof(r->name) - 1] != '\0') 527 if (r->name[sizeof(r->name) - 1] != '\0')
521 return 0; 528 return false;
522 529
523 /* This is the best we've got: We cannot release and re-grab lock, 530 /* This is the best we've got: We cannot release and re-grab lock,
524 * since checkentry() is called before x_tables.c grabs xt_mutex. 531 * since checkentry() is called before x_tables.c grabs xt_mutex.
@@ -530,19 +537,19 @@ hashlimit_checkentry(const char *tablename,
530 r->hinfo = htable_find_get(r->name, match->family); 537 r->hinfo = htable_find_get(r->name, match->family);
531 if (!r->hinfo && htable_create(r, match->family) != 0) { 538 if (!r->hinfo && htable_create(r, match->family) != 0) {
532 mutex_unlock(&hlimit_mutex); 539 mutex_unlock(&hlimit_mutex);
533 return 0; 540 return false;
534 } 541 }
535 mutex_unlock(&hlimit_mutex); 542 mutex_unlock(&hlimit_mutex);
536 543
537 /* Ugly hack: For SMP, we only want to use one set */ 544 /* Ugly hack: For SMP, we only want to use one set */
538 r->u.master = r; 545 r->u.master = r;
539 return 1; 546 return true;
540} 547}
541 548
542static void 549static void
543hashlimit_destroy(const struct xt_match *match, void *matchinfo) 550hashlimit_destroy(const struct xt_match *match, void *matchinfo)
544{ 551{
545 struct xt_hashlimit_info *r = matchinfo; 552 const struct xt_hashlimit_info *r = matchinfo;
546 553
547 htable_put(r->hinfo); 554 htable_put(r->hinfo);
548} 555}
@@ -571,7 +578,7 @@ static int compat_to_user(void __user *dst, void *src)
571} 578}
572#endif 579#endif
573 580
574static struct xt_match xt_hashlimit[] = { 581static struct xt_match xt_hashlimit[] __read_mostly = {
575 { 582 {
576 .name = "hashlimit", 583 .name = "hashlimit",
577 .family = AF_INET, 584 .family = AF_INET,
@@ -694,7 +701,7 @@ static int dl_seq_show(struct seq_file *s, void *v)
694 return 0; 701 return 0;
695} 702}
696 703
697static struct seq_operations dl_seq_ops = { 704static const struct seq_operations dl_seq_ops = {
698 .start = dl_seq_start, 705 .start = dl_seq_start,
699 .next = dl_seq_next, 706 .next = dl_seq_next,
700 .stop = dl_seq_stop, 707 .stop = dl_seq_stop,
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c
index c139b2f43a10..0a1f4c6bcdef 100644
--- a/net/netfilter/xt_helper.c
+++ b/net/netfilter/xt_helper.c
@@ -22,13 +22,8 @@ MODULE_DESCRIPTION("iptables helper match module");
22MODULE_ALIAS("ipt_helper"); 22MODULE_ALIAS("ipt_helper");
23MODULE_ALIAS("ip6t_helper"); 23MODULE_ALIAS("ip6t_helper");
24 24
25#if 0
26#define DEBUGP printk
27#else
28#define DEBUGP(format, args...)
29#endif
30 25
31static int 26static bool
32match(const struct sk_buff *skb, 27match(const struct sk_buff *skb,
33 const struct net_device *in, 28 const struct net_device *in,
34 const struct net_device *out, 29 const struct net_device *out,
@@ -36,61 +31,51 @@ match(const struct sk_buff *skb,
36 const void *matchinfo, 31 const void *matchinfo,
37 int offset, 32 int offset,
38 unsigned int protoff, 33 unsigned int protoff,
39 int *hotdrop) 34 bool *hotdrop)
40{ 35{
41 const struct xt_helper_info *info = matchinfo; 36 const struct xt_helper_info *info = matchinfo;
42 struct nf_conn *ct; 37 const struct nf_conn *ct;
43 struct nf_conn_help *master_help; 38 const struct nf_conn_help *master_help;
39 const struct nf_conntrack_helper *helper;
44 enum ip_conntrack_info ctinfo; 40 enum ip_conntrack_info ctinfo;
45 int ret = info->invert; 41 bool ret = info->invert;
46 42
47 ct = nf_ct_get((struct sk_buff *)skb, &ctinfo); 43 ct = nf_ct_get(skb, &ctinfo);
48 if (!ct) { 44 if (!ct || !ct->master)
49 DEBUGP("xt_helper: Eek! invalid conntrack?\n");
50 return ret; 45 return ret;
51 }
52
53 if (!ct->master) {
54 DEBUGP("xt_helper: conntrack %p has no master\n", ct);
55 return ret;
56 }
57 46
58 read_lock_bh(&nf_conntrack_lock);
59 master_help = nfct_help(ct->master); 47 master_help = nfct_help(ct->master);
60 if (!master_help || !master_help->helper) { 48 if (!master_help)
61 DEBUGP("xt_helper: master ct %p has no helper\n", 49 return ret;
62 exp->expectant);
63 goto out_unlock;
64 }
65 50
66 DEBUGP("master's name = %s , info->name = %s\n", 51 /* rcu_read_lock()ed by nf_hook_slow */
67 ct->master->helper->name, info->name); 52 helper = rcu_dereference(master_help->helper);
53 if (!helper)
54 return ret;
68 55
69 if (info->name[0] == '\0') 56 if (info->name[0] == '\0')
70 ret ^= 1; 57 ret = !ret;
71 else 58 else
72 ret ^= !strncmp(master_help->helper->name, info->name, 59 ret ^= !strncmp(master_help->helper->name, info->name,
73 strlen(master_help->helper->name)); 60 strlen(master_help->helper->name));
74out_unlock:
75 read_unlock_bh(&nf_conntrack_lock);
76 return ret; 61 return ret;
77} 62}
78 63
79static int check(const char *tablename, 64static bool check(const char *tablename,
80 const void *inf, 65 const void *inf,
81 const struct xt_match *match, 66 const struct xt_match *match,
82 void *matchinfo, 67 void *matchinfo,
83 unsigned int hook_mask) 68 unsigned int hook_mask)
84{ 69{
85 struct xt_helper_info *info = matchinfo; 70 struct xt_helper_info *info = matchinfo;
86 71
87 if (nf_ct_l3proto_try_module_get(match->family) < 0) { 72 if (nf_ct_l3proto_try_module_get(match->family) < 0) {
88 printk(KERN_WARNING "can't load conntrack support for " 73 printk(KERN_WARNING "can't load conntrack support for "
89 "proto=%d\n", match->family); 74 "proto=%d\n", match->family);
90 return 0; 75 return false;
91 } 76 }
92 info->name[29] = '\0'; 77 info->name[29] = '\0';
93 return 1; 78 return true;
94} 79}
95 80
96static void 81static void
@@ -99,7 +84,7 @@ destroy(const struct xt_match *match, void *matchinfo)
99 nf_ct_l3proto_module_put(match->family); 84 nf_ct_l3proto_module_put(match->family);
100} 85}
101 86
102static struct xt_match xt_helper_match[] = { 87static struct xt_match xt_helper_match[] __read_mostly = {
103 { 88 {
104 .name = "helper", 89 .name = "helper",
105 .family = AF_INET, 90 .family = AF_INET,
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
index 77288c5ada78..3dad173d9735 100644
--- a/net/netfilter/xt_length.c
+++ b/net/netfilter/xt_length.c
@@ -20,7 +20,7 @@ MODULE_LICENSE("GPL");
20MODULE_ALIAS("ipt_length"); 20MODULE_ALIAS("ipt_length");
21MODULE_ALIAS("ip6t_length"); 21MODULE_ALIAS("ip6t_length");
22 22
23static int 23static bool
24match(const struct sk_buff *skb, 24match(const struct sk_buff *skb,
25 const struct net_device *in, 25 const struct net_device *in,
26 const struct net_device *out, 26 const struct net_device *out,
@@ -28,7 +28,7 @@ match(const struct sk_buff *skb,
28 const void *matchinfo, 28 const void *matchinfo,
29 int offset, 29 int offset,
30 unsigned int protoff, 30 unsigned int protoff,
31 int *hotdrop) 31 bool *hotdrop)
32{ 32{
33 const struct xt_length_info *info = matchinfo; 33 const struct xt_length_info *info = matchinfo;
34 u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len); 34 u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len);
@@ -36,7 +36,7 @@ match(const struct sk_buff *skb,
36 return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; 36 return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
37} 37}
38 38
39static int 39static bool
40match6(const struct sk_buff *skb, 40match6(const struct sk_buff *skb,
41 const struct net_device *in, 41 const struct net_device *in,
42 const struct net_device *out, 42 const struct net_device *out,
@@ -44,16 +44,16 @@ match6(const struct sk_buff *skb,
44 const void *matchinfo, 44 const void *matchinfo,
45 int offset, 45 int offset,
46 unsigned int protoff, 46 unsigned int protoff,
47 int *hotdrop) 47 bool *hotdrop)
48{ 48{
49 const struct xt_length_info *info = matchinfo; 49 const struct xt_length_info *info = matchinfo;
50 const u_int16_t pktlen = (ntohs(ipv6_hdr(skb)->payload_len) + 50 const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) +
51 sizeof(struct ipv6hdr)); 51 sizeof(struct ipv6hdr);
52 52
53 return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; 53 return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
54} 54}
55 55
56static struct xt_match xt_length_match[] = { 56static struct xt_match xt_length_match[] __read_mostly = {
57 { 57 {
58 .name = "length", 58 .name = "length",
59 .family = AF_INET, 59 .family = AF_INET,
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 571a72ab89ad..4fcca797150f 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -57,7 +57,7 @@ static DEFINE_SPINLOCK(limit_lock);
57 57
58#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) 58#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
59 59
60static int 60static bool
61ipt_limit_match(const struct sk_buff *skb, 61ipt_limit_match(const struct sk_buff *skb,
62 const struct net_device *in, 62 const struct net_device *in,
63 const struct net_device *out, 63 const struct net_device *out,
@@ -65,9 +65,10 @@ ipt_limit_match(const struct sk_buff *skb,
65 const void *matchinfo, 65 const void *matchinfo,
66 int offset, 66 int offset,
67 unsigned int protoff, 67 unsigned int protoff,
68 int *hotdrop) 68 bool *hotdrop)
69{ 69{
70 struct xt_rateinfo *r = ((struct xt_rateinfo *)matchinfo)->master; 70 struct xt_rateinfo *r =
71 ((const struct xt_rateinfo *)matchinfo)->master;
71 unsigned long now = jiffies; 72 unsigned long now = jiffies;
72 73
73 spin_lock_bh(&limit_lock); 74 spin_lock_bh(&limit_lock);
@@ -79,11 +80,11 @@ ipt_limit_match(const struct sk_buff *skb,
79 /* We're not limited. */ 80 /* We're not limited. */
80 r->credit -= r->cost; 81 r->credit -= r->cost;
81 spin_unlock_bh(&limit_lock); 82 spin_unlock_bh(&limit_lock);
82 return 1; 83 return true;
83 } 84 }
84 85
85 spin_unlock_bh(&limit_lock); 86 spin_unlock_bh(&limit_lock);
86 return 0; 87 return false;
87} 88}
88 89
89/* Precision saver. */ 90/* Precision saver. */
@@ -98,7 +99,7 @@ user2credits(u_int32_t user)
98 return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE; 99 return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
99} 100}
100 101
101static int 102static bool
102ipt_limit_checkentry(const char *tablename, 103ipt_limit_checkentry(const char *tablename,
103 const void *inf, 104 const void *inf,
104 const struct xt_match *match, 105 const struct xt_match *match,
@@ -112,7 +113,7 @@ ipt_limit_checkentry(const char *tablename,
112 || user2credits(r->avg * r->burst) < user2credits(r->avg)) { 113 || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
113 printk("Overflow in xt_limit, try lower: %u/%u\n", 114 printk("Overflow in xt_limit, try lower: %u/%u\n",
114 r->avg, r->burst); 115 r->avg, r->burst);
115 return 0; 116 return false;
116 } 117 }
117 118
118 /* For SMP, we only want to use one set of counters. */ 119 /* For SMP, we only want to use one set of counters. */
@@ -125,7 +126,7 @@ ipt_limit_checkentry(const char *tablename,
125 r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ 126 r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
126 r->cost = user2credits(r->avg); 127 r->cost = user2credits(r->avg);
127 } 128 }
128 return 1; 129 return true;
129} 130}
130 131
131#ifdef CONFIG_COMPAT 132#ifdef CONFIG_COMPAT
@@ -144,7 +145,7 @@ struct compat_xt_rateinfo {
144 * master pointer, which does not need to be preserved. */ 145 * master pointer, which does not need to be preserved. */
145static void compat_from_user(void *dst, void *src) 146static void compat_from_user(void *dst, void *src)
146{ 147{
147 struct compat_xt_rateinfo *cm = src; 148 const struct compat_xt_rateinfo *cm = src;
148 struct xt_rateinfo m = { 149 struct xt_rateinfo m = {
149 .avg = cm->avg, 150 .avg = cm->avg,
150 .burst = cm->burst, 151 .burst = cm->burst,
@@ -158,7 +159,7 @@ static void compat_from_user(void *dst, void *src)
158 159
159static int compat_to_user(void __user *dst, void *src) 160static int compat_to_user(void __user *dst, void *src)
160{ 161{
161 struct xt_rateinfo *m = src; 162 const struct xt_rateinfo *m = src;
162 struct compat_xt_rateinfo cm = { 163 struct compat_xt_rateinfo cm = {
163 .avg = m->avg, 164 .avg = m->avg,
164 .burst = m->burst, 165 .burst = m->burst,
@@ -172,7 +173,7 @@ static int compat_to_user(void __user *dst, void *src)
172} 173}
173#endif /* CONFIG_COMPAT */ 174#endif /* CONFIG_COMPAT */
174 175
175static struct xt_match xt_limit_match[] = { 176static struct xt_match xt_limit_match[] __read_mostly = {
176 { 177 {
177 .name = "limit", 178 .name = "limit",
178 .family = AF_INET, 179 .family = AF_INET,
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
index 1d3a1d98b885..00490d777a0f 100644
--- a/net/netfilter/xt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -24,7 +24,7 @@ MODULE_DESCRIPTION("iptables mac matching module");
24MODULE_ALIAS("ipt_mac"); 24MODULE_ALIAS("ipt_mac");
25MODULE_ALIAS("ip6t_mac"); 25MODULE_ALIAS("ip6t_mac");
26 26
27static int 27static bool
28match(const struct sk_buff *skb, 28match(const struct sk_buff *skb,
29 const struct net_device *in, 29 const struct net_device *in,
30 const struct net_device *out, 30 const struct net_device *out,
@@ -32,19 +32,19 @@ match(const struct sk_buff *skb,
32 const void *matchinfo, 32 const void *matchinfo,
33 int offset, 33 int offset,
34 unsigned int protoff, 34 unsigned int protoff,
35 int *hotdrop) 35 bool *hotdrop)
36{ 36{
37 const struct xt_mac_info *info = matchinfo; 37 const struct xt_mac_info *info = matchinfo;
38 38
39 /* Is mac pointer valid? */ 39 /* Is mac pointer valid? */
40 return (skb_mac_header(skb) >= skb->head && 40 return skb_mac_header(skb) >= skb->head &&
41 (skb_mac_header(skb) + ETH_HLEN) <= skb->data 41 skb_mac_header(skb) + ETH_HLEN <= skb->data
42 /* If so, compare... */ 42 /* If so, compare... */
43 && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr)) 43 && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
44 ^ info->invert)); 44 ^ info->invert);
45} 45}
46 46
47static struct xt_match xt_mac_match[] = { 47static struct xt_match xt_mac_match[] __read_mostly = {
48 { 48 {
49 .name = "mac", 49 .name = "mac",
50 .family = AF_INET, 50 .family = AF_INET,
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 39911dddb011..c02a7f8f3925 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -19,7 +19,7 @@ MODULE_DESCRIPTION("iptables mark matching module");
19MODULE_ALIAS("ipt_mark"); 19MODULE_ALIAS("ipt_mark");
20MODULE_ALIAS("ip6t_mark"); 20MODULE_ALIAS("ip6t_mark");
21 21
22static int 22static bool
23match(const struct sk_buff *skb, 23match(const struct sk_buff *skb,
24 const struct net_device *in, 24 const struct net_device *in,
25 const struct net_device *out, 25 const struct net_device *out,
@@ -27,14 +27,14 @@ match(const struct sk_buff *skb,
27 const void *matchinfo, 27 const void *matchinfo,
28 int offset, 28 int offset,
29 unsigned int protoff, 29 unsigned int protoff,
30 int *hotdrop) 30 bool *hotdrop)
31{ 31{
32 const struct xt_mark_info *info = matchinfo; 32 const struct xt_mark_info *info = matchinfo;
33 33
34 return ((skb->mark & info->mask) == info->mark) ^ info->invert; 34 return ((skb->mark & info->mask) == info->mark) ^ info->invert;
35} 35}
36 36
37static int 37static bool
38checkentry(const char *tablename, 38checkentry(const char *tablename,
39 const void *entry, 39 const void *entry,
40 const struct xt_match *match, 40 const struct xt_match *match,
@@ -45,9 +45,9 @@ checkentry(const char *tablename,
45 45
46 if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) { 46 if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) {
47 printk(KERN_WARNING "mark: only supports 32bit mark\n"); 47 printk(KERN_WARNING "mark: only supports 32bit mark\n");
48 return 0; 48 return false;
49 } 49 }
50 return 1; 50 return true;
51} 51}
52 52
53#ifdef CONFIG_COMPAT 53#ifdef CONFIG_COMPAT
@@ -60,7 +60,7 @@ struct compat_xt_mark_info {
60 60
61static void compat_from_user(void *dst, void *src) 61static void compat_from_user(void *dst, void *src)
62{ 62{
63 struct compat_xt_mark_info *cm = src; 63 const struct compat_xt_mark_info *cm = src;
64 struct xt_mark_info m = { 64 struct xt_mark_info m = {
65 .mark = cm->mark, 65 .mark = cm->mark,
66 .mask = cm->mask, 66 .mask = cm->mask,
@@ -71,7 +71,7 @@ static void compat_from_user(void *dst, void *src)
71 71
72static int compat_to_user(void __user *dst, void *src) 72static int compat_to_user(void __user *dst, void *src)
73{ 73{
74 struct xt_mark_info *m = src; 74 const struct xt_mark_info *m = src;
75 struct compat_xt_mark_info cm = { 75 struct compat_xt_mark_info cm = {
76 .mark = m->mark, 76 .mark = m->mark,
77 .mask = m->mask, 77 .mask = m->mask,
@@ -81,7 +81,7 @@ static int compat_to_user(void __user *dst, void *src)
81} 81}
82#endif /* CONFIG_COMPAT */ 82#endif /* CONFIG_COMPAT */
83 83
84static struct xt_match xt_mark_match[] = { 84static struct xt_match xt_mark_match[] __read_mostly = {
85 { 85 {
86 .name = "mark", 86 .name = "mark",
87 .family = AF_INET, 87 .family = AF_INET,
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c
index 4dce2a81702a..e8ae10284acd 100644
--- a/net/netfilter/xt_multiport.c
+++ b/net/netfilter/xt_multiport.c
@@ -33,24 +33,24 @@ MODULE_ALIAS("ip6t_multiport");
33#endif 33#endif
34 34
35/* Returns 1 if the port is matched by the test, 0 otherwise. */ 35/* Returns 1 if the port is matched by the test, 0 otherwise. */
36static inline int 36static inline bool
37ports_match(const u_int16_t *portlist, enum xt_multiport_flags flags, 37ports_match(const u_int16_t *portlist, enum xt_multiport_flags flags,
38 u_int8_t count, u_int16_t src, u_int16_t dst) 38 u_int8_t count, u_int16_t src, u_int16_t dst)
39{ 39{
40 unsigned int i; 40 unsigned int i;
41 for (i = 0; i < count; i++) { 41 for (i = 0; i < count; i++) {
42 if (flags != XT_MULTIPORT_DESTINATION && portlist[i] == src) 42 if (flags != XT_MULTIPORT_DESTINATION && portlist[i] == src)
43 return 1; 43 return true;
44 44
45 if (flags != XT_MULTIPORT_SOURCE && portlist[i] == dst) 45 if (flags != XT_MULTIPORT_SOURCE && portlist[i] == dst)
46 return 1; 46 return true;
47 } 47 }
48 48
49 return 0; 49 return false;
50} 50}
51 51
52/* Returns 1 if the port is matched by the test, 0 otherwise. */ 52/* Returns 1 if the port is matched by the test, 0 otherwise. */
53static inline int 53static inline bool
54ports_match_v1(const struct xt_multiport_v1 *minfo, 54ports_match_v1(const struct xt_multiport_v1 *minfo,
55 u_int16_t src, u_int16_t dst) 55 u_int16_t src, u_int16_t dst)
56{ 56{
@@ -67,34 +67,34 @@ ports_match_v1(const struct xt_multiport_v1 *minfo,
67 67
68 if (minfo->flags == XT_MULTIPORT_SOURCE 68 if (minfo->flags == XT_MULTIPORT_SOURCE
69 && src >= s && src <= e) 69 && src >= s && src <= e)
70 return 1 ^ minfo->invert; 70 return true ^ minfo->invert;
71 if (minfo->flags == XT_MULTIPORT_DESTINATION 71 if (minfo->flags == XT_MULTIPORT_DESTINATION
72 && dst >= s && dst <= e) 72 && dst >= s && dst <= e)
73 return 1 ^ minfo->invert; 73 return true ^ minfo->invert;
74 if (minfo->flags == XT_MULTIPORT_EITHER 74 if (minfo->flags == XT_MULTIPORT_EITHER
75 && ((dst >= s && dst <= e) 75 && ((dst >= s && dst <= e)
76 || (src >= s && src <= e))) 76 || (src >= s && src <= e)))
77 return 1 ^ minfo->invert; 77 return true ^ minfo->invert;
78 } else { 78 } else {
79 /* exact port matching */ 79 /* exact port matching */
80 duprintf("src or dst matches with %d?\n", s); 80 duprintf("src or dst matches with %d?\n", s);
81 81
82 if (minfo->flags == XT_MULTIPORT_SOURCE 82 if (minfo->flags == XT_MULTIPORT_SOURCE
83 && src == s) 83 && src == s)
84 return 1 ^ minfo->invert; 84 return true ^ minfo->invert;
85 if (minfo->flags == XT_MULTIPORT_DESTINATION 85 if (minfo->flags == XT_MULTIPORT_DESTINATION
86 && dst == s) 86 && dst == s)
87 return 1 ^ minfo->invert; 87 return true ^ minfo->invert;
88 if (minfo->flags == XT_MULTIPORT_EITHER 88 if (minfo->flags == XT_MULTIPORT_EITHER
89 && (src == s || dst == s)) 89 && (src == s || dst == s))
90 return 1 ^ minfo->invert; 90 return true ^ minfo->invert;
91 } 91 }
92 } 92 }
93 93
94 return minfo->invert; 94 return minfo->invert;
95} 95}
96 96
97static int 97static bool
98match(const struct sk_buff *skb, 98match(const struct sk_buff *skb,
99 const struct net_device *in, 99 const struct net_device *in,
100 const struct net_device *out, 100 const struct net_device *out,
@@ -102,13 +102,13 @@ match(const struct sk_buff *skb,
102 const void *matchinfo, 102 const void *matchinfo,
103 int offset, 103 int offset,
104 unsigned int protoff, 104 unsigned int protoff,
105 int *hotdrop) 105 bool *hotdrop)
106{ 106{
107 __be16 _ports[2], *pptr; 107 __be16 _ports[2], *pptr;
108 const struct xt_multiport *multiinfo = matchinfo; 108 const struct xt_multiport *multiinfo = matchinfo;
109 109
110 if (offset) 110 if (offset)
111 return 0; 111 return false;
112 112
113 pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports); 113 pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports);
114 if (pptr == NULL) { 114 if (pptr == NULL) {
@@ -116,8 +116,8 @@ match(const struct sk_buff *skb,
116 * can't. Hence, no choice but to drop. 116 * can't. Hence, no choice but to drop.
117 */ 117 */
118 duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n"); 118 duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
119 *hotdrop = 1; 119 *hotdrop = true;
120 return 0; 120 return false;
121 } 121 }
122 122
123 return ports_match(multiinfo->ports, 123 return ports_match(multiinfo->ports,
@@ -125,7 +125,7 @@ match(const struct sk_buff *skb,
125 ntohs(pptr[0]), ntohs(pptr[1])); 125 ntohs(pptr[0]), ntohs(pptr[1]));
126} 126}
127 127
128static int 128static bool
129match_v1(const struct sk_buff *skb, 129match_v1(const struct sk_buff *skb,
130 const struct net_device *in, 130 const struct net_device *in,
131 const struct net_device *out, 131 const struct net_device *out,
@@ -133,13 +133,13 @@ match_v1(const struct sk_buff *skb,
133 const void *matchinfo, 133 const void *matchinfo,
134 int offset, 134 int offset,
135 unsigned int protoff, 135 unsigned int protoff,
136 int *hotdrop) 136 bool *hotdrop)
137{ 137{
138 __be16 _ports[2], *pptr; 138 __be16 _ports[2], *pptr;
139 const struct xt_multiport_v1 *multiinfo = matchinfo; 139 const struct xt_multiport_v1 *multiinfo = matchinfo;
140 140
141 if (offset) 141 if (offset)
142 return 0; 142 return false;
143 143
144 pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports); 144 pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports);
145 if (pptr == NULL) { 145 if (pptr == NULL) {
@@ -147,14 +147,14 @@ match_v1(const struct sk_buff *skb,
147 * can't. Hence, no choice but to drop. 147 * can't. Hence, no choice but to drop.
148 */ 148 */
149 duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n"); 149 duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
150 *hotdrop = 1; 150 *hotdrop = true;
151 return 0; 151 return false;
152 } 152 }
153 153
154 return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1])); 154 return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1]));
155} 155}
156 156
157static inline int 157static inline bool
158check(u_int16_t proto, 158check(u_int16_t proto,
159 u_int8_t ip_invflags, 159 u_int8_t ip_invflags,
160 u_int8_t match_flags, 160 u_int8_t match_flags,
@@ -172,7 +172,7 @@ check(u_int16_t proto,
172} 172}
173 173
174/* Called when user tries to insert an entry of this type. */ 174/* Called when user tries to insert an entry of this type. */
175static int 175static bool
176checkentry(const char *tablename, 176checkentry(const char *tablename,
177 const void *info, 177 const void *info,
178 const struct xt_match *match, 178 const struct xt_match *match,
@@ -186,7 +186,7 @@ checkentry(const char *tablename,
186 multiinfo->count); 186 multiinfo->count);
187} 187}
188 188
189static int 189static bool
190checkentry_v1(const char *tablename, 190checkentry_v1(const char *tablename,
191 const void *info, 191 const void *info,
192 const struct xt_match *match, 192 const struct xt_match *match,
@@ -200,7 +200,7 @@ checkentry_v1(const char *tablename,
200 multiinfo->count); 200 multiinfo->count);
201} 201}
202 202
203static int 203static bool
204checkentry6(const char *tablename, 204checkentry6(const char *tablename,
205 const void *info, 205 const void *info,
206 const struct xt_match *match, 206 const struct xt_match *match,
@@ -214,7 +214,7 @@ checkentry6(const char *tablename,
214 multiinfo->count); 214 multiinfo->count);
215} 215}
216 216
217static int 217static bool
218checkentry6_v1(const char *tablename, 218checkentry6_v1(const char *tablename,
219 const void *info, 219 const void *info,
220 const struct xt_match *match, 220 const struct xt_match *match,
@@ -228,7 +228,7 @@ checkentry6_v1(const char *tablename,
228 multiinfo->count); 228 multiinfo->count);
229} 229}
230 230
231static struct xt_match xt_multiport_match[] = { 231static struct xt_match xt_multiport_match[] __read_mostly = {
232 { 232 {
233 .name = "multiport", 233 .name = "multiport",
234 .family = AF_INET, 234 .family = AF_INET,
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 35a0fe200c39..f47cab7a696d 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -14,8 +14,6 @@
14#include <linux/netfilter/xt_physdev.h> 14#include <linux/netfilter/xt_physdev.h>
15#include <linux/netfilter/x_tables.h> 15#include <linux/netfilter/x_tables.h>
16#include <linux/netfilter_bridge.h> 16#include <linux/netfilter_bridge.h>
17#define MATCH 1
18#define NOMATCH 0
19 17
20MODULE_LICENSE("GPL"); 18MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>"); 19MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
@@ -23,7 +21,7 @@ MODULE_DESCRIPTION("iptables bridge physical device match module");
23MODULE_ALIAS("ipt_physdev"); 21MODULE_ALIAS("ipt_physdev");
24MODULE_ALIAS("ip6t_physdev"); 22MODULE_ALIAS("ip6t_physdev");
25 23
26static int 24static bool
27match(const struct sk_buff *skb, 25match(const struct sk_buff *skb,
28 const struct net_device *in, 26 const struct net_device *in,
29 const struct net_device *out, 27 const struct net_device *out,
@@ -31,14 +29,14 @@ match(const struct sk_buff *skb,
31 const void *matchinfo, 29 const void *matchinfo,
32 int offset, 30 int offset,
33 unsigned int protoff, 31 unsigned int protoff,
34 int *hotdrop) 32 bool *hotdrop)
35{ 33{
36 int i; 34 int i;
37 static const char nulldevname[IFNAMSIZ]; 35 static const char nulldevname[IFNAMSIZ];
38 const struct xt_physdev_info *info = matchinfo; 36 const struct xt_physdev_info *info = matchinfo;
39 unsigned int ret; 37 bool ret;
40 const char *indev, *outdev; 38 const char *indev, *outdev;
41 struct nf_bridge_info *nf_bridge; 39 const struct nf_bridge_info *nf_bridge;
42 40
43 /* Not a bridged IP packet or no info available yet: 41 /* Not a bridged IP packet or no info available yet:
44 * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if 42 * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
@@ -47,61 +45,61 @@ match(const struct sk_buff *skb,
47 /* Return MATCH if the invert flags of the used options are on */ 45 /* Return MATCH if the invert flags of the used options are on */
48 if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) && 46 if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
49 !(info->invert & XT_PHYSDEV_OP_BRIDGED)) 47 !(info->invert & XT_PHYSDEV_OP_BRIDGED))
50 return NOMATCH; 48 return false;
51 if ((info->bitmask & XT_PHYSDEV_OP_ISIN) && 49 if ((info->bitmask & XT_PHYSDEV_OP_ISIN) &&
52 !(info->invert & XT_PHYSDEV_OP_ISIN)) 50 !(info->invert & XT_PHYSDEV_OP_ISIN))
53 return NOMATCH; 51 return false;
54 if ((info->bitmask & XT_PHYSDEV_OP_ISOUT) && 52 if ((info->bitmask & XT_PHYSDEV_OP_ISOUT) &&
55 !(info->invert & XT_PHYSDEV_OP_ISOUT)) 53 !(info->invert & XT_PHYSDEV_OP_ISOUT))
56 return NOMATCH; 54 return false;
57 if ((info->bitmask & XT_PHYSDEV_OP_IN) && 55 if ((info->bitmask & XT_PHYSDEV_OP_IN) &&
58 !(info->invert & XT_PHYSDEV_OP_IN)) 56 !(info->invert & XT_PHYSDEV_OP_IN))
59 return NOMATCH; 57 return false;
60 if ((info->bitmask & XT_PHYSDEV_OP_OUT) && 58 if ((info->bitmask & XT_PHYSDEV_OP_OUT) &&
61 !(info->invert & XT_PHYSDEV_OP_OUT)) 59 !(info->invert & XT_PHYSDEV_OP_OUT))
62 return NOMATCH; 60 return false;
63 return MATCH; 61 return true;
64 } 62 }
65 63
66 /* This only makes sense in the FORWARD and POSTROUTING chains */ 64 /* This only makes sense in the FORWARD and POSTROUTING chains */
67 if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) && 65 if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
68 (!!(nf_bridge->mask & BRNF_BRIDGED) ^ 66 (!!(nf_bridge->mask & BRNF_BRIDGED) ^
69 !(info->invert & XT_PHYSDEV_OP_BRIDGED))) 67 !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
70 return NOMATCH; 68 return false;
71 69
72 if ((info->bitmask & XT_PHYSDEV_OP_ISIN && 70 if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
73 (!nf_bridge->physindev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) || 71 (!nf_bridge->physindev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
74 (info->bitmask & XT_PHYSDEV_OP_ISOUT && 72 (info->bitmask & XT_PHYSDEV_OP_ISOUT &&
75 (!nf_bridge->physoutdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT)))) 73 (!nf_bridge->physoutdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
76 return NOMATCH; 74 return false;
77 75
78 if (!(info->bitmask & XT_PHYSDEV_OP_IN)) 76 if (!(info->bitmask & XT_PHYSDEV_OP_IN))
79 goto match_outdev; 77 goto match_outdev;
80 indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname; 78 indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
81 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) { 79 for (i = 0, ret = false; i < IFNAMSIZ/sizeof(unsigned int); i++) {
82 ret |= (((const unsigned int *)indev)[i] 80 ret |= (((const unsigned int *)indev)[i]
83 ^ ((const unsigned int *)info->physindev)[i]) 81 ^ ((const unsigned int *)info->physindev)[i])
84 & ((const unsigned int *)info->in_mask)[i]; 82 & ((const unsigned int *)info->in_mask)[i];
85 } 83 }
86 84
87 if ((ret == 0) ^ !(info->invert & XT_PHYSDEV_OP_IN)) 85 if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
88 return NOMATCH; 86 return false;
89 87
90match_outdev: 88match_outdev:
91 if (!(info->bitmask & XT_PHYSDEV_OP_OUT)) 89 if (!(info->bitmask & XT_PHYSDEV_OP_OUT))
92 return MATCH; 90 return true;
93 outdev = nf_bridge->physoutdev ? 91 outdev = nf_bridge->physoutdev ?
94 nf_bridge->physoutdev->name : nulldevname; 92 nf_bridge->physoutdev->name : nulldevname;
95 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) { 93 for (i = 0, ret = false; i < IFNAMSIZ/sizeof(unsigned int); i++) {
96 ret |= (((const unsigned int *)outdev)[i] 94 ret |= (((const unsigned int *)outdev)[i]
97 ^ ((const unsigned int *)info->physoutdev)[i]) 95 ^ ((const unsigned int *)info->physoutdev)[i])
98 & ((const unsigned int *)info->out_mask)[i]; 96 & ((const unsigned int *)info->out_mask)[i];
99 } 97 }
100 98
101 return (ret != 0) ^ !(info->invert & XT_PHYSDEV_OP_OUT); 99 return ret ^ !(info->invert & XT_PHYSDEV_OP_OUT);
102} 100}
103 101
104static int 102static bool
105checkentry(const char *tablename, 103checkentry(const char *tablename,
106 const void *ip, 104 const void *ip,
107 const struct xt_match *match, 105 const struct xt_match *match,
@@ -112,7 +110,7 @@ checkentry(const char *tablename,
112 110
113 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || 111 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
114 info->bitmask & ~XT_PHYSDEV_OP_MASK) 112 info->bitmask & ~XT_PHYSDEV_OP_MASK)
115 return 0; 113 return false;
116 if (info->bitmask & XT_PHYSDEV_OP_OUT && 114 if (info->bitmask & XT_PHYSDEV_OP_OUT &&
117 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || 115 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
118 info->invert & XT_PHYSDEV_OP_BRIDGED) && 116 info->invert & XT_PHYSDEV_OP_BRIDGED) &&
@@ -122,12 +120,12 @@ checkentry(const char *tablename,
122 "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " 120 "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
123 "traffic is not supported anymore.\n"); 121 "traffic is not supported anymore.\n");
124 if (hook_mask & (1 << NF_IP_LOCAL_OUT)) 122 if (hook_mask & (1 << NF_IP_LOCAL_OUT))
125 return 0; 123 return false;
126 } 124 }
127 return 1; 125 return true;
128} 126}
129 127
130static struct xt_match xt_physdev_match[] = { 128static struct xt_match xt_physdev_match[] __read_mostly = {
131 { 129 {
132 .name = "physdev", 130 .name = "physdev",
133 .family = AF_INET, 131 .family = AF_INET,
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
index e1409fc5c288..a52925f12f35 100644
--- a/net/netfilter/xt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -21,29 +21,29 @@ MODULE_DESCRIPTION("IP tables match to match on linklayer packet type");
21MODULE_ALIAS("ipt_pkttype"); 21MODULE_ALIAS("ipt_pkttype");
22MODULE_ALIAS("ip6t_pkttype"); 22MODULE_ALIAS("ip6t_pkttype");
23 23
24static int match(const struct sk_buff *skb, 24static bool match(const struct sk_buff *skb,
25 const struct net_device *in, 25 const struct net_device *in,
26 const struct net_device *out, 26 const struct net_device *out,
27 const struct xt_match *match, 27 const struct xt_match *match,
28 const void *matchinfo, 28 const void *matchinfo,
29 int offset, 29 int offset,
30 unsigned int protoff, 30 unsigned int protoff,
31 int *hotdrop) 31 bool *hotdrop)
32{ 32{
33 u_int8_t type; 33 u_int8_t type;
34 const struct xt_pkttype_info *info = matchinfo; 34 const struct xt_pkttype_info *info = matchinfo;
35 35
36 if (skb->pkt_type == PACKET_LOOPBACK) 36 if (skb->pkt_type == PACKET_LOOPBACK)
37 type = (MULTICAST(ip_hdr(skb)->daddr) 37 type = MULTICAST(ip_hdr(skb)->daddr)
38 ? PACKET_MULTICAST 38 ? PACKET_MULTICAST
39 : PACKET_BROADCAST); 39 : PACKET_BROADCAST;
40 else 40 else
41 type = skb->pkt_type; 41 type = skb->pkt_type;
42 42
43 return (type == info->pkttype) ^ info->invert; 43 return (type == info->pkttype) ^ info->invert;
44} 44}
45 45
46static struct xt_match xt_pkttype_match[] = { 46static struct xt_match xt_pkttype_match[] __read_mostly = {
47 { 47 {
48 .name = "pkttype", 48 .name = "pkttype",
49 .family = AF_INET, 49 .family = AF_INET,
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 15b45a95ec13..6d6d3b7fcbb5 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -20,7 +20,7 @@ MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
20MODULE_DESCRIPTION("Xtables IPsec policy matching module"); 20MODULE_DESCRIPTION("Xtables IPsec policy matching module");
21MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
22 22
23static inline int 23static inline bool
24xt_addr_cmp(const union xt_policy_addr *a1, const union xt_policy_addr *m, 24xt_addr_cmp(const union xt_policy_addr *a1, const union xt_policy_addr *m,
25 const union xt_policy_addr *a2, unsigned short family) 25 const union xt_policy_addr *a2, unsigned short family)
26{ 26{
@@ -30,11 +30,11 @@ xt_addr_cmp(const union xt_policy_addr *a1, const union xt_policy_addr *m,
30 case AF_INET6: 30 case AF_INET6:
31 return !ipv6_masked_addr_cmp(&a1->a6, &m->a6, &a2->a6); 31 return !ipv6_masked_addr_cmp(&a1->a6, &m->a6, &a2->a6);
32 } 32 }
33 return 0; 33 return false;
34} 34}
35 35
36static inline int 36static inline bool
37match_xfrm_state(struct xfrm_state *x, const struct xt_policy_elem *e, 37match_xfrm_state(const struct xfrm_state *x, const struct xt_policy_elem *e,
38 unsigned short family) 38 unsigned short family)
39{ 39{
40#define MATCH_ADDR(x,y,z) (!e->match.x || \ 40#define MATCH_ADDR(x,y,z) (!e->match.x || \
@@ -55,7 +55,7 @@ match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info,
55 unsigned short family) 55 unsigned short family)
56{ 56{
57 const struct xt_policy_elem *e; 57 const struct xt_policy_elem *e;
58 struct sec_path *sp = skb->sp; 58 const struct sec_path *sp = skb->sp;
59 int strict = info->flags & XT_POLICY_MATCH_STRICT; 59 int strict = info->flags & XT_POLICY_MATCH_STRICT;
60 int i, pos; 60 int i, pos;
61 61
@@ -85,7 +85,7 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
85 unsigned short family) 85 unsigned short family)
86{ 86{
87 const struct xt_policy_elem *e; 87 const struct xt_policy_elem *e;
88 struct dst_entry *dst = skb->dst; 88 const struct dst_entry *dst = skb->dst;
89 int strict = info->flags & XT_POLICY_MATCH_STRICT; 89 int strict = info->flags & XT_POLICY_MATCH_STRICT;
90 int i, pos; 90 int i, pos;
91 91
@@ -108,14 +108,14 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
108 return strict ? i == info->len : 0; 108 return strict ? i == info->len : 0;
109} 109}
110 110
111static int match(const struct sk_buff *skb, 111static bool match(const struct sk_buff *skb,
112 const struct net_device *in, 112 const struct net_device *in,
113 const struct net_device *out, 113 const struct net_device *out,
114 const struct xt_match *match, 114 const struct xt_match *match,
115 const void *matchinfo, 115 const void *matchinfo,
116 int offset, 116 int offset,
117 unsigned int protoff, 117 unsigned int protoff,
118 int *hotdrop) 118 bool *hotdrop)
119{ 119{
120 const struct xt_policy_info *info = matchinfo; 120 const struct xt_policy_info *info = matchinfo;
121 int ret; 121 int ret;
@@ -126,45 +126,45 @@ static int match(const struct sk_buff *skb,
126 ret = match_policy_out(skb, info, match->family); 126 ret = match_policy_out(skb, info, match->family);
127 127
128 if (ret < 0) 128 if (ret < 0)
129 ret = info->flags & XT_POLICY_MATCH_NONE ? 1 : 0; 129 ret = info->flags & XT_POLICY_MATCH_NONE ? true : false;
130 else if (info->flags & XT_POLICY_MATCH_NONE) 130 else if (info->flags & XT_POLICY_MATCH_NONE)
131 ret = 0; 131 ret = false;
132 132
133 return ret; 133 return ret;
134} 134}
135 135
136static int checkentry(const char *tablename, const void *ip_void, 136static bool checkentry(const char *tablename, const void *ip_void,
137 const struct xt_match *match, 137 const struct xt_match *match,
138 void *matchinfo, unsigned int hook_mask) 138 void *matchinfo, unsigned int hook_mask)
139{ 139{
140 struct xt_policy_info *info = matchinfo; 140 struct xt_policy_info *info = matchinfo;
141 141
142 if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { 142 if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
143 printk(KERN_ERR "xt_policy: neither incoming nor " 143 printk(KERN_ERR "xt_policy: neither incoming nor "
144 "outgoing policy selected\n"); 144 "outgoing policy selected\n");
145 return 0; 145 return false;
146 } 146 }
147 /* hook values are equal for IPv4 and IPv6 */ 147 /* hook values are equal for IPv4 and IPv6 */
148 if (hook_mask & (1 << NF_IP_PRE_ROUTING | 1 << NF_IP_LOCAL_IN) 148 if (hook_mask & (1 << NF_IP_PRE_ROUTING | 1 << NF_IP_LOCAL_IN)
149 && info->flags & XT_POLICY_MATCH_OUT) { 149 && info->flags & XT_POLICY_MATCH_OUT) {
150 printk(KERN_ERR "xt_policy: output policy not valid in " 150 printk(KERN_ERR "xt_policy: output policy not valid in "
151 "PRE_ROUTING and INPUT\n"); 151 "PRE_ROUTING and INPUT\n");
152 return 0; 152 return false;
153 } 153 }
154 if (hook_mask & (1 << NF_IP_POST_ROUTING | 1 << NF_IP_LOCAL_OUT) 154 if (hook_mask & (1 << NF_IP_POST_ROUTING | 1 << NF_IP_LOCAL_OUT)
155 && info->flags & XT_POLICY_MATCH_IN) { 155 && info->flags & XT_POLICY_MATCH_IN) {
156 printk(KERN_ERR "xt_policy: input policy not valid in " 156 printk(KERN_ERR "xt_policy: input policy not valid in "
157 "POST_ROUTING and OUTPUT\n"); 157 "POST_ROUTING and OUTPUT\n");
158 return 0; 158 return false;
159 } 159 }
160 if (info->len > XT_POLICY_MAX_ELEM) { 160 if (info->len > XT_POLICY_MAX_ELEM) {
161 printk(KERN_ERR "xt_policy: too many policy elements\n"); 161 printk(KERN_ERR "xt_policy: too many policy elements\n");
162 return 0; 162 return false;
163 } 163 }
164 return 1; 164 return true;
165} 165}
166 166
167static struct xt_match xt_policy_match[] = { 167static struct xt_match xt_policy_match[] __read_mostly = {
168 { 168 {
169 .name = "policy", 169 .name = "policy",
170 .family = AF_INET, 170 .family = AF_INET,
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index bfdde06ca0b7..dae97445b87b 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -16,19 +16,20 @@ MODULE_ALIAS("ip6t_quota");
16 16
17static DEFINE_SPINLOCK(quota_lock); 17static DEFINE_SPINLOCK(quota_lock);
18 18
19static int 19static bool
20match(const struct sk_buff *skb, 20match(const struct sk_buff *skb,
21 const struct net_device *in, const struct net_device *out, 21 const struct net_device *in, const struct net_device *out,
22 const struct xt_match *match, const void *matchinfo, 22 const struct xt_match *match, const void *matchinfo,
23 int offset, unsigned int protoff, int *hotdrop) 23 int offset, unsigned int protoff, bool *hotdrop)
24{ 24{
25 struct xt_quota_info *q = ((struct xt_quota_info *)matchinfo)->master; 25 struct xt_quota_info *q =
26 int ret = q->flags & XT_QUOTA_INVERT ? 1 : 0; 26 ((const struct xt_quota_info *)matchinfo)->master;
27 bool ret = q->flags & XT_QUOTA_INVERT;
27 28
28 spin_lock_bh(&quota_lock); 29 spin_lock_bh(&quota_lock);
29 if (q->quota >= skb->len) { 30 if (q->quota >= skb->len) {
30 q->quota -= skb->len; 31 q->quota -= skb->len;
31 ret ^= 1; 32 ret = !ret;
32 } else { 33 } else {
33 /* we do not allow even small packets from now on */ 34 /* we do not allow even small packets from now on */
34 q->quota = 0; 35 q->quota = 0;
@@ -38,21 +39,21 @@ match(const struct sk_buff *skb,
38 return ret; 39 return ret;
39} 40}
40 41
41static int 42static bool
42checkentry(const char *tablename, const void *entry, 43checkentry(const char *tablename, const void *entry,
43 const struct xt_match *match, void *matchinfo, 44 const struct xt_match *match, void *matchinfo,
44 unsigned int hook_mask) 45 unsigned int hook_mask)
45{ 46{
46 struct xt_quota_info *q = (struct xt_quota_info *)matchinfo; 47 struct xt_quota_info *q = matchinfo;
47 48
48 if (q->flags & ~XT_QUOTA_MASK) 49 if (q->flags & ~XT_QUOTA_MASK)
49 return 0; 50 return false;
50 /* For SMP, we only want to use one set of counters. */ 51 /* For SMP, we only want to use one set of counters. */
51 q->master = q; 52 q->master = q;
52 return 1; 53 return true;
53} 54}
54 55
55static struct xt_match xt_quota_match[] = { 56static struct xt_match xt_quota_match[] __read_mostly = {
56 { 57 {
57 .name = "quota", 58 .name = "quota",
58 .family = AF_INET, 59 .family = AF_INET,
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c
index c2017f8af9c4..cc3e76d77a99 100644
--- a/net/netfilter/xt_realm.c
+++ b/net/netfilter/xt_realm.c
@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL");
21MODULE_DESCRIPTION("X_tables realm match"); 21MODULE_DESCRIPTION("X_tables realm match");
22MODULE_ALIAS("ipt_realm"); 22MODULE_ALIAS("ipt_realm");
23 23
24static int 24static bool
25match(const struct sk_buff *skb, 25match(const struct sk_buff *skb,
26 const struct net_device *in, 26 const struct net_device *in,
27 const struct net_device *out, 27 const struct net_device *out,
@@ -29,15 +29,15 @@ match(const struct sk_buff *skb,
29 const void *matchinfo, 29 const void *matchinfo,
30 int offset, 30 int offset,
31 unsigned int protoff, 31 unsigned int protoff,
32 int *hotdrop) 32 bool *hotdrop)
33{ 33{
34 const struct xt_realm_info *info = matchinfo; 34 const struct xt_realm_info *info = matchinfo;
35 struct dst_entry *dst = skb->dst; 35 const struct dst_entry *dst = skb->dst;
36 36
37 return (info->id == (dst->tclassid & info->mask)) ^ info->invert; 37 return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
38} 38}
39 39
40static struct xt_match realm_match = { 40static struct xt_match realm_match __read_mostly = {
41 .name = "realm", 41 .name = "realm",
42 .match = match, 42 .match = match,
43 .matchsize = sizeof(struct xt_realm_info), 43 .matchsize = sizeof(struct xt_realm_info),
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index f86d8d769d47..c002153b80ab 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -23,7 +23,7 @@ MODULE_ALIAS("ipt_sctp");
23#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \ 23#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
24 || (!!((invflag) & (option)) ^ (cond))) 24 || (!!((invflag) & (option)) ^ (cond)))
25 25
26static int 26static bool
27match_flags(const struct xt_sctp_flag_info *flag_info, 27match_flags(const struct xt_sctp_flag_info *flag_info,
28 const int flag_count, 28 const int flag_count,
29 u_int8_t chunktype, 29 u_int8_t chunktype,
@@ -31,23 +31,21 @@ match_flags(const struct xt_sctp_flag_info *flag_info,
31{ 31{
32 int i; 32 int i;
33 33
34 for (i = 0; i < flag_count; i++) { 34 for (i = 0; i < flag_count; i++)
35 if (flag_info[i].chunktype == chunktype) { 35 if (flag_info[i].chunktype == chunktype)
36 return (chunkflags & flag_info[i].flag_mask) == flag_info[i].flag; 36 return (chunkflags & flag_info[i].flag_mask) == flag_info[i].flag;
37 }
38 }
39 37
40 return 1; 38 return true;
41} 39}
42 40
43static inline int 41static inline bool
44match_packet(const struct sk_buff *skb, 42match_packet(const struct sk_buff *skb,
45 unsigned int offset, 43 unsigned int offset,
46 const u_int32_t *chunkmap, 44 const u_int32_t *chunkmap,
47 int chunk_match_type, 45 int chunk_match_type,
48 const struct xt_sctp_flag_info *flag_info, 46 const struct xt_sctp_flag_info *flag_info,
49 const int flag_count, 47 const int flag_count,
50 int *hotdrop) 48 bool *hotdrop)
51{ 49{
52 u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)]; 50 u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
53 sctp_chunkhdr_t _sch, *sch; 51 sctp_chunkhdr_t _sch, *sch;
@@ -56,16 +54,15 @@ match_packet(const struct sk_buff *skb,
56 int i = 0; 54 int i = 0;
57#endif 55#endif
58 56
59 if (chunk_match_type == SCTP_CHUNK_MATCH_ALL) { 57 if (chunk_match_type == SCTP_CHUNK_MATCH_ALL)
60 SCTP_CHUNKMAP_COPY(chunkmapcopy, chunkmap); 58 SCTP_CHUNKMAP_COPY(chunkmapcopy, chunkmap);
61 }
62 59
63 do { 60 do {
64 sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch); 61 sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
65 if (sch == NULL || sch->length == 0) { 62 if (sch == NULL || sch->length == 0) {
66 duprintf("Dropping invalid SCTP packet.\n"); 63 duprintf("Dropping invalid SCTP packet.\n");
67 *hotdrop = 1; 64 *hotdrop = true;
68 return 0; 65 return false;
69 } 66 }
70 67
71 duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", 68 duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n",
@@ -80,28 +77,26 @@ match_packet(const struct sk_buff *skb,
80 case SCTP_CHUNK_MATCH_ANY: 77 case SCTP_CHUNK_MATCH_ANY:
81 if (match_flags(flag_info, flag_count, 78 if (match_flags(flag_info, flag_count,
82 sch->type, sch->flags)) { 79 sch->type, sch->flags)) {
83 return 1; 80 return true;
84 } 81 }
85 break; 82 break;
86 83
87 case SCTP_CHUNK_MATCH_ALL: 84 case SCTP_CHUNK_MATCH_ALL:
88 if (match_flags(flag_info, flag_count, 85 if (match_flags(flag_info, flag_count,
89 sch->type, sch->flags)) { 86 sch->type, sch->flags))
90 SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type); 87 SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type);
91 }
92 break; 88 break;
93 89
94 case SCTP_CHUNK_MATCH_ONLY: 90 case SCTP_CHUNK_MATCH_ONLY:
95 if (!match_flags(flag_info, flag_count, 91 if (!match_flags(flag_info, flag_count,
96 sch->type, sch->flags)) { 92 sch->type, sch->flags))
97 return 0; 93 return false;
98 }
99 break; 94 break;
100 } 95 }
101 } else { 96 } else {
102 switch (chunk_match_type) { 97 switch (chunk_match_type) {
103 case SCTP_CHUNK_MATCH_ONLY: 98 case SCTP_CHUNK_MATCH_ONLY:
104 return 0; 99 return false;
105 } 100 }
106 } 101 }
107 } while (offset < skb->len); 102 } while (offset < skb->len);
@@ -110,16 +105,16 @@ match_packet(const struct sk_buff *skb,
110 case SCTP_CHUNK_MATCH_ALL: 105 case SCTP_CHUNK_MATCH_ALL:
111 return SCTP_CHUNKMAP_IS_CLEAR(chunkmap); 106 return SCTP_CHUNKMAP_IS_CLEAR(chunkmap);
112 case SCTP_CHUNK_MATCH_ANY: 107 case SCTP_CHUNK_MATCH_ANY:
113 return 0; 108 return false;
114 case SCTP_CHUNK_MATCH_ONLY: 109 case SCTP_CHUNK_MATCH_ONLY:
115 return 1; 110 return true;
116 } 111 }
117 112
118 /* This will never be reached, but required to stop compiler whine */ 113 /* This will never be reached, but required to stop compiler whine */
119 return 0; 114 return false;
120} 115}
121 116
122static int 117static bool
123match(const struct sk_buff *skb, 118match(const struct sk_buff *skb,
124 const struct net_device *in, 119 const struct net_device *in,
125 const struct net_device *out, 120 const struct net_device *out,
@@ -127,29 +122,29 @@ match(const struct sk_buff *skb,
127 const void *matchinfo, 122 const void *matchinfo,
128 int offset, 123 int offset,
129 unsigned int protoff, 124 unsigned int protoff,
130 int *hotdrop) 125 bool *hotdrop)
131{ 126{
132 const struct xt_sctp_info *info = matchinfo; 127 const struct xt_sctp_info *info = matchinfo;
133 sctp_sctphdr_t _sh, *sh; 128 sctp_sctphdr_t _sh, *sh;
134 129
135 if (offset) { 130 if (offset) {
136 duprintf("Dropping non-first fragment.. FIXME\n"); 131 duprintf("Dropping non-first fragment.. FIXME\n");
137 return 0; 132 return false;
138 } 133 }
139 134
140 sh = skb_header_pointer(skb, protoff, sizeof(_sh), &_sh); 135 sh = skb_header_pointer(skb, protoff, sizeof(_sh), &_sh);
141 if (sh == NULL) { 136 if (sh == NULL) {
142 duprintf("Dropping evil TCP offset=0 tinygram.\n"); 137 duprintf("Dropping evil TCP offset=0 tinygram.\n");
143 *hotdrop = 1; 138 *hotdrop = true;
144 return 0; 139 return false;
145 } 140 }
146 duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); 141 duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
147 142
148 return SCCHECK(((ntohs(sh->source) >= info->spts[0]) 143 return SCCHECK(ntohs(sh->source) >= info->spts[0]
149 && (ntohs(sh->source) <= info->spts[1])), 144 && ntohs(sh->source) <= info->spts[1],
150 XT_SCTP_SRC_PORTS, info->flags, info->invflags) 145 XT_SCTP_SRC_PORTS, info->flags, info->invflags)
151 && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) 146 && SCCHECK(ntohs(sh->dest) >= info->dpts[0]
152 && (ntohs(sh->dest) <= info->dpts[1])), 147 && ntohs(sh->dest) <= info->dpts[1],
153 XT_SCTP_DEST_PORTS, info->flags, info->invflags) 148 XT_SCTP_DEST_PORTS, info->flags, info->invflags)
154 && SCCHECK(match_packet(skb, protoff + sizeof (sctp_sctphdr_t), 149 && SCCHECK(match_packet(skb, protoff + sizeof (sctp_sctphdr_t),
155 info->chunkmap, info->chunk_match_type, 150 info->chunkmap, info->chunk_match_type,
@@ -158,7 +153,7 @@ match(const struct sk_buff *skb,
158 XT_SCTP_CHUNK_TYPES, info->flags, info->invflags); 153 XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
159} 154}
160 155
161static int 156static bool
162checkentry(const char *tablename, 157checkentry(const char *tablename,
163 const void *inf, 158 const void *inf,
164 const struct xt_match *match, 159 const struct xt_match *match,
@@ -177,7 +172,7 @@ checkentry(const char *tablename,
177 | SCTP_CHUNK_MATCH_ONLY))); 172 | SCTP_CHUNK_MATCH_ONLY)));
178} 173}
179 174
180static struct xt_match xt_sctp_match[] = { 175static struct xt_match xt_sctp_match[] __read_mostly = {
181 { 176 {
182 .name = "sctp", 177 .name = "sctp",
183 .family = AF_INET, 178 .family = AF_INET,
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index 149294f7df71..e0a528df19a7 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -20,7 +20,7 @@ MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module");
20MODULE_ALIAS("ipt_state"); 20MODULE_ALIAS("ipt_state");
21MODULE_ALIAS("ip6t_state"); 21MODULE_ALIAS("ip6t_state");
22 22
23static int 23static bool
24match(const struct sk_buff *skb, 24match(const struct sk_buff *skb,
25 const struct net_device *in, 25 const struct net_device *in,
26 const struct net_device *out, 26 const struct net_device *out,
@@ -28,7 +28,7 @@ match(const struct sk_buff *skb,
28 const void *matchinfo, 28 const void *matchinfo,
29 int offset, 29 int offset,
30 unsigned int protoff, 30 unsigned int protoff,
31 int *hotdrop) 31 bool *hotdrop)
32{ 32{
33 const struct xt_state_info *sinfo = matchinfo; 33 const struct xt_state_info *sinfo = matchinfo;
34 enum ip_conntrack_info ctinfo; 34 enum ip_conntrack_info ctinfo;
@@ -44,18 +44,18 @@ match(const struct sk_buff *skb,
44 return (sinfo->statemask & statebit); 44 return (sinfo->statemask & statebit);
45} 45}
46 46
47static int check(const char *tablename, 47static bool check(const char *tablename,
48 const void *inf, 48 const void *inf,
49 const struct xt_match *match, 49 const struct xt_match *match,
50 void *matchinfo, 50 void *matchinfo,
51 unsigned int hook_mask) 51 unsigned int hook_mask)
52{ 52{
53 if (nf_ct_l3proto_try_module_get(match->family) < 0) { 53 if (nf_ct_l3proto_try_module_get(match->family) < 0) {
54 printk(KERN_WARNING "can't load conntrack support for " 54 printk(KERN_WARNING "can't load conntrack support for "
55 "proto=%d\n", match->family); 55 "proto=%d\n", match->family);
56 return 0; 56 return false;
57 } 57 }
58 return 1; 58 return true;
59} 59}
60 60
61static void 61static void
@@ -64,7 +64,7 @@ destroy(const struct xt_match *match, void *matchinfo)
64 nf_ct_l3proto_module_put(match->family); 64 nf_ct_l3proto_module_put(match->family);
65} 65}
66 66
67static struct xt_match xt_state_match[] = { 67static struct xt_match xt_state_match[] __read_mostly = {
68 { 68 {
69 .name = "state", 69 .name = "state",
70 .family = AF_INET, 70 .family = AF_INET,
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 091a9f89f5d5..4089dae4e286 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -24,26 +24,26 @@ MODULE_ALIAS("ip6t_statistic");
24 24
25static DEFINE_SPINLOCK(nth_lock); 25static DEFINE_SPINLOCK(nth_lock);
26 26
27static int 27static bool
28match(const struct sk_buff *skb, 28match(const struct sk_buff *skb,
29 const struct net_device *in, const struct net_device *out, 29 const struct net_device *in, const struct net_device *out,
30 const struct xt_match *match, const void *matchinfo, 30 const struct xt_match *match, const void *matchinfo,
31 int offset, unsigned int protoff, int *hotdrop) 31 int offset, unsigned int protoff, bool *hotdrop)
32{ 32{
33 struct xt_statistic_info *info = (struct xt_statistic_info *)matchinfo; 33 struct xt_statistic_info *info = (struct xt_statistic_info *)matchinfo;
34 int ret = info->flags & XT_STATISTIC_INVERT ? 1 : 0; 34 bool ret = info->flags & XT_STATISTIC_INVERT;
35 35
36 switch (info->mode) { 36 switch (info->mode) {
37 case XT_STATISTIC_MODE_RANDOM: 37 case XT_STATISTIC_MODE_RANDOM:
38 if ((net_random() & 0x7FFFFFFF) < info->u.random.probability) 38 if ((net_random() & 0x7FFFFFFF) < info->u.random.probability)
39 ret ^= 1; 39 ret = !ret;
40 break; 40 break;
41 case XT_STATISTIC_MODE_NTH: 41 case XT_STATISTIC_MODE_NTH:
42 info = info->master; 42 info = info->master;
43 spin_lock_bh(&nth_lock); 43 spin_lock_bh(&nth_lock);
44 if (info->u.nth.count++ == info->u.nth.every) { 44 if (info->u.nth.count++ == info->u.nth.every) {
45 info->u.nth.count = 0; 45 info->u.nth.count = 0;
46 ret ^= 1; 46 ret = !ret;
47 } 47 }
48 spin_unlock_bh(&nth_lock); 48 spin_unlock_bh(&nth_lock);
49 break; 49 break;
@@ -52,21 +52,21 @@ match(const struct sk_buff *skb,
52 return ret; 52 return ret;
53} 53}
54 54
55static int 55static bool
56checkentry(const char *tablename, const void *entry, 56checkentry(const char *tablename, const void *entry,
57 const struct xt_match *match, void *matchinfo, 57 const struct xt_match *match, void *matchinfo,
58 unsigned int hook_mask) 58 unsigned int hook_mask)
59{ 59{
60 struct xt_statistic_info *info = (struct xt_statistic_info *)matchinfo; 60 struct xt_statistic_info *info = matchinfo;
61 61
62 if (info->mode > XT_STATISTIC_MODE_MAX || 62 if (info->mode > XT_STATISTIC_MODE_MAX ||
63 info->flags & ~XT_STATISTIC_MASK) 63 info->flags & ~XT_STATISTIC_MASK)
64 return 0; 64 return false;
65 info->master = info; 65 info->master = info;
66 return 1; 66 return true;
67} 67}
68 68
69static struct xt_match xt_statistic_match[] = { 69static struct xt_match xt_statistic_match[] __read_mostly = {
70 { 70 {
71 .name = "statistic", 71 .name = "statistic",
72 .family = AF_INET, 72 .family = AF_INET,
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 999a005dbd0c..864133442cda 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -21,14 +21,14 @@ MODULE_LICENSE("GPL");
21MODULE_ALIAS("ipt_string"); 21MODULE_ALIAS("ipt_string");
22MODULE_ALIAS("ip6t_string"); 22MODULE_ALIAS("ip6t_string");
23 23
24static int match(const struct sk_buff *skb, 24static bool match(const struct sk_buff *skb,
25 const struct net_device *in, 25 const struct net_device *in,
26 const struct net_device *out, 26 const struct net_device *out,
27 const struct xt_match *match, 27 const struct xt_match *match,
28 const void *matchinfo, 28 const void *matchinfo,
29 int offset, 29 int offset,
30 unsigned int protoff, 30 unsigned int protoff,
31 int *hotdrop) 31 bool *hotdrop)
32{ 32{
33 const struct xt_string_info *conf = matchinfo; 33 const struct xt_string_info *conf = matchinfo;
34 struct ts_state state; 34 struct ts_state state;
@@ -42,30 +42,30 @@ static int match(const struct sk_buff *skb,
42 42
43#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m) 43#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m)
44 44
45static int checkentry(const char *tablename, 45static bool checkentry(const char *tablename,
46 const void *ip, 46 const void *ip,
47 const struct xt_match *match, 47 const struct xt_match *match,
48 void *matchinfo, 48 void *matchinfo,
49 unsigned int hook_mask) 49 unsigned int hook_mask)
50{ 50{
51 struct xt_string_info *conf = matchinfo; 51 struct xt_string_info *conf = matchinfo;
52 struct ts_config *ts_conf; 52 struct ts_config *ts_conf;
53 53
54 /* Damn, can't handle this case properly with iptables... */ 54 /* Damn, can't handle this case properly with iptables... */
55 if (conf->from_offset > conf->to_offset) 55 if (conf->from_offset > conf->to_offset)
56 return 0; 56 return false;
57 if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') 57 if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
58 return 0; 58 return false;
59 if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) 59 if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
60 return 0; 60 return false;
61 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, 61 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
62 GFP_KERNEL, TS_AUTOLOAD); 62 GFP_KERNEL, TS_AUTOLOAD);
63 if (IS_ERR(ts_conf)) 63 if (IS_ERR(ts_conf))
64 return 0; 64 return false;
65 65
66 conf->config = ts_conf; 66 conf->config = ts_conf;
67 67
68 return 1; 68 return true;
69} 69}
70 70
71static void destroy(const struct xt_match *match, void *matchinfo) 71static void destroy(const struct xt_match *match, void *matchinfo)
@@ -73,7 +73,7 @@ static void destroy(const struct xt_match *match, void *matchinfo)
73 textsearch_destroy(STRING_TEXT_PRIV(matchinfo)->config); 73 textsearch_destroy(STRING_TEXT_PRIV(matchinfo)->config);
74} 74}
75 75
76static struct xt_match xt_string_match[] = { 76static struct xt_match xt_string_match[] __read_mostly = {
77 { 77 {
78 .name = "string", 78 .name = "string",
79 .family = AF_INET, 79 .family = AF_INET,
diff --git a/net/netfilter/xt_tcpmss.c b/net/netfilter/xt_tcpmss.c
index 80571d0749f7..cd5f6d758c68 100644
--- a/net/netfilter/xt_tcpmss.c
+++ b/net/netfilter/xt_tcpmss.c
@@ -23,7 +23,7 @@ MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
23MODULE_DESCRIPTION("iptables TCP MSS match module"); 23MODULE_DESCRIPTION("iptables TCP MSS match module");
24MODULE_ALIAS("ipt_tcpmss"); 24MODULE_ALIAS("ipt_tcpmss");
25 25
26static int 26static bool
27match(const struct sk_buff *skb, 27match(const struct sk_buff *skb,
28 const struct net_device *in, 28 const struct net_device *in,
29 const struct net_device *out, 29 const struct net_device *out,
@@ -31,7 +31,7 @@ match(const struct sk_buff *skb,
31 const void *matchinfo, 31 const void *matchinfo,
32 int offset, 32 int offset,
33 unsigned int protoff, 33 unsigned int protoff,
34 int *hotdrop) 34 bool *hotdrop)
35{ 35{
36 const struct xt_tcpmss_match_info *info = matchinfo; 36 const struct xt_tcpmss_match_info *info = matchinfo;
37 struct tcphdr _tcph, *th; 37 struct tcphdr _tcph, *th;
@@ -77,11 +77,11 @@ out:
77 return info->invert; 77 return info->invert;
78 78
79dropit: 79dropit:
80 *hotdrop = 1; 80 *hotdrop = true;
81 return 0; 81 return false;
82} 82}
83 83
84static struct xt_match xt_tcpmss_match[] = { 84static struct xt_match xt_tcpmss_match[] __read_mostly = {
85 { 85 {
86 .name = "tcpmss", 86 .name = "tcpmss",
87 .family = AF_INET, 87 .family = AF_INET,
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index 46414b562a19..ab7d845224fc 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -27,22 +27,19 @@ MODULE_ALIAS("ip6t_tcp");
27 27
28 28
29/* Returns 1 if the port is matched by the range, 0 otherwise */ 29/* Returns 1 if the port is matched by the range, 0 otherwise */
30static inline int 30static inline bool
31port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert) 31port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
32{ 32{
33 int ret; 33 return (port >= min && port <= max) ^ invert;
34
35 ret = (port >= min && port <= max) ^ invert;
36 return ret;
37} 34}
38 35
39static int 36static bool
40tcp_find_option(u_int8_t option, 37tcp_find_option(u_int8_t option,
41 const struct sk_buff *skb, 38 const struct sk_buff *skb,
42 unsigned int protoff, 39 unsigned int protoff,
43 unsigned int optlen, 40 unsigned int optlen,
44 int invert, 41 bool invert,
45 int *hotdrop) 42 bool *hotdrop)
46{ 43{
47 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */ 44 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
48 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op; 45 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
@@ -57,8 +54,8 @@ tcp_find_option(u_int8_t option,
57 op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr), 54 op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
58 optlen, _opt); 55 optlen, _opt);
59 if (op == NULL) { 56 if (op == NULL) {
60 *hotdrop = 1; 57 *hotdrop = true;
61 return 0; 58 return false;
62 } 59 }
63 60
64 for (i = 0; i < optlen; ) { 61 for (i = 0; i < optlen; ) {
@@ -70,7 +67,7 @@ tcp_find_option(u_int8_t option,
70 return invert; 67 return invert;
71} 68}
72 69
73static int 70static bool
74tcp_match(const struct sk_buff *skb, 71tcp_match(const struct sk_buff *skb,
75 const struct net_device *in, 72 const struct net_device *in,
76 const struct net_device *out, 73 const struct net_device *out,
@@ -78,7 +75,7 @@ tcp_match(const struct sk_buff *skb,
78 const void *matchinfo, 75 const void *matchinfo,
79 int offset, 76 int offset,
80 unsigned int protoff, 77 unsigned int protoff,
81 int *hotdrop) 78 bool *hotdrop)
82{ 79{
83 struct tcphdr _tcph, *th; 80 struct tcphdr _tcph, *th;
84 const struct xt_tcp *tcpinfo = matchinfo; 81 const struct xt_tcp *tcpinfo = matchinfo;
@@ -92,51 +89,51 @@ tcp_match(const struct sk_buff *skb,
92 */ 89 */
93 if (offset == 1) { 90 if (offset == 1) {
94 duprintf("Dropping evil TCP offset=1 frag.\n"); 91 duprintf("Dropping evil TCP offset=1 frag.\n");
95 *hotdrop = 1; 92 *hotdrop = true;
96 } 93 }
97 /* Must not be a fragment. */ 94 /* Must not be a fragment. */
98 return 0; 95 return false;
99 } 96 }
100 97
101#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg)) 98#define FWINVTCP(bool, invflg) ((bool) ^ !!(tcpinfo->invflags & (invflg)))
102 99
103 th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); 100 th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
104 if (th == NULL) { 101 if (th == NULL) {
105 /* We've been asked to examine this packet, and we 102 /* We've been asked to examine this packet, and we
106 can't. Hence, no choice but to drop. */ 103 can't. Hence, no choice but to drop. */
107 duprintf("Dropping evil TCP offset=0 tinygram.\n"); 104 duprintf("Dropping evil TCP offset=0 tinygram.\n");
108 *hotdrop = 1; 105 *hotdrop = true;
109 return 0; 106 return false;
110 } 107 }
111 108
112 if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1], 109 if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
113 ntohs(th->source), 110 ntohs(th->source),
114 !!(tcpinfo->invflags & XT_TCP_INV_SRCPT))) 111 !!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
115 return 0; 112 return false;
116 if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1], 113 if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
117 ntohs(th->dest), 114 ntohs(th->dest),
118 !!(tcpinfo->invflags & XT_TCP_INV_DSTPT))) 115 !!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
119 return 0; 116 return false;
120 if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask) 117 if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
121 == tcpinfo->flg_cmp, 118 == tcpinfo->flg_cmp,
122 XT_TCP_INV_FLAGS)) 119 XT_TCP_INV_FLAGS))
123 return 0; 120 return false;
124 if (tcpinfo->option) { 121 if (tcpinfo->option) {
125 if (th->doff * 4 < sizeof(_tcph)) { 122 if (th->doff * 4 < sizeof(_tcph)) {
126 *hotdrop = 1; 123 *hotdrop = true;
127 return 0; 124 return false;
128 } 125 }
129 if (!tcp_find_option(tcpinfo->option, skb, protoff, 126 if (!tcp_find_option(tcpinfo->option, skb, protoff,
130 th->doff*4 - sizeof(_tcph), 127 th->doff*4 - sizeof(_tcph),
131 tcpinfo->invflags & XT_TCP_INV_OPTION, 128 tcpinfo->invflags & XT_TCP_INV_OPTION,
132 hotdrop)) 129 hotdrop))
133 return 0; 130 return false;
134 } 131 }
135 return 1; 132 return true;
136} 133}
137 134
138/* Called when user tries to insert an entry of this type. */ 135/* Called when user tries to insert an entry of this type. */
139static int 136static bool
140tcp_checkentry(const char *tablename, 137tcp_checkentry(const char *tablename,
141 const void *info, 138 const void *info,
142 const struct xt_match *match, 139 const struct xt_match *match,
@@ -149,7 +146,7 @@ tcp_checkentry(const char *tablename,
149 return !(tcpinfo->invflags & ~XT_TCP_INV_MASK); 146 return !(tcpinfo->invflags & ~XT_TCP_INV_MASK);
150} 147}
151 148
152static int 149static bool
153udp_match(const struct sk_buff *skb, 150udp_match(const struct sk_buff *skb,
154 const struct net_device *in, 151 const struct net_device *in,
155 const struct net_device *out, 152 const struct net_device *out,
@@ -157,22 +154,22 @@ udp_match(const struct sk_buff *skb,
157 const void *matchinfo, 154 const void *matchinfo,
158 int offset, 155 int offset,
159 unsigned int protoff, 156 unsigned int protoff,
160 int *hotdrop) 157 bool *hotdrop)
161{ 158{
162 struct udphdr _udph, *uh; 159 struct udphdr _udph, *uh;
163 const struct xt_udp *udpinfo = matchinfo; 160 const struct xt_udp *udpinfo = matchinfo;
164 161
165 /* Must not be a fragment. */ 162 /* Must not be a fragment. */
166 if (offset) 163 if (offset)
167 return 0; 164 return false;
168 165
169 uh = skb_header_pointer(skb, protoff, sizeof(_udph), &_udph); 166 uh = skb_header_pointer(skb, protoff, sizeof(_udph), &_udph);
170 if (uh == NULL) { 167 if (uh == NULL) {
171 /* We've been asked to examine this packet, and we 168 /* We've been asked to examine this packet, and we
172 can't. Hence, no choice but to drop. */ 169 can't. Hence, no choice but to drop. */
173 duprintf("Dropping evil UDP tinygram.\n"); 170 duprintf("Dropping evil UDP tinygram.\n");
174 *hotdrop = 1; 171 *hotdrop = true;
175 return 0; 172 return false;
176 } 173 }
177 174
178 return port_match(udpinfo->spts[0], udpinfo->spts[1], 175 return port_match(udpinfo->spts[0], udpinfo->spts[1],
@@ -184,7 +181,7 @@ udp_match(const struct sk_buff *skb,
184} 181}
185 182
186/* Called when user tries to insert an entry of this type. */ 183/* Called when user tries to insert an entry of this type. */
187static int 184static bool
188udp_checkentry(const char *tablename, 185udp_checkentry(const char *tablename,
189 const void *info, 186 const void *info,
190 const struct xt_match *match, 187 const struct xt_match *match,
@@ -197,7 +194,7 @@ udp_checkentry(const char *tablename,
197 return !(udpinfo->invflags & ~XT_UDP_INV_MASK); 194 return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
198} 195}
199 196
200static struct xt_match xt_tcpudp_match[] = { 197static struct xt_match xt_tcpudp_match[] __read_mostly = {
201 { 198 {
202 .name = "tcp", 199 .name = "tcp",
203 .family = AF_INET, 200 .family = AF_INET,
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
new file mode 100644
index 000000000000..04b677ae8dae
--- /dev/null
+++ b/net/netfilter/xt_u32.c
@@ -0,0 +1,135 @@
1/*
2 * xt_u32 - kernel module to match u32 packet content
3 *
4 * Original author: Don Cohen <don@isis.cs3-inc.com>
5 * © Jan Engelhardt <jengelh@gmx.de>, 2007
6 */
7
8#include <linux/module.h>
9#include <linux/moduleparam.h>
10#include <linux/spinlock.h>
11#include <linux/skbuff.h>
12#include <linux/types.h>
13#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter/xt_u32.h>
15
16static bool u32_match_it(const struct xt_u32 *data,
17 const struct sk_buff *skb)
18{
19 const struct xt_u32_test *ct;
20 unsigned int testind;
21 unsigned int nnums;
22 unsigned int nvals;
23 unsigned int i;
24 u_int32_t pos;
25 u_int32_t val;
26 u_int32_t at;
27 int ret;
28
29 /*
30 * Small example: "0 >> 28 == 4 && 8 & 0xFF0000 >> 16 = 6, 17"
31 * (=IPv4 and (TCP or UDP)). Outer loop runs over the "&&" operands.
32 */
33 for (testind = 0; testind < data->ntests; ++testind) {
34 ct = &data->tests[testind];
35 at = 0;
36 pos = ct->location[0].number;
37
38 if (skb->len < 4 || pos > skb->len - 4);
39 return false;
40
41 ret = skb_copy_bits(skb, pos, &val, sizeof(val));
42 BUG_ON(ret < 0);
43 val = ntohl(val);
44 nnums = ct->nnums;
45
46 /* Inner loop runs over "&", "<<", ">>" and "@" operands */
47 for (i = 1; i < nnums; ++i) {
48 u_int32_t number = ct->location[i].number;
49 switch (ct->location[i].nextop) {
50 case XT_U32_AND:
51 val &= number;
52 break;
53 case XT_U32_LEFTSH:
54 val <<= number;
55 break;
56 case XT_U32_RIGHTSH:
57 val >>= number;
58 break;
59 case XT_U32_AT:
60 if (at + val < at)
61 return false;
62 at += val;
63 pos = number;
64 if (at + 4 < at || skb->len < at + 4 ||
65 pos > skb->len - at - 4)
66 return false;
67
68 ret = skb_copy_bits(skb, at + pos, &val,
69 sizeof(val));
70 BUG_ON(ret < 0);
71 val = ntohl(val);
72 break;
73 }
74 }
75
76 /* Run over the "," and ":" operands */
77 nvals = ct->nvalues;
78 for (i = 0; i < nvals; ++i)
79 if (ct->value[i].min <= val && val <= ct->value[i].max)
80 break;
81
82 if (i >= ct->nvalues)
83 return false;
84 }
85
86 return true;
87}
88
89static bool u32_match(const struct sk_buff *skb,
90 const struct net_device *in,
91 const struct net_device *out,
92 const struct xt_match *match, const void *matchinfo,
93 int offset, unsigned int protoff, bool *hotdrop)
94{
95 const struct xt_u32 *data = matchinfo;
96 bool ret;
97
98 ret = u32_match_it(data, skb);
99 return ret ^ data->invert;
100}
101
102static struct xt_match u32_reg[] __read_mostly = {
103 {
104 .name = "u32",
105 .family = AF_INET,
106 .match = u32_match,
107 .matchsize = sizeof(struct xt_u32),
108 .me = THIS_MODULE,
109 },
110 {
111 .name = "u32",
112 .family = AF_INET6,
113 .match = u32_match,
114 .matchsize = sizeof(struct xt_u32),
115 .me = THIS_MODULE,
116 },
117};
118
119static int __init xt_u32_init(void)
120{
121 return xt_register_matches(u32_reg, ARRAY_SIZE(u32_reg));
122}
123
124static void __exit xt_u32_exit(void)
125{
126 xt_unregister_matches(u32_reg, ARRAY_SIZE(u32_reg));
127}
128
129module_init(xt_u32_init);
130module_exit(xt_u32_exit);
131MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
132MODULE_DESCRIPTION("netfilter u32 match module");
133MODULE_LICENSE("GPL");
134MODULE_ALIAS("ipt_u32");
135MODULE_ALIAS("ip6t_u32");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1f15821c8da4..a3c8e692f493 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1713,7 +1713,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1713 return 0; 1713 return 0;
1714} 1714}
1715 1715
1716static struct seq_operations netlink_seq_ops = { 1716static const struct seq_operations netlink_seq_ops = {
1717 .start = netlink_seq_start, 1717 .start = netlink_seq_start,
1718 .next = netlink_seq_next, 1718 .next = netlink_seq_next,
1719 .stop = netlink_seq_stop, 1719 .stop = netlink_seq_stop,
diff --git a/net/netlink/attr.c b/net/netlink/attr.c
index c591212793ee..e4d7bed99c2e 100644
--- a/net/netlink/attr.c
+++ b/net/netlink/attr.c
@@ -72,6 +72,17 @@ static int validate_nla(struct nlattr *nla, int maxtype,
72 return -ERANGE; 72 return -ERANGE;
73 break; 73 break;
74 74
75 case NLA_NESTED_COMPAT:
76 if (attrlen < pt->len)
77 return -ERANGE;
78 if (attrlen < NLA_ALIGN(pt->len))
79 break;
80 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
81 return -ERANGE;
82 nla = nla_data(nla) + NLA_ALIGN(pt->len);
83 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
84 return -ERANGE;
85 break;
75 default: 86 default:
76 if (pt->len) 87 if (pt->len)
77 minlen = pt->len; 88 minlen = pt->len;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 5d4a26c2aa0c..5d66490dd290 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1328,7 +1328,7 @@ static int nr_info_show(struct seq_file *seq, void *v)
1328 return 0; 1328 return 0;
1329} 1329}
1330 1330
1331static struct seq_operations nr_info_seqops = { 1331static const struct seq_operations nr_info_seqops = {
1332 .start = nr_info_start, 1332 .start = nr_info_start,
1333 .next = nr_info_next, 1333 .next = nr_info_next,
1334 .stop = nr_info_stop, 1334 .stop = nr_info_stop,
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 2f76e062609d..24fe4a66d297 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -922,7 +922,7 @@ static int nr_node_show(struct seq_file *seq, void *v)
922 return 0; 922 return 0;
923} 923}
924 924
925static struct seq_operations nr_node_seqops = { 925static const struct seq_operations nr_node_seqops = {
926 .start = nr_node_start, 926 .start = nr_node_start,
927 .next = nr_node_next, 927 .next = nr_node_next,
928 .stop = nr_node_stop, 928 .stop = nr_node_stop,
@@ -1006,7 +1006,7 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
1006 return 0; 1006 return 0;
1007} 1007}
1008 1008
1009static struct seq_operations nr_neigh_seqops = { 1009static const struct seq_operations nr_neigh_seqops = {
1010 .start = nr_neigh_start, 1010 .start = nr_neigh_start,
1011 .next = nr_neigh_next, 1011 .next = nr_neigh_next,
1012 .stop = nr_neigh_stop, 1012 .stop = nr_neigh_stop,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f8b83014ccca..7c27bd389b7e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1928,7 +1928,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
1928 return 0; 1928 return 0;
1929} 1929}
1930 1930
1931static struct seq_operations packet_seq_ops = { 1931static const struct seq_operations packet_seq_ops = {
1932 .start = packet_seq_start, 1932 .start = packet_seq_start,
1933 .next = packet_seq_next, 1933 .next = packet_seq_next,
1934 .stop = packet_seq_stop, 1934 .stop = packet_seq_stop,
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d476c43d5216..f4d3aba00800 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1454,7 +1454,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
1454 return 0; 1454 return 0;
1455} 1455}
1456 1456
1457static struct seq_operations rose_info_seqops = { 1457static const struct seq_operations rose_info_seqops = {
1458 .start = rose_info_start, 1458 .start = rose_info_start,
1459 .next = rose_info_next, 1459 .next = rose_info_next,
1460 .stop = rose_info_stop, 1460 .stop = rose_info_stop,
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 929a784a86d7..bbcbad1da0d0 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -1118,7 +1118,7 @@ static int rose_node_show(struct seq_file *seq, void *v)
1118 return 0; 1118 return 0;
1119} 1119}
1120 1120
1121static struct seq_operations rose_node_seqops = { 1121static const struct seq_operations rose_node_seqops = {
1122 .start = rose_node_start, 1122 .start = rose_node_start,
1123 .next = rose_node_next, 1123 .next = rose_node_next,
1124 .stop = rose_node_stop, 1124 .stop = rose_node_stop,
@@ -1200,7 +1200,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v)
1200} 1200}
1201 1201
1202 1202
1203static struct seq_operations rose_neigh_seqops = { 1203static const struct seq_operations rose_neigh_seqops = {
1204 .start = rose_neigh_start, 1204 .start = rose_neigh_start,
1205 .next = rose_neigh_next, 1205 .next = rose_neigh_next,
1206 .stop = rose_neigh_stop, 1206 .stop = rose_neigh_stop,
@@ -1284,7 +1284,7 @@ static int rose_route_show(struct seq_file *seq, void *v)
1284 return 0; 1284 return 0;
1285} 1285}
1286 1286
1287static struct seq_operations rose_route_seqops = { 1287static const struct seq_operations rose_route_seqops = {
1288 .start = rose_route_start, 1288 .start = rose_route_start,
1289 .next = rose_route_next, 1289 .next = rose_route_next,
1290 .stop = rose_route_stop, 1290 .stop = rose_route_stop,
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
index 1c0be0e77b16..2e83ce325d15 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/ar-proc.c
@@ -30,31 +30,13 @@ static const char *rxrpc_conn_states[] = {
30 */ 30 */
31static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) 31static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
32{ 32{
33 struct list_head *_p;
34 loff_t pos = *_pos;
35
36 read_lock(&rxrpc_call_lock); 33 read_lock(&rxrpc_call_lock);
37 if (!pos) 34 return seq_list_start_head(&rxrpc_calls, *_pos);
38 return SEQ_START_TOKEN;
39 pos--;
40
41 list_for_each(_p, &rxrpc_calls)
42 if (!pos--)
43 break;
44
45 return _p != &rxrpc_calls ? _p : NULL;
46} 35}
47 36
48static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) 37static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
49{ 38{
50 struct list_head *_p; 39 return seq_list_next(v, &rxrpc_calls, pos);
51
52 (*pos)++;
53
54 _p = v;
55 _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next;
56
57 return _p != &rxrpc_calls ? _p : NULL;
58} 40}
59 41
60static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) 42static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
@@ -68,7 +50,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
68 struct rxrpc_call *call; 50 struct rxrpc_call *call;
69 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; 51 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
70 52
71 if (v == SEQ_START_TOKEN) { 53 if (v == &rxrpc_calls) {
72 seq_puts(seq, 54 seq_puts(seq,
73 "Proto Local Remote " 55 "Proto Local Remote "
74 " SvID ConnID CallID End Use State Abort " 56 " SvID ConnID CallID End Use State Abort "
@@ -104,7 +86,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
104 return 0; 86 return 0;
105} 87}
106 88
107static struct seq_operations rxrpc_call_seq_ops = { 89static const struct seq_operations rxrpc_call_seq_ops = {
108 .start = rxrpc_call_seq_start, 90 .start = rxrpc_call_seq_start,
109 .next = rxrpc_call_seq_next, 91 .next = rxrpc_call_seq_next,
110 .stop = rxrpc_call_seq_stop, 92 .stop = rxrpc_call_seq_stop,
@@ -129,32 +111,14 @@ struct file_operations rxrpc_call_seq_fops = {
129 */ 111 */
130static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) 112static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
131{ 113{
132 struct list_head *_p;
133 loff_t pos = *_pos;
134
135 read_lock(&rxrpc_connection_lock); 114 read_lock(&rxrpc_connection_lock);
136 if (!pos) 115 return seq_list_start_head(&rxrpc_connections, *_pos);
137 return SEQ_START_TOKEN;
138 pos--;
139
140 list_for_each(_p, &rxrpc_connections)
141 if (!pos--)
142 break;
143
144 return _p != &rxrpc_connections ? _p : NULL;
145} 116}
146 117
147static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, 118static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
148 loff_t *pos) 119 loff_t *pos)
149{ 120{
150 struct list_head *_p; 121 return seq_list_next(v, &rxrpc_connections, pos);
151
152 (*pos)++;
153
154 _p = v;
155 _p = (v == SEQ_START_TOKEN) ? rxrpc_connections.next : _p->next;
156
157 return _p != &rxrpc_connections ? _p : NULL;
158} 122}
159 123
160static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) 124static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
@@ -168,7 +132,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
168 struct rxrpc_transport *trans; 132 struct rxrpc_transport *trans;
169 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; 133 char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
170 134
171 if (v == SEQ_START_TOKEN) { 135 if (v == &rxrpc_connections) {
172 seq_puts(seq, 136 seq_puts(seq,
173 "Proto Local Remote " 137 "Proto Local Remote "
174 " SvID ConnID Calls End Use State Key " 138 " SvID ConnID Calls End Use State Key "
@@ -206,7 +170,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
206 return 0; 170 return 0;
207} 171}
208 172
209static struct seq_operations rxrpc_connection_seq_ops = { 173static const struct seq_operations rxrpc_connection_seq_ops = {
210 .start = rxrpc_connection_seq_start, 174 .start = rxrpc_connection_seq_start,
211 .next = rxrpc_connection_seq_next, 175 .next = rxrpc_connection_seq_next,
212 .stop = rxrpc_connection_seq_stop, 176 .stop = rxrpc_connection_seq_stop,
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 475df8449be9..b4662888bdbd 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -111,6 +111,17 @@ config NET_SCH_PRIO
111 To compile this code as a module, choose M here: the 111 To compile this code as a module, choose M here: the
112 module will be called sch_prio. 112 module will be called sch_prio.
113 113
114config NET_SCH_RR
115 tristate "Multi Band Round Robin Queuing (RR)"
116 select NET_SCH_PRIO
117 ---help---
118 Say Y here if you want to use an n-band round robin packet
119 scheduler.
120
121 The module uses sch_prio for its framework and is aliased as
122 sch_rr, so it will load sch_prio, although it is referred
123 to using sch_rr.
124
114config NET_SCH_RED 125config NET_SCH_RED
115 tristate "Random Early Detection (RED)" 126 tristate "Random Early Detection (RED)"
116 ---help--- 127 ---help---
@@ -275,7 +286,6 @@ config CLS_U32_MARK
275config NET_CLS_RSVP 286config NET_CLS_RSVP
276 tristate "IPv4 Resource Reservation Protocol (RSVP)" 287 tristate "IPv4 Resource Reservation Protocol (RSVP)"
277 select NET_CLS 288 select NET_CLS
278 select NET_ESTIMATOR
279 ---help--- 289 ---help---
280 The Resource Reservation Protocol (RSVP) permits end systems to 290 The Resource Reservation Protocol (RSVP) permits end systems to
281 request a minimum and maximum data flow rate for a connection; this 291 request a minimum and maximum data flow rate for a connection; this
@@ -290,7 +300,6 @@ config NET_CLS_RSVP
290config NET_CLS_RSVP6 300config NET_CLS_RSVP6
291 tristate "IPv6 Resource Reservation Protocol (RSVP6)" 301 tristate "IPv6 Resource Reservation Protocol (RSVP6)"
292 select NET_CLS 302 select NET_CLS
293 select NET_ESTIMATOR
294 ---help--- 303 ---help---
295 The Resource Reservation Protocol (RSVP) permits end systems to 304 The Resource Reservation Protocol (RSVP) permits end systems to
296 request a minimum and maximum data flow rate for a connection; this 305 request a minimum and maximum data flow rate for a connection; this
@@ -382,7 +391,6 @@ config NET_EMATCH_TEXT
382 391
383config NET_CLS_ACT 392config NET_CLS_ACT
384 bool "Actions" 393 bool "Actions"
385 select NET_ESTIMATOR
386 ---help--- 394 ---help---
387 Say Y here if you want to use traffic control actions. Actions 395 Say Y here if you want to use traffic control actions. Actions
388 get attached to classifiers and are invoked after a successful 396 get attached to classifiers and are invoked after a successful
@@ -465,7 +473,6 @@ config NET_ACT_SIMP
465config NET_CLS_POLICE 473config NET_CLS_POLICE
466 bool "Traffic Policing (obsolete)" 474 bool "Traffic Policing (obsolete)"
467 depends on NET_CLS_ACT!=y 475 depends on NET_CLS_ACT!=y
468 select NET_ESTIMATOR
469 ---help--- 476 ---help---
470 Say Y here if you want to do traffic policing, i.e. strict 477 Say Y here if you want to do traffic policing, i.e. strict
471 bandwidth limiting. This option is obsoleted by the traffic 478 bandwidth limiting. This option is obsoleted by the traffic
@@ -480,14 +487,6 @@ config NET_CLS_IND
480 classification based on the incoming device. This option is 487 classification based on the incoming device. This option is
481 likely to disappear in favour of the metadata ematch. 488 likely to disappear in favour of the metadata ematch.
482 489
483config NET_ESTIMATOR
484 bool "Rate estimator"
485 ---help---
486 Say Y here to allow using rate estimators to estimate the current
487 rate-of-flow for network devices, queues, etc. This module is
488 automatically selected if needed but can be selected manually for
489 statistical purposes.
490
491endif # NET_SCHED 490endif # NET_SCHED
492 491
493endmenu 492endmenu
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 711dd26c95c3..feef366cad5d 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -11,23 +11,13 @@
11 * 11 *
12 */ 12 */
13 13
14#include <asm/uaccess.h>
15#include <asm/system.h>
16#include <linux/bitops.h>
17#include <linux/types.h> 14#include <linux/types.h>
18#include <linux/kernel.h> 15#include <linux/kernel.h>
19#include <linux/string.h> 16#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h>
24#include <linux/errno.h> 17#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/skbuff.h> 18#include <linux/skbuff.h>
28#include <linux/init.h> 19#include <linux/init.h>
29#include <linux/kmod.h> 20#include <linux/kmod.h>
30#include <net/sock.h>
31#include <net/sch_generic.h> 21#include <net/sch_generic.h>
32#include <net/act_api.h> 22#include <net/act_api.h>
33#include <net/netlink.h> 23#include <net/netlink.h>
@@ -42,10 +32,8 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
42 write_lock_bh(hinfo->lock); 32 write_lock_bh(hinfo->lock);
43 *p1p = p->tcfc_next; 33 *p1p = p->tcfc_next;
44 write_unlock_bh(hinfo->lock); 34 write_unlock_bh(hinfo->lock);
45#ifdef CONFIG_NET_ESTIMATOR
46 gen_kill_estimator(&p->tcfc_bstats, 35 gen_kill_estimator(&p->tcfc_bstats,
47 &p->tcfc_rate_est); 36 &p->tcfc_rate_est);
48#endif
49 kfree(p); 37 kfree(p);
50 return; 38 return;
51 } 39 }
@@ -232,15 +220,12 @@ struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_acti
232 p->tcfc_bindcnt = 1; 220 p->tcfc_bindcnt = 1;
233 221
234 spin_lock_init(&p->tcfc_lock); 222 spin_lock_init(&p->tcfc_lock);
235 p->tcfc_stats_lock = &p->tcfc_lock;
236 p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo); 223 p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
237 p->tcfc_tm.install = jiffies; 224 p->tcfc_tm.install = jiffies;
238 p->tcfc_tm.lastuse = jiffies; 225 p->tcfc_tm.lastuse = jiffies;
239#ifdef CONFIG_NET_ESTIMATOR
240 if (est) 226 if (est)
241 gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, 227 gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
242 p->tcfc_stats_lock, est); 228 &p->tcfc_lock, est);
243#endif
244 a->priv = (void *) p; 229 a->priv = (void *) p;
245 return p; 230 return p;
246} 231}
@@ -599,12 +584,12 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
599 if (compat_mode) { 584 if (compat_mode) {
600 if (a->type == TCA_OLD_COMPAT) 585 if (a->type == TCA_OLD_COMPAT)
601 err = gnet_stats_start_copy_compat(skb, 0, 586 err = gnet_stats_start_copy_compat(skb, 0,
602 TCA_STATS, TCA_XSTATS, h->tcf_stats_lock, &d); 587 TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
603 else 588 else
604 return 0; 589 return 0;
605 } else 590 } else
606 err = gnet_stats_start_copy(skb, TCA_ACT_STATS, 591 err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
607 h->tcf_stats_lock, &d); 592 &h->tcf_lock, &d);
608 593
609 if (err < 0) 594 if (err < 0)
610 goto errout; 595 goto errout;
@@ -614,9 +599,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
614 goto errout; 599 goto errout;
615 600
616 if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || 601 if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
617#ifdef CONFIG_NET_ESTIMATOR
618 gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 || 602 gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
619#endif
620 gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) 603 gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
621 goto errout; 604 goto errout;
622 605
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 7517f3791541..a9631e426d91 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -10,26 +10,15 @@
10 * 10 *
11 */ 11 */
12 12
13#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/bitops.h>
16#include <linux/types.h> 13#include <linux/types.h>
17#include <linux/kernel.h> 14#include <linux/kernel.h>
18#include <linux/string.h> 15#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/socket.h>
21#include <linux/sockios.h>
22#include <linux/in.h>
23#include <linux/errno.h> 16#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/netdevice.h>
26#include <linux/skbuff.h> 17#include <linux/skbuff.h>
27#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
28#include <linux/module.h> 19#include <linux/module.h>
29#include <linux/init.h> 20#include <linux/init.h>
30#include <linux/proc_fs.h>
31#include <net/netlink.h> 21#include <net/netlink.h>
32#include <net/sock.h>
33#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
34#include <linux/tc_act/tc_gact.h> 23#include <linux/tc_act/tc_gact.h>
35#include <net/tc_act/tc_gact.h> 24#include <net/tc_act/tc_gact.h>
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 00b05f422d45..6b407ece953c 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -11,27 +11,15 @@
11 * Copyright: Jamal Hadi Salim (2002-4) 11 * Copyright: Jamal Hadi Salim (2002-4)
12 */ 12 */
13 13
14#include <asm/uaccess.h>
15#include <asm/system.h>
16#include <asm/bitops.h>
17#include <linux/types.h> 14#include <linux/types.h>
18#include <linux/kernel.h> 15#include <linux/kernel.h>
19#include <linux/string.h> 16#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h>
24#include <linux/errno.h> 17#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/skbuff.h> 18#include <linux/skbuff.h>
28#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
29#include <linux/module.h> 20#include <linux/module.h>
30#include <linux/init.h> 21#include <linux/init.h>
31#include <linux/proc_fs.h>
32#include <linux/kmod.h>
33#include <net/netlink.h> 22#include <net/netlink.h>
34#include <net/sock.h>
35#include <net/pkt_sched.h> 23#include <net/pkt_sched.h>
36#include <linux/tc_act/tc_ipt.h> 24#include <linux/tc_act/tc_ipt.h>
37#include <net/tc_act/tc_ipt.h> 25#include <net/tc_act/tc_ipt.h>
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index de21c92faaa2..579578944ae7 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -12,31 +12,19 @@
12 * 12 *
13 */ 13 */
14 14
15#include <asm/uaccess.h>
16#include <asm/system.h>
17#include <asm/bitops.h>
18#include <linux/types.h> 15#include <linux/types.h>
19#include <linux/kernel.h> 16#include <linux/kernel.h>
20#include <linux/string.h> 17#include <linux/string.h>
21#include <linux/mm.h>
22#include <linux/socket.h>
23#include <linux/sockios.h>
24#include <linux/in.h>
25#include <linux/errno.h> 18#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/netdevice.h>
28#include <linux/skbuff.h> 19#include <linux/skbuff.h>
29#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
30#include <linux/module.h> 21#include <linux/module.h>
31#include <linux/init.h> 22#include <linux/init.h>
32#include <linux/proc_fs.h>
33#include <net/netlink.h> 23#include <net/netlink.h>
34#include <net/sock.h>
35#include <net/pkt_sched.h> 24#include <net/pkt_sched.h>
36#include <linux/tc_act/tc_mirred.h> 25#include <linux/tc_act/tc_mirred.h>
37#include <net/tc_act/tc_mirred.h> 26#include <net/tc_act/tc_mirred.h>
38 27
39#include <linux/etherdevice.h>
40#include <linux/if_arp.h> 28#include <linux/if_arp.h>
41 29
42#define MIRRED_TAB_MASK 7 30#define MIRRED_TAB_MASK 7
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 6f8684b5617e..b46fab5fb323 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -9,26 +9,15 @@
9 * Authors: Jamal Hadi Salim (2002-4) 9 * Authors: Jamal Hadi Salim (2002-4)
10 */ 10 */
11 11
12#include <asm/uaccess.h>
13#include <asm/system.h>
14#include <asm/bitops.h>
15#include <linux/types.h> 12#include <linux/types.h>
16#include <linux/kernel.h> 13#include <linux/kernel.h>
17#include <linux/string.h> 14#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/socket.h>
20#include <linux/sockios.h>
21#include <linux/in.h>
22#include <linux/errno.h> 15#include <linux/errno.h>
23#include <linux/interrupt.h>
24#include <linux/netdevice.h>
25#include <linux/skbuff.h> 16#include <linux/skbuff.h>
26#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
27#include <linux/module.h> 18#include <linux/module.h>
28#include <linux/init.h> 19#include <linux/init.h>
29#include <linux/proc_fs.h>
30#include <net/netlink.h> 20#include <net/netlink.h>
31#include <net/sock.h>
32#include <net/pkt_sched.h> 21#include <net/pkt_sched.h>
33#include <linux/tc_act/tc_pedit.h> 22#include <linux/tc_act/tc_pedit.h>
34#include <net/tc_act/tc_pedit.h> 23#include <net/tc_act/tc_pedit.h>
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 616f465f407e..d20403890877 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -10,25 +10,15 @@
10 * J Hadi Salim (action changes) 10 * J Hadi Salim (action changes)
11 */ 11 */
12 12
13#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/bitops.h>
16#include <linux/module.h> 13#include <linux/module.h>
17#include <linux/types.h> 14#include <linux/types.h>
18#include <linux/kernel.h> 15#include <linux/kernel.h>
19#include <linux/string.h> 16#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h>
24#include <linux/errno.h> 17#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/skbuff.h> 18#include <linux/skbuff.h>
28#include <linux/module.h> 19#include <linux/module.h>
29#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
30#include <linux/init.h> 21#include <linux/init.h>
31#include <net/sock.h>
32#include <net/act_api.h> 22#include <net/act_api.h>
33#include <net/netlink.h> 23#include <net/netlink.h>
34 24
@@ -118,10 +108,8 @@ void tcf_police_destroy(struct tcf_police *p)
118 write_lock_bh(&police_lock); 108 write_lock_bh(&police_lock);
119 *p1p = p->tcf_next; 109 *p1p = p->tcf_next;
120 write_unlock_bh(&police_lock); 110 write_unlock_bh(&police_lock);
121#ifdef CONFIG_NET_ESTIMATOR
122 gen_kill_estimator(&p->tcf_bstats, 111 gen_kill_estimator(&p->tcf_bstats,
123 &p->tcf_rate_est); 112 &p->tcf_rate_est);
124#endif
125 if (p->tcfp_R_tab) 113 if (p->tcfp_R_tab)
126 qdisc_put_rtab(p->tcfp_R_tab); 114 qdisc_put_rtab(p->tcfp_R_tab);
127 if (p->tcfp_P_tab) 115 if (p->tcfp_P_tab)
@@ -185,7 +173,6 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
185 ret = ACT_P_CREATED; 173 ret = ACT_P_CREATED;
186 police->tcf_refcnt = 1; 174 police->tcf_refcnt = 1;
187 spin_lock_init(&police->tcf_lock); 175 spin_lock_init(&police->tcf_lock);
188 police->tcf_stats_lock = &police->tcf_lock;
189 if (bind) 176 if (bind)
190 police->tcf_bindcnt = 1; 177 police->tcf_bindcnt = 1;
191override: 178override:
@@ -227,15 +214,13 @@ override:
227 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu); 214 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
228 police->tcf_action = parm->action; 215 police->tcf_action = parm->action;
229 216
230#ifdef CONFIG_NET_ESTIMATOR
231 if (tb[TCA_POLICE_AVRATE-1]) 217 if (tb[TCA_POLICE_AVRATE-1])
232 police->tcfp_ewma_rate = 218 police->tcfp_ewma_rate =
233 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 219 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
234 if (est) 220 if (est)
235 gen_replace_estimator(&police->tcf_bstats, 221 gen_replace_estimator(&police->tcf_bstats,
236 &police->tcf_rate_est, 222 &police->tcf_rate_est,
237 police->tcf_stats_lock, est); 223 &police->tcf_lock, est);
238#endif
239 224
240 spin_unlock_bh(&police->tcf_lock); 225 spin_unlock_bh(&police->tcf_lock);
241 if (ret != ACT_P_CREATED) 226 if (ret != ACT_P_CREATED)
@@ -281,14 +266,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
281 police->tcf_bstats.bytes += skb->len; 266 police->tcf_bstats.bytes += skb->len;
282 police->tcf_bstats.packets++; 267 police->tcf_bstats.packets++;
283 268
284#ifdef CONFIG_NET_ESTIMATOR
285 if (police->tcfp_ewma_rate && 269 if (police->tcfp_ewma_rate &&
286 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 270 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
287 police->tcf_qstats.overlimits++; 271 police->tcf_qstats.overlimits++;
288 spin_unlock(&police->tcf_lock); 272 spin_unlock(&police->tcf_lock);
289 return police->tcf_action; 273 return police->tcf_action;
290 } 274 }
291#endif
292 275
293 if (skb->len <= police->tcfp_mtu) { 276 if (skb->len <= police->tcfp_mtu) {
294 if (police->tcfp_R_tab == NULL) { 277 if (police->tcfp_R_tab == NULL) {
@@ -348,10 +331,8 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
348 if (police->tcfp_result) 331 if (police->tcfp_result)
349 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), 332 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
350 &police->tcfp_result); 333 &police->tcfp_result);
351#ifdef CONFIG_NET_ESTIMATOR
352 if (police->tcfp_ewma_rate) 334 if (police->tcfp_ewma_rate)
353 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate); 335 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
354#endif
355 return skb->len; 336 return skb->len;
356 337
357rtattr_failure: 338rtattr_failure:
@@ -458,7 +439,6 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
458 439
459 police->tcf_refcnt = 1; 440 police->tcf_refcnt = 1;
460 spin_lock_init(&police->tcf_lock); 441 spin_lock_init(&police->tcf_lock);
461 police->tcf_stats_lock = &police->tcf_lock;
462 if (parm->rate.rate) { 442 if (parm->rate.rate) {
463 police->tcfp_R_tab = 443 police->tcfp_R_tab =
464 qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]); 444 qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
@@ -477,14 +457,12 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
477 goto failure; 457 goto failure;
478 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 458 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
479 } 459 }
480#ifdef CONFIG_NET_ESTIMATOR
481 if (tb[TCA_POLICE_AVRATE-1]) { 460 if (tb[TCA_POLICE_AVRATE-1]) {
482 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32)) 461 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
483 goto failure; 462 goto failure;
484 police->tcfp_ewma_rate = 463 police->tcfp_ewma_rate =
485 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 464 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
486 } 465 }
487#endif
488 police->tcfp_toks = police->tcfp_burst = parm->burst; 466 police->tcfp_toks = police->tcfp_burst = parm->burst;
489 police->tcfp_mtu = parm->mtu; 467 police->tcfp_mtu = parm->mtu;
490 if (police->tcfp_mtu == 0) { 468 if (police->tcfp_mtu == 0) {
@@ -498,11 +476,9 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
498 police->tcf_index = parm->index ? parm->index : 476 police->tcf_index = parm->index ? parm->index :
499 tcf_police_new_index(); 477 tcf_police_new_index();
500 police->tcf_action = parm->action; 478 police->tcf_action = parm->action;
501#ifdef CONFIG_NET_ESTIMATOR
502 if (est) 479 if (est)
503 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est, 480 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
504 police->tcf_stats_lock, est); 481 &police->tcf_lock, est);
505#endif
506 h = tcf_hash(police->tcf_index, POL_TAB_MASK); 482 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
507 write_lock_bh(&police_lock); 483 write_lock_bh(&police_lock);
508 police->tcf_next = tcf_police_ht[h]; 484 police->tcf_next = tcf_police_ht[h];
@@ -528,14 +504,12 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police)
528 police->tcf_bstats.bytes += skb->len; 504 police->tcf_bstats.bytes += skb->len;
529 police->tcf_bstats.packets++; 505 police->tcf_bstats.packets++;
530 506
531#ifdef CONFIG_NET_ESTIMATOR
532 if (police->tcfp_ewma_rate && 507 if (police->tcfp_ewma_rate &&
533 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 508 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
534 police->tcf_qstats.overlimits++; 509 police->tcf_qstats.overlimits++;
535 spin_unlock(&police->tcf_lock); 510 spin_unlock(&police->tcf_lock);
536 return police->tcf_action; 511 return police->tcf_action;
537 } 512 }
538#endif
539 if (skb->len <= police->tcfp_mtu) { 513 if (skb->len <= police->tcfp_mtu) {
540 if (police->tcfp_R_tab == NULL) { 514 if (police->tcfp_R_tab == NULL) {
541 spin_unlock(&police->tcf_lock); 515 spin_unlock(&police->tcf_lock);
@@ -591,10 +565,8 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
591 if (police->tcfp_result) 565 if (police->tcfp_result)
592 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), 566 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
593 &police->tcfp_result); 567 &police->tcfp_result);
594#ifdef CONFIG_NET_ESTIMATOR
595 if (police->tcfp_ewma_rate) 568 if (police->tcfp_ewma_rate)
596 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate); 569 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
597#endif
598 return skb->len; 570 return skb->len;
599 571
600rtattr_failure: 572rtattr_failure:
@@ -607,14 +579,12 @@ int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
607 struct gnet_dump d; 579 struct gnet_dump d;
608 580
609 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 581 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
610 TCA_XSTATS, police->tcf_stats_lock, 582 TCA_XSTATS, &police->tcf_lock,
611 &d) < 0) 583 &d) < 0)
612 goto errout; 584 goto errout;
613 585
614 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 || 586 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
615#ifdef CONFIG_NET_ESTIMATOR
616 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 || 587 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
617#endif
618 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0) 588 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
619 goto errout; 589 goto errout;
620 590
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 36e1edad5990..fb84ef33d14f 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/netdevice.h>
17#include <linux/skbuff.h> 16#include <linux/skbuff.h>
18#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
19#include <net/netlink.h> 18#include <net/netlink.h>
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ebf94edf0478..36b72aab1bde 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -14,26 +14,16 @@
14 * 14 *
15 */ 15 */
16 16
17#include <asm/uaccess.h>
18#include <asm/system.h>
19#include <linux/bitops.h>
20#include <linux/module.h> 17#include <linux/module.h>
21#include <linux/types.h> 18#include <linux/types.h>
22#include <linux/kernel.h> 19#include <linux/kernel.h>
23#include <linux/string.h> 20#include <linux/string.h>
24#include <linux/mm.h>
25#include <linux/socket.h>
26#include <linux/sockios.h>
27#include <linux/in.h>
28#include <linux/errno.h> 21#include <linux/errno.h>
29#include <linux/interrupt.h>
30#include <linux/netdevice.h>
31#include <linux/skbuff.h> 22#include <linux/skbuff.h>
32#include <linux/init.h> 23#include <linux/init.h>
33#include <linux/kmod.h> 24#include <linux/kmod.h>
34#include <linux/netlink.h> 25#include <linux/netlink.h>
35#include <net/netlink.h> 26#include <net/netlink.h>
36#include <net/sock.h>
37#include <net/pkt_sched.h> 27#include <net/pkt_sched.h>
38#include <net/pkt_cls.h> 28#include <net/pkt_cls.h>
39 29
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index c885412d79d5..8dbcf2771a46 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -13,7 +13,6 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/mm.h>
17#include <linux/errno.h> 16#include <linux/errno.h>
18#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
19#include <linux/skbuff.h> 18#include <linux/skbuff.h>
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index bbec4a0d4dcb..8adbd6a37d14 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -19,29 +19,12 @@
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <asm/uaccess.h>
23#include <asm/system.h>
24#include <linux/bitops.h>
25#include <linux/types.h> 22#include <linux/types.h>
26#include <linux/kernel.h> 23#include <linux/kernel.h>
27#include <linux/string.h> 24#include <linux/string.h>
28#include <linux/mm.h>
29#include <linux/socket.h>
30#include <linux/sockios.h>
31#include <linux/in.h>
32#include <linux/errno.h> 25#include <linux/errno.h>
33#include <linux/interrupt.h>
34#include <linux/if_ether.h>
35#include <linux/inet.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/notifier.h>
39#include <linux/netfilter.h>
40#include <net/ip.h>
41#include <net/netlink.h>
42#include <net/route.h>
43#include <linux/skbuff.h> 26#include <linux/skbuff.h>
44#include <net/sock.h> 27#include <net/netlink.h>
45#include <net/act_api.h> 28#include <net/act_api.h>
46#include <net/pkt_cls.h> 29#include <net/pkt_cls.h>
47 30
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index cc941d0ee3a5..0a8409c1d28a 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -10,28 +10,14 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/bitops.h>
16#include <linux/types.h> 13#include <linux/types.h>
17#include <linux/kernel.h> 14#include <linux/kernel.h>
18#include <linux/string.h> 15#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/socket.h>
21#include <linux/sockios.h>
22#include <linux/in.h>
23#include <linux/errno.h> 16#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/if_ether.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/notifier.h>
30#include <net/ip.h>
31#include <net/netlink.h>
32#include <net/route.h>
33#include <linux/skbuff.h> 17#include <linux/skbuff.h>
34#include <net/sock.h> 18#include <net/dst.h>
19#include <net/route.h>
20#include <net/netlink.h>
35#include <net/act_api.h> 21#include <net/act_api.h>
36#include <net/pkt_cls.h> 22#include <net/pkt_cls.h>
37 23
diff --git a/net/sched/cls_rsvp.c b/net/sched/cls_rsvp.c
index 0a683c07c648..cbb5e0d600f3 100644
--- a/net/sched/cls_rsvp.c
+++ b/net/sched/cls_rsvp.c
@@ -10,27 +10,12 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/bitops.h>
16#include <linux/types.h> 13#include <linux/types.h>
17#include <linux/kernel.h> 14#include <linux/kernel.h>
18#include <linux/string.h> 15#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/socket.h>
21#include <linux/sockios.h>
22#include <linux/in.h>
23#include <linux/errno.h> 16#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/if_ether.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/notifier.h>
30#include <net/ip.h>
31#include <net/route.h>
32#include <linux/skbuff.h> 17#include <linux/skbuff.h>
33#include <net/sock.h> 18#include <net/ip.h>
34#include <net/netlink.h> 19#include <net/netlink.h>
35#include <net/act_api.h> 20#include <net/act_api.h>
36#include <net/pkt_cls.h> 21#include <net/pkt_cls.h>
diff --git a/net/sched/cls_rsvp6.c b/net/sched/cls_rsvp6.c
index 93b6abed57db..dd08aea2aee5 100644
--- a/net/sched/cls_rsvp6.c
+++ b/net/sched/cls_rsvp6.c
@@ -10,28 +10,12 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/bitops.h>
16#include <linux/types.h> 13#include <linux/types.h>
17#include <linux/kernel.h> 14#include <linux/kernel.h>
18#include <linux/string.h> 15#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/socket.h>
21#include <linux/sockios.h>
22#include <linux/in.h>
23#include <linux/errno.h> 16#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/if_ether.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/notifier.h>
30#include <net/ip.h>
31#include <linux/ipv6.h> 17#include <linux/ipv6.h>
32#include <net/route.h>
33#include <linux/skbuff.h> 18#include <linux/skbuff.h>
34#include <net/sock.h>
35#include <net/act_api.h> 19#include <net/act_api.h>
36#include <net/pkt_cls.h> 20#include <net/pkt_cls.h>
37#include <net/netlink.h> 21#include <net/netlink.h>
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 47ac0c556429..2314820a080a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -9,12 +9,9 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/netdevice.h>
13#include <net/ip.h>
14#include <net/act_api.h> 12#include <net/act_api.h>
15#include <net/netlink.h> 13#include <net/netlink.h>
16#include <net/pkt_cls.h> 14#include <net/pkt_cls.h>
17#include <net/route.h>
18 15
19 16
20/* 17/*
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c7a347bd6d70..77961e2314dc 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -30,30 +30,14 @@
30 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro> 30 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
31 */ 31 */
32 32
33#include <asm/uaccess.h>
34#include <asm/system.h>
35#include <linux/bitops.h>
36#include <linux/module.h> 33#include <linux/module.h>
37#include <linux/types.h> 34#include <linux/types.h>
38#include <linux/kernel.h> 35#include <linux/kernel.h>
39#include <linux/string.h> 36#include <linux/string.h>
40#include <linux/mm.h>
41#include <linux/socket.h>
42#include <linux/sockios.h>
43#include <linux/in.h>
44#include <linux/errno.h> 37#include <linux/errno.h>
45#include <linux/interrupt.h>
46#include <linux/if_ether.h>
47#include <linux/inet.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/notifier.h>
51#include <linux/rtnetlink.h> 38#include <linux/rtnetlink.h>
52#include <net/ip.h>
53#include <net/netlink.h>
54#include <net/route.h>
55#include <linux/skbuff.h> 39#include <linux/skbuff.h>
56#include <net/sock.h> 40#include <net/netlink.h>
57#include <net/act_api.h> 41#include <net/act_api.h>
58#include <net/pkt_cls.h> 42#include <net/pkt_cls.h>
59 43
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index 8d6dacd81900..cc49c932641d 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -98,3 +98,4 @@ MODULE_LICENSE("GPL");
98module_init(init_em_cmp); 98module_init(init_em_cmp);
99module_exit(exit_em_cmp); 99module_exit(exit_em_cmp);
100 100
101MODULE_ALIAS_TCF_EMATCH(TCF_EM_CMP);
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 60acf8cdb27b..650f09c8bd6a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -848,3 +848,5 @@ MODULE_LICENSE("GPL");
848 848
849module_init(init_em_meta); 849module_init(init_em_meta);
850module_exit(exit_em_meta); 850module_exit(exit_em_meta);
851
852MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index b4b36efce292..370a1b2ea317 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -76,3 +76,5 @@ MODULE_LICENSE("GPL");
76 76
77module_init(init_em_nbyte); 77module_init(init_em_nbyte);
78module_exit(exit_em_nbyte); 78module_exit(exit_em_nbyte);
79
80MODULE_ALIAS_TCF_EMATCH(TCF_EM_NBYTE);
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index e8f46169449d..d5cd86efb7d0 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -150,3 +150,5 @@ MODULE_LICENSE("GPL");
150 150
151module_init(init_em_text); 151module_init(init_em_text);
152module_exit(exit_em_text); 152module_exit(exit_em_text);
153
154MODULE_ALIAS_TCF_EMATCH(TCF_EM_TEXT);
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 0a2a7fe08de3..112796e4a7c4 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -60,3 +60,5 @@ MODULE_LICENSE("GPL");
60 60
61module_init(init_em_u32); 61module_init(init_em_u32);
62module_exit(exit_em_u32); 62module_exit(exit_em_u32);
63
64MODULE_ALIAS_TCF_EMATCH(TCF_EM_U32);
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 63146d339d81..f3a104e323bd 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -84,9 +84,7 @@
84#include <linux/module.h> 84#include <linux/module.h>
85#include <linux/types.h> 85#include <linux/types.h>
86#include <linux/kernel.h> 86#include <linux/kernel.h>
87#include <linux/mm.h>
88#include <linux/errno.h> 87#include <linux/errno.h>
89#include <linux/interrupt.h>
90#include <linux/rtnetlink.h> 88#include <linux/rtnetlink.h>
91#include <linux/skbuff.h> 89#include <linux/skbuff.h>
92#include <net/pkt_cls.h> 90#include <net/pkt_cls.h>
@@ -224,6 +222,19 @@ static int tcf_em_validate(struct tcf_proto *tp,
224 222
225 if (em->ops == NULL) { 223 if (em->ops == NULL) {
226 err = -ENOENT; 224 err = -ENOENT;
225#ifdef CONFIG_KMOD
226 __rtnl_unlock();
227 request_module("ematch-kind-%u", em_hdr->kind);
228 rtnl_lock();
229 em->ops = tcf_em_lookup(em_hdr->kind);
230 if (em->ops) {
231 /* We dropped the RTNL mutex in order to
232 * perform the module load. Tell the caller
233 * to replay the request. */
234 module_put(em->ops->owner);
235 err = -EAGAIN;
236 }
237#endif
227 goto errout; 238 goto errout;
228 } 239 }
229 240
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bec600af03ca..d92ea26982c5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -19,30 +19,18 @@
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/mm.h>
23#include <linux/socket.h>
24#include <linux/sockios.h>
25#include <linux/in.h>
26#include <linux/errno.h> 22#include <linux/errno.h>
27#include <linux/interrupt.h>
28#include <linux/netdevice.h>
29#include <linux/skbuff.h> 23#include <linux/skbuff.h>
30#include <linux/init.h> 24#include <linux/init.h>
31#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
32#include <linux/seq_file.h> 26#include <linux/seq_file.h>
33#include <linux/kmod.h> 27#include <linux/kmod.h>
34#include <linux/list.h> 28#include <linux/list.h>
35#include <linux/bitops.h>
36#include <linux/hrtimer.h> 29#include <linux/hrtimer.h>
37 30
38#include <net/netlink.h> 31#include <net/netlink.h>
39#include <net/sock.h>
40#include <net/pkt_sched.h> 32#include <net/pkt_sched.h>
41 33
42#include <asm/processor.h>
43#include <asm/uaccess.h>
44#include <asm/system.h>
45
46static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, 34static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
47 struct Qdisc *old, struct Qdisc *new); 35 struct Qdisc *old, struct Qdisc *new);
48static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, 36static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
@@ -515,7 +503,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
515 sch->handle = handle; 503 sch->handle = handle;
516 504
517 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) { 505 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
518#ifdef CONFIG_NET_ESTIMATOR
519 if (tca[TCA_RATE-1]) { 506 if (tca[TCA_RATE-1]) {
520 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 507 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
521 sch->stats_lock, 508 sch->stats_lock,
@@ -531,7 +518,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
531 goto err_out3; 518 goto err_out3;
532 } 519 }
533 } 520 }
534#endif
535 qdisc_lock_tree(dev); 521 qdisc_lock_tree(dev);
536 list_add_tail(&sch->list, &dev->qdisc_list); 522 list_add_tail(&sch->list, &dev->qdisc_list);
537 qdisc_unlock_tree(dev); 523 qdisc_unlock_tree(dev);
@@ -559,11 +545,9 @@ static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
559 if (err) 545 if (err)
560 return err; 546 return err;
561 } 547 }
562#ifdef CONFIG_NET_ESTIMATOR
563 if (tca[TCA_RATE-1]) 548 if (tca[TCA_RATE-1])
564 gen_replace_estimator(&sch->bstats, &sch->rate_est, 549 gen_replace_estimator(&sch->bstats, &sch->rate_est,
565 sch->stats_lock, tca[TCA_RATE-1]); 550 sch->stats_lock, tca[TCA_RATE-1]);
566#endif
567 return 0; 551 return 0;
568} 552}
569 553
@@ -839,9 +823,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
839 goto rtattr_failure; 823 goto rtattr_failure;
840 824
841 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || 825 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
842#ifdef CONFIG_NET_ESTIMATOR
843 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || 826 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
844#endif
845 gnet_stats_copy_queue(&d, &q->qstats) < 0) 827 gnet_stats_copy_queue(&d, &q->qstats) < 0)
846 goto rtattr_failure; 828 goto rtattr_failure;
847 829
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index d1c383fca82c..54b92d22796c 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -8,15 +8,12 @@
8#include <linux/string.h> 8#include <linux/string.h>
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/interrupt.h>
12#include <linux/atmdev.h> 11#include <linux/atmdev.h>
13#include <linux/atmclip.h> 12#include <linux/atmclip.h>
14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
16#include <linux/file.h> /* for fput */ 14#include <linux/file.h> /* for fput */
17#include <net/netlink.h> 15#include <net/netlink.h>
18#include <net/pkt_sched.h> 16#include <net/pkt_sched.h>
19#include <net/sock.h>
20 17
21 18
22extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */ 19extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
@@ -71,7 +68,6 @@ struct atm_flow_data {
71 int ref; /* reference count */ 68 int ref; /* reference count */
72 struct gnet_stats_basic bstats; 69 struct gnet_stats_basic bstats;
73 struct gnet_stats_queue qstats; 70 struct gnet_stats_queue qstats;
74 spinlock_t *stats_lock;
75 struct atm_flow_data *next; 71 struct atm_flow_data *next;
76 struct atm_flow_data *excess; /* flow for excess traffic; 72 struct atm_flow_data *excess; /* flow for excess traffic;
77 NULL to set CLP instead */ 73 NULL to set CLP instead */
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index cb0c456aa349..f914fc43a124 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/netdevice.h>
18#include <linux/skbuff.h> 17#include <linux/skbuff.h>
19#include <net/pkt_sched.h> 18#include <net/pkt_sched.h>
20 19
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index ee2d5967d109..b184c3545145 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -11,28 +11,12 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <asm/uaccess.h>
15#include <asm/system.h>
16#include <linux/bitops.h>
17#include <linux/types.h> 14#include <linux/types.h>
18#include <linux/kernel.h> 15#include <linux/kernel.h>
19#include <linux/string.h> 16#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h>
24#include <linux/errno.h> 17#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/if_ether.h>
27#include <linux/inet.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/notifier.h>
31#include <net/ip.h>
32#include <net/netlink.h>
33#include <net/route.h>
34#include <linux/skbuff.h> 18#include <linux/skbuff.h>
35#include <net/sock.h> 19#include <net/netlink.h>
36#include <net/pkt_sched.h> 20#include <net/pkt_sched.h>
37 21
38 22
@@ -148,7 +132,6 @@ struct cbq_class
148 struct gnet_stats_basic bstats; 132 struct gnet_stats_basic bstats;
149 struct gnet_stats_queue qstats; 133 struct gnet_stats_queue qstats;
150 struct gnet_stats_rate_est rate_est; 134 struct gnet_stats_rate_est rate_est;
151 spinlock_t *stats_lock;
152 struct tc_cbq_xstats xstats; 135 struct tc_cbq_xstats xstats;
153 136
154 struct tcf_proto *filter_list; 137 struct tcf_proto *filter_list;
@@ -1442,7 +1425,6 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
1442 q->link.ewma_log = TC_CBQ_DEF_EWMA; 1425 q->link.ewma_log = TC_CBQ_DEF_EWMA;
1443 q->link.avpkt = q->link.allot/2; 1426 q->link.avpkt = q->link.allot/2;
1444 q->link.minidle = -0x7FFFFFFF; 1427 q->link.minidle = -0x7FFFFFFF;
1445 q->link.stats_lock = &sch->dev->queue_lock;
1446 1428
1447 qdisc_watchdog_init(&q->watchdog, sch); 1429 qdisc_watchdog_init(&q->watchdog, sch);
1448 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 1430 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -1653,9 +1635,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1653 cl->xstats.undertime = cl->undertime - q->now; 1635 cl->xstats.undertime = cl->undertime - q->now;
1654 1636
1655 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || 1637 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1656#ifdef CONFIG_NET_ESTIMATOR
1657 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || 1638 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1658#endif
1659 gnet_stats_copy_queue(d, &cl->qstats) < 0) 1639 gnet_stats_copy_queue(d, &cl->qstats) < 0)
1660 return -1; 1640 return -1;
1661 1641
@@ -1726,9 +1706,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1726 tcf_destroy_chain(cl->filter_list); 1706 tcf_destroy_chain(cl->filter_list);
1727 qdisc_destroy(cl->q); 1707 qdisc_destroy(cl->q);
1728 qdisc_put_rtab(cl->R_tab); 1708 qdisc_put_rtab(cl->R_tab);
1729#ifdef CONFIG_NET_ESTIMATOR
1730 gen_kill_estimator(&cl->bstats, &cl->rate_est); 1709 gen_kill_estimator(&cl->bstats, &cl->rate_est);
1731#endif
1732 if (cl != &q->link) 1710 if (cl != &q->link)
1733 kfree(cl); 1711 kfree(cl);
1734} 1712}
@@ -1873,11 +1851,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1873 1851
1874 sch_tree_unlock(sch); 1852 sch_tree_unlock(sch);
1875 1853
1876#ifdef CONFIG_NET_ESTIMATOR
1877 if (tca[TCA_RATE-1]) 1854 if (tca[TCA_RATE-1])
1878 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1855 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1879 cl->stats_lock, tca[TCA_RATE-1]); 1856 &sch->dev->queue_lock,
1880#endif 1857 tca[TCA_RATE-1]);
1881 return 0; 1858 return 0;
1882 } 1859 }
1883 1860
@@ -1935,7 +1912,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1935 cl->allot = parent->allot; 1912 cl->allot = parent->allot;
1936 cl->quantum = cl->allot; 1913 cl->quantum = cl->allot;
1937 cl->weight = cl->R_tab->rate.rate; 1914 cl->weight = cl->R_tab->rate.rate;
1938 cl->stats_lock = &sch->dev->queue_lock;
1939 1915
1940 sch_tree_lock(sch); 1916 sch_tree_lock(sch);
1941 cbq_link_class(cl); 1917 cbq_link_class(cl);
@@ -1963,11 +1939,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1963 cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1])); 1939 cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
1964 sch_tree_unlock(sch); 1940 sch_tree_unlock(sch);
1965 1941
1966#ifdef CONFIG_NET_ESTIMATOR
1967 if (tca[TCA_RATE-1]) 1942 if (tca[TCA_RATE-1])
1968 gen_new_estimator(&cl->bstats, &cl->rate_est, 1943 gen_new_estimator(&cl->bstats, &cl->rate_est,
1969 cl->stats_lock, tca[TCA_RATE-1]); 1944 &sch->dev->queue_lock, tca[TCA_RATE-1]);
1970#endif
1971 1945
1972 *arg = (unsigned long)cl; 1946 *arg = (unsigned long)cl;
1973 return 0; 1947 return 0;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 3c6fd181263f..4d2c233a8611 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -9,7 +9,6 @@
9#include <linux/string.h> 9#include <linux/string.h>
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/netdevice.h> /* for pkt_sched */
13#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
14#include <net/pkt_sched.h> 13#include <net/pkt_sched.h>
15#include <net/dsfield.h> 14#include <net/dsfield.h>
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index c2689f4ba8de..c264308f17c1 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -13,7 +13,6 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/skbuff.h> 16#include <linux/skbuff.h>
18#include <net/pkt_sched.h> 17#include <net/pkt_sched.h>
19 18
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f4d34480a093..c81649cf0b9e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -11,27 +11,19 @@
11 * - Ingress support 11 * - Ingress support
12 */ 12 */
13 13
14#include <asm/uaccess.h>
15#include <asm/system.h>
16#include <linux/bitops.h> 14#include <linux/bitops.h>
17#include <linux/module.h> 15#include <linux/module.h>
18#include <linux/types.h> 16#include <linux/types.h>
19#include <linux/kernel.h> 17#include <linux/kernel.h>
20#include <linux/sched.h> 18#include <linux/sched.h>
21#include <linux/string.h> 19#include <linux/string.h>
22#include <linux/mm.h>
23#include <linux/socket.h>
24#include <linux/sockios.h>
25#include <linux/in.h>
26#include <linux/errno.h> 20#include <linux/errno.h>
27#include <linux/interrupt.h>
28#include <linux/netdevice.h> 21#include <linux/netdevice.h>
29#include <linux/skbuff.h> 22#include <linux/skbuff.h>
30#include <linux/rtnetlink.h> 23#include <linux/rtnetlink.h>
31#include <linux/init.h> 24#include <linux/init.h>
32#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
33#include <linux/list.h> 26#include <linux/list.h>
34#include <net/sock.h>
35#include <net/pkt_sched.h> 27#include <net/pkt_sched.h>
36 28
37/* Main transmission queue. */ 29/* Main transmission queue. */
@@ -59,122 +51,143 @@ void qdisc_unlock_tree(struct net_device *dev)
59 spin_unlock_bh(&dev->queue_lock); 51 spin_unlock_bh(&dev->queue_lock);
60} 52}
61 53
62/* 54static inline int qdisc_qlen(struct Qdisc *q)
63 dev->queue_lock serializes queue accesses for this device 55{
64 AND dev->qdisc pointer itself. 56 return q->q.qlen;
57}
65 58
66 netif_tx_lock serializes accesses to device driver. 59static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
60 struct Qdisc *q)
61{
62 if (unlikely(skb->next))
63 dev->gso_skb = skb;
64 else
65 q->ops->requeue(skb, q);
67 66
68 dev->queue_lock and netif_tx_lock are mutually exclusive, 67 netif_schedule(dev);
69 if one is grabbed, another must be free. 68 return 0;
70 */ 69}
71 70
71static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
72 struct Qdisc *q)
73{
74 struct sk_buff *skb;
72 75
73/* Kick device. 76 if ((skb = dev->gso_skb))
77 dev->gso_skb = NULL;
78 else
79 skb = q->dequeue(q);
74 80
75 Returns: 0 - queue is empty or throttled. 81 return skb;
76 >0 - queue is not empty. 82}
77 83
78 NOTE: Called under dev->queue_lock with locally disabled BH. 84static inline int handle_dev_cpu_collision(struct sk_buff *skb,
79*/ 85 struct net_device *dev,
86 struct Qdisc *q)
87{
88 int ret;
80 89
90 if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
91 /*
92 * Same CPU holding the lock. It may be a transient
93 * configuration error, when hard_start_xmit() recurses. We
94 * detect it by checking xmit owner and drop the packet when
95 * deadloop is detected. Return OK to try the next skb.
96 */
97 kfree_skb(skb);
98 if (net_ratelimit())
99 printk(KERN_WARNING "Dead loop on netdevice %s, "
100 "fix it urgently!\n", dev->name);
101 ret = qdisc_qlen(q);
102 } else {
103 /*
104 * Another cpu is holding lock, requeue & delay xmits for
105 * some time.
106 */
107 __get_cpu_var(netdev_rx_stat).cpu_collision++;
108 ret = dev_requeue_skb(skb, dev, q);
109 }
110
111 return ret;
112}
113
114/*
115 * NOTE: Called under dev->queue_lock with locally disabled BH.
116 *
117 * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
118 * device at a time. dev->queue_lock serializes queue accesses for
119 * this device AND dev->qdisc pointer itself.
120 *
121 * netif_tx_lock serializes accesses to device driver.
122 *
123 * dev->queue_lock and netif_tx_lock are mutually exclusive,
124 * if one is grabbed, another must be free.
125 *
126 * Note, that this procedure can be called by a watchdog timer
127 *
128 * Returns to the caller:
129 * 0 - queue is empty or throttled.
130 * >0 - queue is not empty.
131 *
132 */
81static inline int qdisc_restart(struct net_device *dev) 133static inline int qdisc_restart(struct net_device *dev)
82{ 134{
83 struct Qdisc *q = dev->qdisc; 135 struct Qdisc *q = dev->qdisc;
84 struct sk_buff *skb; 136 struct sk_buff *skb;
137 unsigned lockless;
138 int ret;
85 139
86 /* Dequeue packet */ 140 /* Dequeue packet */
87 if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) { 141 if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
88 unsigned nolock = (dev->features & NETIF_F_LLTX); 142 return 0;
143
144 /*
145 * When the driver has LLTX set, it does its own locking in
146 * start_xmit. These checks are worth it because even uncongested
147 * locks can be quite expensive. The driver can do a trylock, as
148 * is being done here; in case of lock contention it should return
149 * NETDEV_TX_LOCKED and the packet will be requeued.
150 */
151 lockless = (dev->features & NETIF_F_LLTX);
89 152
90 dev->gso_skb = NULL; 153 if (!lockless && !netif_tx_trylock(dev)) {
154 /* Another CPU grabbed the driver tx lock */
155 return handle_dev_cpu_collision(skb, dev, q);
156 }
91 157
92 /* 158 /* And release queue */
93 * When the driver has LLTX set it does its own locking 159 spin_unlock(&dev->queue_lock);
94 * in start_xmit. No need to add additional overhead by
95 * locking again. These checks are worth it because
96 * even uncongested locks can be quite expensive.
97 * The driver can do trylock like here too, in case
98 * of lock congestion it should return -1 and the packet
99 * will be requeued.
100 */
101 if (!nolock) {
102 if (!netif_tx_trylock(dev)) {
103 collision:
104 /* So, someone grabbed the driver. */
105
106 /* It may be transient configuration error,
107 when hard_start_xmit() recurses. We detect
108 it by checking xmit owner and drop the
109 packet when deadloop is detected.
110 */
111 if (dev->xmit_lock_owner == smp_processor_id()) {
112 kfree_skb(skb);
113 if (net_ratelimit())
114 printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
115 goto out;
116 }
117 __get_cpu_var(netdev_rx_stat).cpu_collision++;
118 goto requeue;
119 }
120 }
121 160
122 { 161 ret = dev_hard_start_xmit(skb, dev);
123 /* And release queue */
124 spin_unlock(&dev->queue_lock);
125
126 if (!netif_queue_stopped(dev)) {
127 int ret;
128
129 ret = dev_hard_start_xmit(skb, dev);
130 if (ret == NETDEV_TX_OK) {
131 if (!nolock) {
132 netif_tx_unlock(dev);
133 }
134 spin_lock(&dev->queue_lock);
135 q = dev->qdisc;
136 goto out;
137 }
138 if (ret == NETDEV_TX_LOCKED && nolock) {
139 spin_lock(&dev->queue_lock);
140 q = dev->qdisc;
141 goto collision;
142 }
143 }
144 162
145 /* NETDEV_TX_BUSY - we need to requeue */ 163 if (!lockless)
146 /* Release the driver */ 164 netif_tx_unlock(dev);
147 if (!nolock) {
148 netif_tx_unlock(dev);
149 }
150 spin_lock(&dev->queue_lock);
151 q = dev->qdisc;
152 }
153 165
154 /* Device kicked us out :( 166 spin_lock(&dev->queue_lock);
155 This is possible in three cases: 167 q = dev->qdisc;
156 168
157 0. driver is locked 169 switch (ret) {
158 1. fastroute is enabled 170 case NETDEV_TX_OK:
159 2. device cannot determine busy state 171 /* Driver sent out skb successfully */
160 before start of transmission (f.e. dialout) 172 ret = qdisc_qlen(q);
161 3. device is buggy (ppp) 173 break;
162 */
163 174
164requeue: 175 case NETDEV_TX_LOCKED:
165 if (unlikely(q == &noop_qdisc)) 176 /* Driver try lock failed */
166 kfree_skb(skb); 177 ret = handle_dev_cpu_collision(skb, dev, q);
167 else if (skb->next) 178 break;
168 dev->gso_skb = skb; 179
169 else 180 default:
170 q->ops->requeue(skb, q); 181 /* Driver returned NETDEV_TX_BUSY - requeue skb */
171 netif_schedule(dev); 182 if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
183 printk(KERN_WARNING "BUG %s code %d qlen %d\n",
184 dev->name, ret, q->q.qlen);
185
186 ret = dev_requeue_skb(skb, dev, q);
187 break;
172 } 188 }
173 return 0;
174 189
175out: 190 return ret;
176 BUG_ON((int) q->q.qlen < 0);
177 return q->q.qlen;
178} 191}
179 192
180void __qdisc_run(struct net_device *dev) 193void __qdisc_run(struct net_device *dev)
@@ -493,9 +506,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
493 return; 506 return;
494 507
495 list_del(&qdisc->list); 508 list_del(&qdisc->list);
496#ifdef CONFIG_NET_ESTIMATOR
497 gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); 509 gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
498#endif
499 if (ops->reset) 510 if (ops->reset)
500 ops->reset(qdisc); 511 ops->reset(qdisc);
501 if (ops->destroy) 512 if (ops->destroy)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index fa1b4fe7a5fd..3cc6dda02e2e 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -21,7 +21,6 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/netdevice.h>
25#include <linux/skbuff.h> 24#include <linux/skbuff.h>
26#include <net/pkt_sched.h> 25#include <net/pkt_sched.h>
27#include <net/red.h> 26#include <net/red.h>
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9d124c4ee3a7..874452c41a01 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -53,7 +53,6 @@
53#include <linux/module.h> 53#include <linux/module.h>
54#include <linux/types.h> 54#include <linux/types.h>
55#include <linux/errno.h> 55#include <linux/errno.h>
56#include <linux/jiffies.h>
57#include <linux/compiler.h> 56#include <linux/compiler.h>
58#include <linux/spinlock.h> 57#include <linux/spinlock.h>
59#include <linux/skbuff.h> 58#include <linux/skbuff.h>
@@ -62,13 +61,11 @@
62#include <linux/list.h> 61#include <linux/list.h>
63#include <linux/rbtree.h> 62#include <linux/rbtree.h>
64#include <linux/init.h> 63#include <linux/init.h>
65#include <linux/netdevice.h>
66#include <linux/rtnetlink.h> 64#include <linux/rtnetlink.h>
67#include <linux/pkt_sched.h> 65#include <linux/pkt_sched.h>
68#include <net/netlink.h> 66#include <net/netlink.h>
69#include <net/pkt_sched.h> 67#include <net/pkt_sched.h>
70#include <net/pkt_cls.h> 68#include <net/pkt_cls.h>
71#include <asm/system.h>
72#include <asm/div64.h> 69#include <asm/div64.h>
73 70
74/* 71/*
@@ -122,7 +119,6 @@ struct hfsc_class
122 struct gnet_stats_basic bstats; 119 struct gnet_stats_basic bstats;
123 struct gnet_stats_queue qstats; 120 struct gnet_stats_queue qstats;
124 struct gnet_stats_rate_est rate_est; 121 struct gnet_stats_rate_est rate_est;
125 spinlock_t *stats_lock;
126 unsigned int level; /* class level in hierarchy */ 122 unsigned int level; /* class level in hierarchy */
127 struct tcf_proto *filter_list; /* filter list */ 123 struct tcf_proto *filter_list; /* filter list */
128 unsigned int filter_cnt; /* filter count */ 124 unsigned int filter_cnt; /* filter count */
@@ -1054,11 +1050,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1054 } 1050 }
1055 sch_tree_unlock(sch); 1051 sch_tree_unlock(sch);
1056 1052
1057#ifdef CONFIG_NET_ESTIMATOR
1058 if (tca[TCA_RATE-1]) 1053 if (tca[TCA_RATE-1])
1059 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1054 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1060 cl->stats_lock, tca[TCA_RATE-1]); 1055 &sch->dev->queue_lock,
1061#endif 1056 tca[TCA_RATE-1]);
1062 return 0; 1057 return 0;
1063 } 1058 }
1064 1059
@@ -1098,7 +1093,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1098 cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); 1093 cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
1099 if (cl->qdisc == NULL) 1094 if (cl->qdisc == NULL)
1100 cl->qdisc = &noop_qdisc; 1095 cl->qdisc = &noop_qdisc;
1101 cl->stats_lock = &sch->dev->queue_lock;
1102 INIT_LIST_HEAD(&cl->children); 1096 INIT_LIST_HEAD(&cl->children);
1103 cl->vt_tree = RB_ROOT; 1097 cl->vt_tree = RB_ROOT;
1104 cl->cf_tree = RB_ROOT; 1098 cl->cf_tree = RB_ROOT;
@@ -1112,11 +1106,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1112 cl->cl_pcvtoff = parent->cl_cvtoff; 1106 cl->cl_pcvtoff = parent->cl_cvtoff;
1113 sch_tree_unlock(sch); 1107 sch_tree_unlock(sch);
1114 1108
1115#ifdef CONFIG_NET_ESTIMATOR
1116 if (tca[TCA_RATE-1]) 1109 if (tca[TCA_RATE-1])
1117 gen_new_estimator(&cl->bstats, &cl->rate_est, 1110 gen_new_estimator(&cl->bstats, &cl->rate_est,
1118 cl->stats_lock, tca[TCA_RATE-1]); 1111 &sch->dev->queue_lock, tca[TCA_RATE-1]);
1119#endif
1120 *arg = (unsigned long)cl; 1112 *arg = (unsigned long)cl;
1121 return 0; 1113 return 0;
1122} 1114}
@@ -1128,9 +1120,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
1128 1120
1129 tcf_destroy_chain(cl->filter_list); 1121 tcf_destroy_chain(cl->filter_list);
1130 qdisc_destroy(cl->qdisc); 1122 qdisc_destroy(cl->qdisc);
1131#ifdef CONFIG_NET_ESTIMATOR
1132 gen_kill_estimator(&cl->bstats, &cl->rate_est); 1123 gen_kill_estimator(&cl->bstats, &cl->rate_est);
1133#endif
1134 if (cl != &q->root) 1124 if (cl != &q->root)
1135 kfree(cl); 1125 kfree(cl);
1136} 1126}
@@ -1384,9 +1374,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1384 xstats.rtwork = cl->cl_cumul; 1374 xstats.rtwork = cl->cl_cumul;
1385 1375
1386 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || 1376 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1387#ifdef CONFIG_NET_ESTIMATOR
1388 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || 1377 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1389#endif
1390 gnet_stats_copy_queue(d, &cl->qstats) < 0) 1378 gnet_stats_copy_queue(d, &cl->qstats) < 0)
1391 return -1; 1379 return -1;
1392 1380
@@ -1448,8 +1436,6 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
1448 return -EINVAL; 1436 return -EINVAL;
1449 qopt = RTA_DATA(opt); 1437 qopt = RTA_DATA(opt);
1450 1438
1451 sch->stats_lock = &sch->dev->queue_lock;
1452
1453 q->defcls = qopt->defcls; 1439 q->defcls = qopt->defcls;
1454 for (i = 0; i < HFSC_HSIZE; i++) 1440 for (i = 0; i < HFSC_HSIZE; i++)
1455 INIT_LIST_HEAD(&q->clhash[i]); 1441 INIT_LIST_HEAD(&q->clhash[i]);
@@ -1464,7 +1450,6 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
1464 sch->handle); 1450 sch->handle);
1465 if (q->root.qdisc == NULL) 1451 if (q->root.qdisc == NULL)
1466 q->root.qdisc = &noop_qdisc; 1452 q->root.qdisc = &noop_qdisc;
1467 q->root.stats_lock = &sch->dev->queue_lock;
1468 INIT_LIST_HEAD(&q->root.children); 1453 INIT_LIST_HEAD(&q->root.children);
1469 q->root.vt_tree = RB_ROOT; 1454 q->root.vt_tree = RB_ROOT;
1470 q->root.cf_tree = RB_ROOT; 1455 q->root.cf_tree = RB_ROOT;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 035788c5b7f8..b417a95df322 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -28,32 +28,16 @@
28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $ 28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29 */ 29 */
30#include <linux/module.h> 30#include <linux/module.h>
31#include <asm/uaccess.h>
32#include <asm/system.h>
33#include <linux/bitops.h>
34#include <linux/types.h> 31#include <linux/types.h>
35#include <linux/kernel.h> 32#include <linux/kernel.h>
36#include <linux/string.h> 33#include <linux/string.h>
37#include <linux/mm.h>
38#include <linux/socket.h>
39#include <linux/sockios.h>
40#include <linux/in.h>
41#include <linux/errno.h> 34#include <linux/errno.h>
42#include <linux/interrupt.h>
43#include <linux/if_ether.h>
44#include <linux/inet.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/notifier.h>
48#include <net/ip.h>
49#include <net/route.h>
50#include <linux/skbuff.h> 35#include <linux/skbuff.h>
51#include <linux/list.h> 36#include <linux/list.h>
52#include <linux/compiler.h> 37#include <linux/compiler.h>
38#include <linux/rbtree.h>
53#include <net/netlink.h> 39#include <net/netlink.h>
54#include <net/sock.h>
55#include <net/pkt_sched.h> 40#include <net/pkt_sched.h>
56#include <linux/rbtree.h>
57 41
58/* HTB algorithm. 42/* HTB algorithm.
59 Author: devik@cdi.cz 43 Author: devik@cdi.cz
@@ -69,8 +53,6 @@
69*/ 53*/
70 54
71#define HTB_HSIZE 16 /* classid hash size */ 55#define HTB_HSIZE 16 /* classid hash size */
72#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
73#define HTB_RATECM 1 /* whether to use rate computer */
74#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */ 56#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
75#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */ 57#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
76 58
@@ -95,12 +77,6 @@ struct htb_class {
95 struct tc_htb_xstats xstats; /* our special stats */ 77 struct tc_htb_xstats xstats; /* our special stats */
96 int refcnt; /* usage count of this class */ 78 int refcnt; /* usage count of this class */
97 79
98#ifdef HTB_RATECM
99 /* rate measurement counters */
100 unsigned long rate_bytes, sum_bytes;
101 unsigned long rate_packets, sum_packets;
102#endif
103
104 /* topology */ 80 /* topology */
105 int level; /* our level (see above) */ 81 int level; /* our level (see above) */
106 struct htb_class *parent; /* parent class */ 82 struct htb_class *parent; /* parent class */
@@ -153,15 +129,12 @@ struct htb_class {
153 /* of un.leaf originals should be done. */ 129 /* of un.leaf originals should be done. */
154}; 130};
155 131
156/* TODO: maybe compute rate when size is too large .. or drop ? */
157static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate, 132static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
158 int size) 133 int size)
159{ 134{
160 int slot = size >> rate->rate.cell_log; 135 int slot = size >> rate->rate.cell_log;
161 if (slot > 255) { 136 if (slot > 255)
162 cl->xstats.giants++; 137 return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
163 slot = 255;
164 }
165 return rate->data[slot]; 138 return rate->data[slot];
166} 139}
167 140
@@ -194,10 +167,6 @@ struct htb_sched {
194 int rate2quantum; /* quant = rate / rate2quantum */ 167 int rate2quantum; /* quant = rate / rate2quantum */
195 psched_time_t now; /* cached dequeue time */ 168 psched_time_t now; /* cached dequeue time */
196 struct qdisc_watchdog watchdog; 169 struct qdisc_watchdog watchdog;
197#ifdef HTB_RATECM
198 struct timer_list rttim; /* rate computer timer */
199 int recmp_bucket; /* which hash bucket to recompute next */
200#endif
201 170
202 /* non shaped skbs; let them go directly thru */ 171 /* non shaped skbs; let them go directly thru */
203 struct sk_buff_head direct_queue; 172 struct sk_buff_head direct_queue;
@@ -634,13 +603,14 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
634 cl->qstats.drops++; 603 cl->qstats.drops++;
635 return NET_XMIT_DROP; 604 return NET_XMIT_DROP;
636 } else { 605 } else {
637 cl->bstats.packets++; 606 cl->bstats.packets +=
607 skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
638 cl->bstats.bytes += skb->len; 608 cl->bstats.bytes += skb->len;
639 htb_activate(q, cl); 609 htb_activate(q, cl);
640 } 610 }
641 611
642 sch->q.qlen++; 612 sch->q.qlen++;
643 sch->bstats.packets++; 613 sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
644 sch->bstats.bytes += skb->len; 614 sch->bstats.bytes += skb->len;
645 return NET_XMIT_SUCCESS; 615 return NET_XMIT_SUCCESS;
646} 616}
@@ -677,34 +647,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
677 return NET_XMIT_SUCCESS; 647 return NET_XMIT_SUCCESS;
678} 648}
679 649
680#ifdef HTB_RATECM
681#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
682static void htb_rate_timer(unsigned long arg)
683{
684 struct Qdisc *sch = (struct Qdisc *)arg;
685 struct htb_sched *q = qdisc_priv(sch);
686 struct hlist_node *p;
687 struct htb_class *cl;
688
689
690 /* lock queue so that we can muck with it */
691 spin_lock_bh(&sch->dev->queue_lock);
692
693 q->rttim.expires = jiffies + HZ;
694 add_timer(&q->rttim);
695
696 /* scan and recompute one bucket at time */
697 if (++q->recmp_bucket >= HTB_HSIZE)
698 q->recmp_bucket = 0;
699
700 hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) {
701 RT_GEN(cl->sum_bytes, cl->rate_bytes);
702 RT_GEN(cl->sum_packets, cl->rate_packets);
703 }
704 spin_unlock_bh(&sch->dev->queue_lock);
705}
706#endif
707
708/** 650/**
709 * htb_charge_class - charges amount "bytes" to leaf and ancestors 651 * htb_charge_class - charges amount "bytes" to leaf and ancestors
710 * 652 *
@@ -717,8 +659,9 @@ static void htb_rate_timer(unsigned long arg)
717 * In such case we remove class from event queue first. 659 * In such case we remove class from event queue first.
718 */ 660 */
719static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, 661static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
720 int level, int bytes) 662 int level, struct sk_buff *skb)
721{ 663{
664 int bytes = skb->len;
722 long toks, diff; 665 long toks, diff;
723 enum htb_cmode old_mode; 666 enum htb_cmode old_mode;
724 667
@@ -750,16 +693,12 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
750 if (cl->cmode != HTB_CAN_SEND) 693 if (cl->cmode != HTB_CAN_SEND)
751 htb_add_to_wait_tree(q, cl, diff); 694 htb_add_to_wait_tree(q, cl, diff);
752 } 695 }
753#ifdef HTB_RATECM
754 /* update rate counters */
755 cl->sum_bytes += bytes;
756 cl->sum_packets++;
757#endif
758 696
759 /* update byte stats except for leaves which are already updated */ 697 /* update byte stats except for leaves which are already updated */
760 if (cl->level) { 698 if (cl->level) {
761 cl->bstats.bytes += bytes; 699 cl->bstats.bytes += bytes;
762 cl->bstats.packets++; 700 cl->bstats.packets += skb_is_gso(skb)?
701 skb_shinfo(skb)->gso_segs:1;
763 } 702 }
764 cl = cl->parent; 703 cl = cl->parent;
765 } 704 }
@@ -943,7 +882,7 @@ next:
943 gives us slightly better performance */ 882 gives us slightly better performance */
944 if (!cl->un.leaf.q->q.qlen) 883 if (!cl->un.leaf.q->q.qlen)
945 htb_deactivate(q, cl); 884 htb_deactivate(q, cl);
946 htb_charge_class(q, cl, level, skb->len); 885 htb_charge_class(q, cl, level, skb);
947 } 886 }
948 return skb; 887 return skb;
949} 888}
@@ -1095,13 +1034,6 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
1095 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ 1034 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
1096 q->direct_qlen = 2; 1035 q->direct_qlen = 2;
1097 1036
1098#ifdef HTB_RATECM
1099 init_timer(&q->rttim);
1100 q->rttim.function = htb_rate_timer;
1101 q->rttim.data = (unsigned long)sch;
1102 q->rttim.expires = jiffies + HZ;
1103 add_timer(&q->rttim);
1104#endif
1105 if ((q->rate2quantum = gopt->rate2quantum) < 1) 1037 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1106 q->rate2quantum = 1; 1038 q->rate2quantum = 1;
1107 q->defcls = gopt->defcls; 1039 q->defcls = gopt->defcls;
@@ -1175,11 +1107,6 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1175{ 1107{
1176 struct htb_class *cl = (struct htb_class *)arg; 1108 struct htb_class *cl = (struct htb_class *)arg;
1177 1109
1178#ifdef HTB_RATECM
1179 cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
1180 cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
1181#endif
1182
1183 if (!cl->level && cl->un.leaf.q) 1110 if (!cl->level && cl->un.leaf.q)
1184 cl->qstats.qlen = cl->un.leaf.q->q.qlen; 1111 cl->qstats.qlen = cl->un.leaf.q->q.qlen;
1185 cl->xstats.tokens = cl->tokens; 1112 cl->xstats.tokens = cl->tokens;
@@ -1277,6 +1204,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1277 BUG_TRAP(cl->un.leaf.q); 1204 BUG_TRAP(cl->un.leaf.q);
1278 qdisc_destroy(cl->un.leaf.q); 1205 qdisc_destroy(cl->un.leaf.q);
1279 } 1206 }
1207 gen_kill_estimator(&cl->bstats, &cl->rate_est);
1280 qdisc_put_rtab(cl->rate); 1208 qdisc_put_rtab(cl->rate);
1281 qdisc_put_rtab(cl->ceil); 1209 qdisc_put_rtab(cl->ceil);
1282 1210
@@ -1305,9 +1233,6 @@ static void htb_destroy(struct Qdisc *sch)
1305 struct htb_sched *q = qdisc_priv(sch); 1233 struct htb_sched *q = qdisc_priv(sch);
1306 1234
1307 qdisc_watchdog_cancel(&q->watchdog); 1235 qdisc_watchdog_cancel(&q->watchdog);
1308#ifdef HTB_RATECM
1309 del_timer_sync(&q->rttim);
1310#endif
1311 /* This line used to be after htb_destroy_class call below 1236 /* This line used to be after htb_destroy_class call below
1312 and surprisingly it worked in 2.4. But it must precede it 1237 and surprisingly it worked in 2.4. But it must precede it
1313 because filter need its target class alive to be able to call 1238 because filter need its target class alive to be able to call
@@ -1403,6 +1328,20 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1403 if (!cl) { /* new class */ 1328 if (!cl) { /* new class */
1404 struct Qdisc *new_q; 1329 struct Qdisc *new_q;
1405 int prio; 1330 int prio;
1331 struct {
1332 struct rtattr rta;
1333 struct gnet_estimator opt;
1334 } est = {
1335 .rta = {
1336 .rta_len = RTA_LENGTH(sizeof(est.opt)),
1337 .rta_type = TCA_RATE,
1338 },
1339 .opt = {
1340 /* 4s interval, 16s averaging constant */
1341 .interval = 2,
1342 .ewma_log = 2,
1343 },
1344 };
1406 1345
1407 /* check for valid classid */ 1346 /* check for valid classid */
1408 if (!classid || TC_H_MAJ(classid ^ sch->handle) 1347 if (!classid || TC_H_MAJ(classid ^ sch->handle)
@@ -1418,6 +1357,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1418 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1357 if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1419 goto failure; 1358 goto failure;
1420 1359
1360 gen_new_estimator(&cl->bstats, &cl->rate_est,
1361 &sch->dev->queue_lock,
1362 tca[TCA_RATE-1] ? : &est.rta);
1421 cl->refcnt = 1; 1363 cl->refcnt = 1;
1422 INIT_LIST_HEAD(&cl->sibling); 1364 INIT_LIST_HEAD(&cl->sibling);
1423 INIT_HLIST_NODE(&cl->hlist); 1365 INIT_HLIST_NODE(&cl->hlist);
@@ -1469,8 +1411,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1469 hlist_add_head(&cl->hlist, q->hash + htb_hash(classid)); 1411 hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
1470 list_add_tail(&cl->sibling, 1412 list_add_tail(&cl->sibling,
1471 parent ? &parent->children : &q->root); 1413 parent ? &parent->children : &q->root);
1472 } else 1414 } else {
1415 if (tca[TCA_RATE-1])
1416 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1417 &sch->dev->queue_lock,
1418 tca[TCA_RATE-1]);
1473 sch_tree_lock(sch); 1419 sch_tree_lock(sch);
1420 }
1474 1421
1475 /* it used to be a nasty bug here, we have to check that node 1422 /* it used to be a nasty bug here, we have to check that node
1476 is really leaf before changing cl->un.leaf ! */ 1423 is really leaf before changing cl->un.leaf ! */
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f8b9f1cdf738..cd0aab6a2a7c 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -9,21 +9,14 @@
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/list.h>
12#include <linux/skbuff.h> 13#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/rtnetlink.h> 14#include <linux/rtnetlink.h>
15#include <linux/netfilter_ipv4.h> 15#include <linux/netfilter_ipv4.h>
16#include <linux/netfilter_ipv6.h> 16#include <linux/netfilter_ipv6.h>
17#include <linux/netfilter.h> 17#include <linux/netfilter.h>
18#include <linux/smp.h>
19#include <net/netlink.h> 18#include <net/netlink.h>
20#include <net/pkt_sched.h> 19#include <net/pkt_sched.h>
21#include <asm/byteorder.h>
22#include <asm/uaccess.h>
23#include <linux/kmod.h>
24#include <linux/stat.h>
25#include <linux/interrupt.h>
26#include <linux/list.h>
27 20
28 21
29#undef DEBUG_INGRESS 22#undef DEBUG_INGRESS
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5d9d8bc9cc3a..9e5e87e81f00 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -14,11 +14,9 @@
14 */ 14 */
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/bitops.h>
18#include <linux/types.h> 17#include <linux/types.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/errno.h> 19#include <linux/errno.h>
21#include <linux/netdevice.h>
22#include <linux/skbuff.h> 20#include <linux/skbuff.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24 22
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 6d7542c26e47..2d8c08493d6e 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -12,37 +12,23 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <asm/uaccess.h>
16#include <asm/system.h>
17#include <linux/bitops.h>
18#include <linux/types.h> 15#include <linux/types.h>
19#include <linux/kernel.h> 16#include <linux/kernel.h>
20#include <linux/string.h> 17#include <linux/string.h>
21#include <linux/mm.h>
22#include <linux/socket.h>
23#include <linux/sockios.h>
24#include <linux/in.h>
25#include <linux/errno.h> 18#include <linux/errno.h>
26#include <linux/interrupt.h>
27#include <linux/if_ether.h>
28#include <linux/inet.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/notifier.h>
32#include <net/ip.h>
33#include <net/route.h>
34#include <linux/skbuff.h> 19#include <linux/skbuff.h>
35#include <net/netlink.h> 20#include <net/netlink.h>
36#include <net/sock.h>
37#include <net/pkt_sched.h> 21#include <net/pkt_sched.h>
38 22
39 23
40struct prio_sched_data 24struct prio_sched_data
41{ 25{
42 int bands; 26 int bands;
27 int curband; /* for round-robin */
43 struct tcf_proto *filter_list; 28 struct tcf_proto *filter_list;
44 u8 prio2band[TC_PRIO_MAX+1]; 29 u8 prio2band[TC_PRIO_MAX+1];
45 struct Qdisc *queues[TCQ_PRIO_BANDS]; 30 struct Qdisc *queues[TCQ_PRIO_BANDS];
31 int mq;
46}; 32};
47 33
48 34
@@ -70,14 +56,17 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
70#endif 56#endif
71 if (TC_H_MAJ(band)) 57 if (TC_H_MAJ(band))
72 band = 0; 58 band = 0;
73 return q->queues[q->prio2band[band&TC_PRIO_MAX]]; 59 band = q->prio2band[band&TC_PRIO_MAX];
60 goto out;
74 } 61 }
75 band = res.classid; 62 band = res.classid;
76 } 63 }
77 band = TC_H_MIN(band) - 1; 64 band = TC_H_MIN(band) - 1;
78 if (band >= q->bands) 65 if (band >= q->bands)
79 return q->queues[q->prio2band[0]]; 66 band = q->prio2band[0];
80 67out:
68 if (q->mq)
69 skb_set_queue_mapping(skb, band);
81 return q->queues[band]; 70 return q->queues[band];
82} 71}
83 72
@@ -144,17 +133,58 @@ prio_dequeue(struct Qdisc* sch)
144 struct Qdisc *qdisc; 133 struct Qdisc *qdisc;
145 134
146 for (prio = 0; prio < q->bands; prio++) { 135 for (prio = 0; prio < q->bands; prio++) {
147 qdisc = q->queues[prio]; 136 /* Check if the target subqueue is available before
148 skb = qdisc->dequeue(qdisc); 137 * pulling an skb. This way we avoid excessive requeues
149 if (skb) { 138 * for slower queues.
150 sch->q.qlen--; 139 */
151 return skb; 140 if (!netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
141 qdisc = q->queues[prio];
142 skb = qdisc->dequeue(qdisc);
143 if (skb) {
144 sch->q.qlen--;
145 return skb;
146 }
152 } 147 }
153 } 148 }
154 return NULL; 149 return NULL;
155 150
156} 151}
157 152
153static struct sk_buff *rr_dequeue(struct Qdisc* sch)
154{
155 struct sk_buff *skb;
156 struct prio_sched_data *q = qdisc_priv(sch);
157 struct Qdisc *qdisc;
158 int bandcount;
159
160 /* Only take one pass through the queues. If nothing is available,
161 * return nothing.
162 */
163 for (bandcount = 0; bandcount < q->bands; bandcount++) {
164 /* Check if the target subqueue is available before
165 * pulling an skb. This way we avoid excessive requeues
166 * for slower queues. If the queue is stopped, try the
167 * next queue.
168 */
169 if (!netif_subqueue_stopped(sch->dev,
170 (q->mq ? q->curband : 0))) {
171 qdisc = q->queues[q->curband];
172 skb = qdisc->dequeue(qdisc);
173 if (skb) {
174 sch->q.qlen--;
175 q->curband++;
176 if (q->curband >= q->bands)
177 q->curband = 0;
178 return skb;
179 }
180 }
181 q->curband++;
182 if (q->curband >= q->bands)
183 q->curband = 0;
184 }
185 return NULL;
186}
187
158static unsigned int prio_drop(struct Qdisc* sch) 188static unsigned int prio_drop(struct Qdisc* sch)
159{ 189{
160 struct prio_sched_data *q = qdisc_priv(sch); 190 struct prio_sched_data *q = qdisc_priv(sch);
@@ -198,21 +228,41 @@ prio_destroy(struct Qdisc* sch)
198static int prio_tune(struct Qdisc *sch, struct rtattr *opt) 228static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
199{ 229{
200 struct prio_sched_data *q = qdisc_priv(sch); 230 struct prio_sched_data *q = qdisc_priv(sch);
201 struct tc_prio_qopt *qopt = RTA_DATA(opt); 231 struct tc_prio_qopt *qopt;
232 struct rtattr *tb[TCA_PRIO_MAX];
202 int i; 233 int i;
203 234
204 if (opt->rta_len < RTA_LENGTH(sizeof(*qopt))) 235 if (rtattr_parse_nested_compat(tb, TCA_PRIO_MAX, opt, qopt,
236 sizeof(*qopt)))
205 return -EINVAL; 237 return -EINVAL;
206 if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) 238 q->bands = qopt->bands;
239 /* If we're multiqueue, make sure the number of incoming bands
240 * matches the number of queues on the device we're associating with.
241 * If the number of bands requested is zero, then set q->bands to
242 * dev->egress_subqueue_count.
243 */
244 q->mq = RTA_GET_FLAG(tb[TCA_PRIO_MQ - 1]);
245 if (q->mq) {
246 if (sch->handle != TC_H_ROOT)
247 return -EINVAL;
248 if (netif_is_multiqueue(sch->dev)) {
249 if (q->bands == 0)
250 q->bands = sch->dev->egress_subqueue_count;
251 else if (q->bands != sch->dev->egress_subqueue_count)
252 return -EINVAL;
253 } else
254 return -EOPNOTSUPP;
255 }
256
257 if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
207 return -EINVAL; 258 return -EINVAL;
208 259
209 for (i=0; i<=TC_PRIO_MAX; i++) { 260 for (i=0; i<=TC_PRIO_MAX; i++) {
210 if (qopt->priomap[i] >= qopt->bands) 261 if (qopt->priomap[i] >= q->bands)
211 return -EINVAL; 262 return -EINVAL;
212 } 263 }
213 264
214 sch_tree_lock(sch); 265 sch_tree_lock(sch);
215 q->bands = qopt->bands;
216 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); 266 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
217 267
218 for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { 268 for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
@@ -268,11 +318,17 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
268{ 318{
269 struct prio_sched_data *q = qdisc_priv(sch); 319 struct prio_sched_data *q = qdisc_priv(sch);
270 unsigned char *b = skb_tail_pointer(skb); 320 unsigned char *b = skb_tail_pointer(skb);
321 struct rtattr *nest;
271 struct tc_prio_qopt opt; 322 struct tc_prio_qopt opt;
272 323
273 opt.bands = q->bands; 324 opt.bands = q->bands;
274 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); 325 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
275 RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 326
327 nest = RTA_NEST_COMPAT(skb, TCA_OPTIONS, sizeof(opt), &opt);
328 if (q->mq)
329 RTA_PUT_FLAG(skb, TCA_PRIO_MQ);
330 RTA_NEST_COMPAT_END(skb, nest);
331
276 return skb->len; 332 return skb->len;
277 333
278rtattr_failure: 334rtattr_failure:
@@ -443,17 +499,44 @@ static struct Qdisc_ops prio_qdisc_ops = {
443 .owner = THIS_MODULE, 499 .owner = THIS_MODULE,
444}; 500};
445 501
502static struct Qdisc_ops rr_qdisc_ops = {
503 .next = NULL,
504 .cl_ops = &prio_class_ops,
505 .id = "rr",
506 .priv_size = sizeof(struct prio_sched_data),
507 .enqueue = prio_enqueue,
508 .dequeue = rr_dequeue,
509 .requeue = prio_requeue,
510 .drop = prio_drop,
511 .init = prio_init,
512 .reset = prio_reset,
513 .destroy = prio_destroy,
514 .change = prio_tune,
515 .dump = prio_dump,
516 .owner = THIS_MODULE,
517};
518
446static int __init prio_module_init(void) 519static int __init prio_module_init(void)
447{ 520{
448 return register_qdisc(&prio_qdisc_ops); 521 int err;
522
523 err = register_qdisc(&prio_qdisc_ops);
524 if (err < 0)
525 return err;
526 err = register_qdisc(&rr_qdisc_ops);
527 if (err < 0)
528 unregister_qdisc(&prio_qdisc_ops);
529 return err;
449} 530}
450 531
451static void __exit prio_module_exit(void) 532static void __exit prio_module_exit(void)
452{ 533{
453 unregister_qdisc(&prio_qdisc_ops); 534 unregister_qdisc(&prio_qdisc_ops);
535 unregister_qdisc(&rr_qdisc_ops);
454} 536}
455 537
456module_init(prio_module_init) 538module_init(prio_module_init)
457module_exit(prio_module_exit) 539module_exit(prio_module_exit)
458 540
459MODULE_LICENSE("GPL"); 541MODULE_LICENSE("GPL");
542MODULE_ALIAS("sch_rr");
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 00db53eb8159..9b95fefb70f4 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -17,7 +17,6 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/netdevice.h>
21#include <linux/skbuff.h> 20#include <linux/skbuff.h>
22#include <net/pkt_sched.h> 21#include <net/pkt_sched.h>
23#include <net/inet_ecn.h> 22#include <net/inet_ecn.h>
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 96dfdf78d32c..957957309859 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -10,31 +10,17 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/uaccess.h>
14#include <asm/system.h>
15#include <linux/bitops.h>
16#include <linux/types.h> 13#include <linux/types.h>
17#include <linux/kernel.h> 14#include <linux/kernel.h>
18#include <linux/jiffies.h> 15#include <linux/jiffies.h>
19#include <linux/string.h> 16#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h> 17#include <linux/in.h>
24#include <linux/errno.h> 18#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/if_ether.h>
27#include <linux/inet.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/notifier.h>
31#include <linux/init.h> 19#include <linux/init.h>
32#include <net/ip.h>
33#include <net/netlink.h>
34#include <linux/ipv6.h> 20#include <linux/ipv6.h>
35#include <net/route.h>
36#include <linux/skbuff.h> 21#include <linux/skbuff.h>
37#include <net/sock.h> 22#include <net/ip.h>
23#include <net/netlink.h>
38#include <net/pkt_sched.h> 24#include <net/pkt_sched.h>
39 25
40 26
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 53862953baaf..22e431dace54 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -13,29 +13,12 @@
13 */ 13 */
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <asm/uaccess.h>
17#include <asm/system.h>
18#include <linux/bitops.h>
19#include <linux/types.h> 16#include <linux/types.h>
20#include <linux/kernel.h> 17#include <linux/kernel.h>
21#include <linux/jiffies.h>
22#include <linux/string.h> 18#include <linux/string.h>
23#include <linux/mm.h>
24#include <linux/socket.h>
25#include <linux/sockios.h>
26#include <linux/in.h>
27#include <linux/errno.h> 19#include <linux/errno.h>
28#include <linux/interrupt.h>
29#include <linux/if_ether.h>
30#include <linux/inet.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/notifier.h>
34#include <net/ip.h>
35#include <net/netlink.h>
36#include <net/route.h>
37#include <linux/skbuff.h> 20#include <linux/skbuff.h>
38#include <net/sock.h> 21#include <net/netlink.h>
39#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
40 23
41 24
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index f05ad9a30b4c..0968184ea6be 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -9,30 +9,17 @@
9 */ 9 */
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <asm/uaccess.h>
13#include <asm/system.h>
14#include <linux/bitops.h>
15#include <linux/types.h> 12#include <linux/types.h>
16#include <linux/kernel.h> 13#include <linux/kernel.h>
17#include <linux/string.h> 14#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/socket.h>
20#include <linux/sockios.h>
21#include <linux/in.h>
22#include <linux/errno.h> 15#include <linux/errno.h>
23#include <linux/interrupt.h>
24#include <linux/if_arp.h> 16#include <linux/if_arp.h>
25#include <linux/if_ether.h>
26#include <linux/inet.h>
27#include <linux/netdevice.h> 17#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/notifier.h>
30#include <linux/init.h> 18#include <linux/init.h>
31#include <net/ip.h>
32#include <net/route.h>
33#include <linux/skbuff.h> 19#include <linux/skbuff.h>
34#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
35#include <net/sock.h> 21#include <net/dst.h>
22#include <net/neighbour.h>
36#include <net/pkt_sched.h> 23#include <net/pkt_sched.h>
37 24
38/* 25/*
@@ -225,7 +212,6 @@ static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt)
225 return 0; 212 return 0;
226} 213}
227 214
228/* "teql*" netdevice routines */
229 215
230static int 216static int
231__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) 217__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
@@ -277,6 +263,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
277 int busy; 263 int busy;
278 int nores; 264 int nores;
279 int len = skb->len; 265 int len = skb->len;
266 int subq = skb->queue_mapping;
280 struct sk_buff *skb_res = NULL; 267 struct sk_buff *skb_res = NULL;
281 268
282 start = master->slaves; 269 start = master->slaves;
@@ -293,7 +280,9 @@ restart:
293 280
294 if (slave->qdisc_sleeping != q) 281 if (slave->qdisc_sleeping != q)
295 continue; 282 continue;
296 if (netif_queue_stopped(slave) || ! netif_running(slave)) { 283 if (netif_queue_stopped(slave) ||
284 netif_subqueue_stopped(slave, subq) ||
285 !netif_running(slave)) {
297 busy = 1; 286 busy = 1;
298 continue; 287 continue;
299 } 288 }
@@ -302,6 +291,7 @@ restart:
302 case 0: 291 case 0:
303 if (netif_tx_trylock(slave)) { 292 if (netif_tx_trylock(slave)) {
304 if (!netif_queue_stopped(slave) && 293 if (!netif_queue_stopped(slave) &&
294 !netif_subqueue_stopped(slave, subq) &&
305 slave->hard_start_xmit(skb, slave) == 0) { 295 slave->hard_start_xmit(skb, slave) == 0) {
306 netif_tx_unlock(slave); 296 netif_tx_unlock(slave);
307 master->slaves = NEXT_SLAVE(q); 297 master->slaves = NEXT_SLAVE(q);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 2f12bf2d8d3c..e4cd841a22e4 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -250,7 +250,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
250 return 0; 250 return 0;
251} 251}
252 252
253static struct seq_operations sctp_eps_ops = { 253static const struct seq_operations sctp_eps_ops = {
254 .start = sctp_eps_seq_start, 254 .start = sctp_eps_seq_start,
255 .next = sctp_eps_seq_next, 255 .next = sctp_eps_seq_next,
256 .stop = sctp_eps_seq_stop, 256 .stop = sctp_eps_seq_stop,
@@ -361,7 +361,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
361 return 0; 361 return 0;
362} 362}
363 363
364static struct seq_operations sctp_assoc_ops = { 364static const struct seq_operations sctp_assoc_ops = {
365 .start = sctp_assocs_seq_start, 365 .start = sctp_assocs_seq_start,
366 .next = sctp_assocs_seq_next, 366 .next = sctp_assocs_seq_next,
367 .stop = sctp_assocs_seq_stop, 367 .stop = sctp_assocs_seq_stop,
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 543b085ae2c1..01c3c4105204 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1210,7 +1210,7 @@ static int c_show(struct seq_file *m, void *p)
1210 return cd->cache_show(m, cd, cp); 1210 return cd->cache_show(m, cd, cp);
1211} 1211}
1212 1212
1213static struct seq_operations cache_content_op = { 1213static const struct seq_operations cache_content_op = {
1214 .start = c_start, 1214 .start = c_start,
1215 .next = c_next, 1215 .next = c_next,
1216 .stop = c_stop, 1216 .stop = c_stop,
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 77d2d9ce8962..711ca4b1f051 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2006, Ericsson AB 4 * Copyright (c) 2001-2007, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -87,6 +87,9 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
87/** 87/**
88 * recv_msg - handle incoming TIPC message from an Ethernet interface 88 * recv_msg - handle incoming TIPC message from an Ethernet interface
89 * 89 *
90 * Accept only packets explicitly sent to this node, or broadcast packets;
91 * ignores packets sent using Ethernet multicast, and traffic sent to other
92 * nodes (which can happen if interface is running in promiscuous mode).
90 * Routine truncates any Ethernet padding/CRC appended to the message, 93 * Routine truncates any Ethernet padding/CRC appended to the message,
91 * and ensures message size matches actual length 94 * and ensures message size matches actual length
92 */ 95 */
@@ -98,9 +101,7 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
98 u32 size; 101 u32 size;
99 102
100 if (likely(eb_ptr->bearer)) { 103 if (likely(eb_ptr->bearer)) {
101 if (likely(!dev->promiscuity) || 104 if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
102 !memcmp(skb_mac_header(buf), dev->dev_addr, ETH_ALEN) ||
103 !memcmp(skb_mac_header(buf), dev->broadcast, ETH_ALEN)) {
104 size = msg_size((struct tipc_msg *)buf->data); 105 size = msg_size((struct tipc_msg *)buf->data);
105 skb_trim(buf, size); 106 skb_trim(buf, size);
106 if (likely(buf->len == size)) { 107 if (likely(buf->len == size)) {
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2124f32ef29f..5adfdfd49d61 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/link.c: TIPC link code 2 * net/tipc/link.c: TIPC link code
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2007, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -1260,7 +1260,7 @@ again:
1260 * (Must not hold any locks while building message.) 1260 * (Must not hold any locks while building message.)
1261 */ 1261 */
1262 1262
1263 res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt, 1263 res = msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
1264 !sender->user_port, &buf); 1264 !sender->user_port, &buf);
1265 1265
1266 read_lock_bh(&tipc_net_lock); 1266 read_lock_bh(&tipc_net_lock);
@@ -1271,7 +1271,7 @@ again:
1271 if (likely(l_ptr)) { 1271 if (likely(l_ptr)) {
1272 if (likely(buf)) { 1272 if (likely(buf)) {
1273 res = link_send_buf_fast(l_ptr, buf, 1273 res = link_send_buf_fast(l_ptr, buf,
1274 &sender->max_pkt); 1274 &sender->publ.max_pkt);
1275 if (unlikely(res < 0)) 1275 if (unlikely(res < 0))
1276 buf_discard(buf); 1276 buf_discard(buf);
1277exit: 1277exit:
@@ -1299,12 +1299,12 @@ exit:
1299 * then re-try fast path or fragment the message 1299 * then re-try fast path or fragment the message
1300 */ 1300 */
1301 1301
1302 sender->max_pkt = link_max_pkt(l_ptr); 1302 sender->publ.max_pkt = link_max_pkt(l_ptr);
1303 tipc_node_unlock(node); 1303 tipc_node_unlock(node);
1304 read_unlock_bh(&tipc_net_lock); 1304 read_unlock_bh(&tipc_net_lock);
1305 1305
1306 1306
1307 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 1307 if ((msg_hdr_sz(hdr) + res) <= sender->publ.max_pkt)
1308 goto again; 1308 goto again;
1309 1309
1310 return link_send_sections_long(sender, msg_sect, 1310 return link_send_sections_long(sender, msg_sect,
@@ -1357,7 +1357,7 @@ static int link_send_sections_long(struct port *sender,
1357 1357
1358again: 1358again:
1359 fragm_no = 1; 1359 fragm_no = 1;
1360 max_pkt = sender->max_pkt - INT_H_SIZE; 1360 max_pkt = sender->publ.max_pkt - INT_H_SIZE;
1361 /* leave room for tunnel header in case of link changeover */ 1361 /* leave room for tunnel header in case of link changeover */
1362 fragm_sz = max_pkt - INT_H_SIZE; 1362 fragm_sz = max_pkt - INT_H_SIZE;
1363 /* leave room for fragmentation header in each fragment */ 1363 /* leave room for fragmentation header in each fragment */
@@ -1463,7 +1463,7 @@ error:
1463 goto reject; 1463 goto reject;
1464 } 1464 }
1465 if (link_max_pkt(l_ptr) < max_pkt) { 1465 if (link_max_pkt(l_ptr) < max_pkt) {
1466 sender->max_pkt = link_max_pkt(l_ptr); 1466 sender->publ.max_pkt = link_max_pkt(l_ptr);
1467 tipc_node_unlock(node); 1467 tipc_node_unlock(node);
1468 for (; buf_chain; buf_chain = buf) { 1468 for (; buf_chain; buf_chain = buf) {
1469 buf = buf_chain->next; 1469 buf = buf_chain->next;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index bcd5da00737b..5d2b9ce84d0a 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/port.c: TIPC port code 2 * net/tipc/port.c: TIPC port code
3 * 3 *
4 * Copyright (c) 1992-2006, Ericsson AB 4 * Copyright (c) 1992-2007, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -239,6 +239,8 @@ u32 tipc_createport_raw(void *usr_handle,
239 } 239 }
240 240
241 tipc_port_lock(ref); 241 tipc_port_lock(ref);
242 p_ptr->publ.usr_handle = usr_handle;
243 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
242 p_ptr->publ.ref = ref; 244 p_ptr->publ.ref = ref;
243 msg = &p_ptr->publ.phdr; 245 msg = &p_ptr->publ.phdr;
244 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0); 246 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
@@ -248,11 +250,9 @@ u32 tipc_createport_raw(void *usr_handle,
248 msg_set_importance(msg,importance); 250 msg_set_importance(msg,importance);
249 p_ptr->last_in_seqno = 41; 251 p_ptr->last_in_seqno = 41;
250 p_ptr->sent = 1; 252 p_ptr->sent = 1;
251 p_ptr->publ.usr_handle = usr_handle;
252 INIT_LIST_HEAD(&p_ptr->wait_list); 253 INIT_LIST_HEAD(&p_ptr->wait_list);
253 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); 254 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
254 p_ptr->congested_link = NULL; 255 p_ptr->congested_link = NULL;
255 p_ptr->max_pkt = MAX_PKT_DEFAULT;
256 p_ptr->dispatcher = dispatcher; 256 p_ptr->dispatcher = dispatcher;
257 p_ptr->wakeup = wakeup; 257 p_ptr->wakeup = wakeup;
258 p_ptr->user_port = NULL; 258 p_ptr->user_port = NULL;
@@ -1243,7 +1243,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1243 res = TIPC_OK; 1243 res = TIPC_OK;
1244exit: 1244exit:
1245 tipc_port_unlock(p_ptr); 1245 tipc_port_unlock(p_ptr);
1246 p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref); 1246 p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
1247 return res; 1247 return res;
1248} 1248}
1249 1249
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 7ef4d64b32f7..e5f8c16429bd 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/port.h: Include file for TIPC port code 2 * net/tipc/port.h: Include file for TIPC port code
3 * 3 *
4 * Copyright (c) 1994-2006, Ericsson AB 4 * Copyright (c) 1994-2007, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -81,7 +81,6 @@ struct user_port {
81 * @acked: 81 * @acked:
82 * @publications: list of publications for port 82 * @publications: list of publications for port
83 * @pub_count: total # of publications port has made during its lifetime 83 * @pub_count: total # of publications port has made during its lifetime
84 * @max_pkt: maximum packet size "hint" used when building messages sent by port
85 * @probing_state: 84 * @probing_state:
86 * @probing_interval: 85 * @probing_interval:
87 * @last_in_seqno: 86 * @last_in_seqno:
@@ -102,7 +101,6 @@ struct port {
102 u32 acked; 101 u32 acked;
103 struct list_head publications; 102 struct list_head publications;
104 u32 pub_count; 103 u32 pub_count;
105 u32 max_pkt;
106 u32 probing_state; 104 u32 probing_state;
107 u32 probing_interval; 105 u32 probing_interval;
108 u32 last_in_seqno; 106 u32 last_in_seqno;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 45832fb75ea4..4a8f37f48764 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2006, Ericsson AB 4 * Copyright (c) 2001-2007, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -607,23 +607,24 @@ exit:
607static int send_stream(struct kiocb *iocb, struct socket *sock, 607static int send_stream(struct kiocb *iocb, struct socket *sock,
608 struct msghdr *m, size_t total_len) 608 struct msghdr *m, size_t total_len)
609{ 609{
610 struct tipc_port *tport;
610 struct msghdr my_msg; 611 struct msghdr my_msg;
611 struct iovec my_iov; 612 struct iovec my_iov;
612 struct iovec *curr_iov; 613 struct iovec *curr_iov;
613 int curr_iovlen; 614 int curr_iovlen;
614 char __user *curr_start; 615 char __user *curr_start;
616 u32 hdr_size;
615 int curr_left; 617 int curr_left;
616 int bytes_to_send; 618 int bytes_to_send;
617 int bytes_sent; 619 int bytes_sent;
618 int res; 620 int res;
619 621
620 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE)) 622 /* Handle special cases where there is no connection */
621 return send_packet(iocb, sock, m, total_len);
622
623 /* Can only send large data streams if already connected */
624 623
625 if (unlikely(sock->state != SS_CONNECTED)) { 624 if (unlikely(sock->state != SS_CONNECTED)) {
626 if (sock->state == SS_DISCONNECTING) 625 if (sock->state == SS_UNCONNECTED)
626 return send_packet(iocb, sock, m, total_len);
627 else if (sock->state == SS_DISCONNECTING)
627 return -EPIPE; 628 return -EPIPE;
628 else 629 else
629 return -ENOTCONN; 630 return -ENOTCONN;
@@ -648,17 +649,25 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
648 my_msg.msg_name = NULL; 649 my_msg.msg_name = NULL;
649 bytes_sent = 0; 650 bytes_sent = 0;
650 651
652 tport = tipc_sk(sock->sk)->p;
653 hdr_size = msg_hdr_sz(&tport->phdr);
654
651 while (curr_iovlen--) { 655 while (curr_iovlen--) {
652 curr_start = curr_iov->iov_base; 656 curr_start = curr_iov->iov_base;
653 curr_left = curr_iov->iov_len; 657 curr_left = curr_iov->iov_len;
654 658
655 while (curr_left) { 659 while (curr_left) {
656 bytes_to_send = (curr_left < TIPC_MAX_USER_MSG_SIZE) 660 bytes_to_send = tport->max_pkt - hdr_size;
657 ? curr_left : TIPC_MAX_USER_MSG_SIZE; 661 if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
662 bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
663 if (curr_left < bytes_to_send)
664 bytes_to_send = curr_left;
658 my_iov.iov_base = curr_start; 665 my_iov.iov_base = curr_start;
659 my_iov.iov_len = bytes_to_send; 666 my_iov.iov_len = bytes_to_send;
660 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) { 667 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) {
661 return bytes_sent ? bytes_sent : res; 668 if (bytes_sent != 0)
669 res = bytes_sent;
670 return res;
662 } 671 }
663 curr_left -= bytes_to_send; 672 curr_left -= bytes_to_send;
664 curr_start += bytes_to_send; 673 curr_start += bytes_to_send;
@@ -1600,33 +1609,6 @@ static int getsockopt(struct socket *sock,
1600} 1609}
1601 1610
1602/** 1611/**
1603 * Placeholders for non-implemented functionality
1604 *
1605 * Returns error code (POSIX-compliant where defined)
1606 */
1607
1608static int ioctl(struct socket *s, u32 cmd, unsigned long arg)
1609{
1610 return -EINVAL;
1611}
1612
1613static int no_mmap(struct file *file, struct socket *sock,
1614 struct vm_area_struct *vma)
1615{
1616 return -EINVAL;
1617}
1618static ssize_t no_sendpage(struct socket *sock, struct page *page,
1619 int offset, size_t size, int flags)
1620{
1621 return -EINVAL;
1622}
1623
1624static int no_skpair(struct socket *s1, struct socket *s2)
1625{
1626 return -EOPNOTSUPP;
1627}
1628
1629/**
1630 * Protocol switches for the various types of TIPC sockets 1612 * Protocol switches for the various types of TIPC sockets
1631 */ 1613 */
1632 1614
@@ -1636,19 +1618,19 @@ static struct proto_ops msg_ops = {
1636 .release = release, 1618 .release = release,
1637 .bind = bind, 1619 .bind = bind,
1638 .connect = connect, 1620 .connect = connect,
1639 .socketpair = no_skpair, 1621 .socketpair = sock_no_socketpair,
1640 .accept = accept, 1622 .accept = accept,
1641 .getname = get_name, 1623 .getname = get_name,
1642 .poll = poll, 1624 .poll = poll,
1643 .ioctl = ioctl, 1625 .ioctl = sock_no_ioctl,
1644 .listen = listen, 1626 .listen = listen,
1645 .shutdown = shutdown, 1627 .shutdown = shutdown,
1646 .setsockopt = setsockopt, 1628 .setsockopt = setsockopt,
1647 .getsockopt = getsockopt, 1629 .getsockopt = getsockopt,
1648 .sendmsg = send_msg, 1630 .sendmsg = send_msg,
1649 .recvmsg = recv_msg, 1631 .recvmsg = recv_msg,
1650 .mmap = no_mmap, 1632 .mmap = sock_no_mmap,
1651 .sendpage = no_sendpage 1633 .sendpage = sock_no_sendpage
1652}; 1634};
1653 1635
1654static struct proto_ops packet_ops = { 1636static struct proto_ops packet_ops = {
@@ -1657,19 +1639,19 @@ static struct proto_ops packet_ops = {
1657 .release = release, 1639 .release = release,
1658 .bind = bind, 1640 .bind = bind,
1659 .connect = connect, 1641 .connect = connect,
1660 .socketpair = no_skpair, 1642 .socketpair = sock_no_socketpair,
1661 .accept = accept, 1643 .accept = accept,
1662 .getname = get_name, 1644 .getname = get_name,
1663 .poll = poll, 1645 .poll = poll,
1664 .ioctl = ioctl, 1646 .ioctl = sock_no_ioctl,
1665 .listen = listen, 1647 .listen = listen,
1666 .shutdown = shutdown, 1648 .shutdown = shutdown,
1667 .setsockopt = setsockopt, 1649 .setsockopt = setsockopt,
1668 .getsockopt = getsockopt, 1650 .getsockopt = getsockopt,
1669 .sendmsg = send_packet, 1651 .sendmsg = send_packet,
1670 .recvmsg = recv_msg, 1652 .recvmsg = recv_msg,
1671 .mmap = no_mmap, 1653 .mmap = sock_no_mmap,
1672 .sendpage = no_sendpage 1654 .sendpage = sock_no_sendpage
1673}; 1655};
1674 1656
1675static struct proto_ops stream_ops = { 1657static struct proto_ops stream_ops = {
@@ -1678,19 +1660,19 @@ static struct proto_ops stream_ops = {
1678 .release = release, 1660 .release = release,
1679 .bind = bind, 1661 .bind = bind,
1680 .connect = connect, 1662 .connect = connect,
1681 .socketpair = no_skpair, 1663 .socketpair = sock_no_socketpair,
1682 .accept = accept, 1664 .accept = accept,
1683 .getname = get_name, 1665 .getname = get_name,
1684 .poll = poll, 1666 .poll = poll,
1685 .ioctl = ioctl, 1667 .ioctl = sock_no_ioctl,
1686 .listen = listen, 1668 .listen = listen,
1687 .shutdown = shutdown, 1669 .shutdown = shutdown,
1688 .setsockopt = setsockopt, 1670 .setsockopt = setsockopt,
1689 .getsockopt = getsockopt, 1671 .getsockopt = getsockopt,
1690 .sendmsg = send_stream, 1672 .sendmsg = send_stream,
1691 .recvmsg = recv_stream, 1673 .recvmsg = recv_stream,
1692 .mmap = no_mmap, 1674 .mmap = sock_no_mmap,
1693 .sendpage = no_sendpage 1675 .sendpage = sock_no_sendpage
1694}; 1676};
1695 1677
1696static struct net_proto_family tipc_family_ops = { 1678static struct net_proto_family tipc_family_ops = {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d70fa30d4294..65ebccc0a698 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -592,7 +592,8 @@ static struct sock * unix_create1(struct socket *sock)
592 u->dentry = NULL; 592 u->dentry = NULL;
593 u->mnt = NULL; 593 u->mnt = NULL;
594 spin_lock_init(&u->lock); 594 spin_lock_init(&u->lock);
595 atomic_set(&u->inflight, sock ? 0 : -1); 595 atomic_set(&u->inflight, 0);
596 INIT_LIST_HEAD(&u->link);
596 mutex_init(&u->readlock); /* single task reading lock */ 597 mutex_init(&u->readlock); /* single task reading lock */
597 init_waitqueue_head(&u->peer_wait); 598 init_waitqueue_head(&u->peer_wait);
598 unix_insert_socket(unix_sockets_unbound, sk); 599 unix_insert_socket(unix_sockets_unbound, sk);
@@ -1134,9 +1135,6 @@ restart:
1134 /* take ten and and send info to listening sock */ 1135 /* take ten and and send info to listening sock */
1135 spin_lock(&other->sk_receive_queue.lock); 1136 spin_lock(&other->sk_receive_queue.lock);
1136 __skb_queue_tail(&other->sk_receive_queue, skb); 1137 __skb_queue_tail(&other->sk_receive_queue, skb);
1137 /* Undo artificially decreased inflight after embrion
1138 * is installed to listening socket. */
1139 atomic_inc(&newu->inflight);
1140 spin_unlock(&other->sk_receive_queue.lock); 1138 spin_unlock(&other->sk_receive_queue.lock);
1141 unix_state_unlock(other); 1139 unix_state_unlock(other);
1142 other->sk_data_ready(other, 0); 1140 other->sk_data_ready(other, 0);
@@ -2048,7 +2046,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2048 return 0; 2046 return 0;
2049} 2047}
2050 2048
2051static struct seq_operations unix_seq_ops = { 2049static const struct seq_operations unix_seq_ops = {
2052 .start = unix_seq_start, 2050 .start = unix_seq_start,
2053 .next = unix_seq_next, 2051 .next = unix_seq_next,
2054 .stop = unix_seq_stop, 2052 .stop = unix_seq_stop,
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index f20b7ea7c555..406b6433e467 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -62,6 +62,10 @@
62 * AV 1 Mar 1999 62 * AV 1 Mar 1999
63 * Damn. Added missing check for ->dead in listen queues scanning. 63 * Damn. Added missing check for ->dead in listen queues scanning.
64 * 64 *
65 * Miklos Szeredi 25 Jun 2007
66 * Reimplement with a cycle collecting algorithm. This should
67 * solve several problems with the previous code, like being racy
68 * wrt receive and holding up unrelated socket operations.
65 */ 69 */
66 70
67#include <linux/kernel.h> 71#include <linux/kernel.h>
@@ -84,10 +88,9 @@
84 88
85/* Internal data structures and random procedures: */ 89/* Internal data structures and random procedures: */
86 90
87#define GC_HEAD ((struct sock *)(-1)) 91static LIST_HEAD(gc_inflight_list);
88#define GC_ORPHAN ((struct sock *)(-3)) 92static LIST_HEAD(gc_candidates);
89 93static DEFINE_SPINLOCK(unix_gc_lock);
90static struct sock *gc_current = GC_HEAD; /* stack of objects to mark */
91 94
92atomic_t unix_tot_inflight = ATOMIC_INIT(0); 95atomic_t unix_tot_inflight = ATOMIC_INIT(0);
93 96
@@ -122,8 +125,16 @@ void unix_inflight(struct file *fp)
122{ 125{
123 struct sock *s = unix_get_socket(fp); 126 struct sock *s = unix_get_socket(fp);
124 if(s) { 127 if(s) {
125 atomic_inc(&unix_sk(s)->inflight); 128 struct unix_sock *u = unix_sk(s);
129 spin_lock(&unix_gc_lock);
130 if (atomic_inc_return(&u->inflight) == 1) {
131 BUG_ON(!list_empty(&u->link));
132 list_add_tail(&u->link, &gc_inflight_list);
133 } else {
134 BUG_ON(list_empty(&u->link));
135 }
126 atomic_inc(&unix_tot_inflight); 136 atomic_inc(&unix_tot_inflight);
137 spin_unlock(&unix_gc_lock);
127 } 138 }
128} 139}
129 140
@@ -131,182 +142,218 @@ void unix_notinflight(struct file *fp)
131{ 142{
132 struct sock *s = unix_get_socket(fp); 143 struct sock *s = unix_get_socket(fp);
133 if(s) { 144 if(s) {
134 atomic_dec(&unix_sk(s)->inflight); 145 struct unix_sock *u = unix_sk(s);
146 spin_lock(&unix_gc_lock);
147 BUG_ON(list_empty(&u->link));
148 if (atomic_dec_and_test(&u->inflight))
149 list_del_init(&u->link);
135 atomic_dec(&unix_tot_inflight); 150 atomic_dec(&unix_tot_inflight);
151 spin_unlock(&unix_gc_lock);
136 } 152 }
137} 153}
138 154
155static inline struct sk_buff *sock_queue_head(struct sock *sk)
156{
157 return (struct sk_buff *) &sk->sk_receive_queue;
158}
139 159
140/* 160#define receive_queue_for_each_skb(sk, next, skb) \
141 * Garbage Collector Support Functions 161 for (skb = sock_queue_head(sk)->next, next = skb->next; \
142 */ 162 skb != sock_queue_head(sk); skb = next, next = skb->next)
143 163
144static inline struct sock *pop_stack(void) 164static void scan_inflight(struct sock *x, void (*func)(struct sock *),
165 struct sk_buff_head *hitlist)
145{ 166{
146 struct sock *p = gc_current; 167 struct sk_buff *skb;
147 gc_current = unix_sk(p)->gc_tree; 168 struct sk_buff *next;
148 return p; 169
170 spin_lock(&x->sk_receive_queue.lock);
171 receive_queue_for_each_skb(x, next, skb) {
172 /*
173 * Do we have file descriptors ?
174 */
175 if (UNIXCB(skb).fp) {
176 bool hit = false;
177 /*
178 * Process the descriptors of this socket
179 */
180 int nfd = UNIXCB(skb).fp->count;
181 struct file **fp = UNIXCB(skb).fp->fp;
182 while (nfd--) {
183 /*
184 * Get the socket the fd matches
185 * if it indeed does so
186 */
187 struct sock *sk = unix_get_socket(*fp++);
188 if(sk) {
189 hit = true;
190 func(sk);
191 }
192 }
193 if (hit && hitlist != NULL) {
194 __skb_unlink(skb, &x->sk_receive_queue);
195 __skb_queue_tail(hitlist, skb);
196 }
197 }
198 }
199 spin_unlock(&x->sk_receive_queue.lock);
149} 200}
150 201
151static inline int empty_stack(void) 202static void scan_children(struct sock *x, void (*func)(struct sock *),
203 struct sk_buff_head *hitlist)
152{ 204{
153 return gc_current == GC_HEAD; 205 if (x->sk_state != TCP_LISTEN)
206 scan_inflight(x, func, hitlist);
207 else {
208 struct sk_buff *skb;
209 struct sk_buff *next;
210 struct unix_sock *u;
211 LIST_HEAD(embryos);
212
213 /*
214 * For a listening socket collect the queued embryos
215 * and perform a scan on them as well.
216 */
217 spin_lock(&x->sk_receive_queue.lock);
218 receive_queue_for_each_skb(x, next, skb) {
219 u = unix_sk(skb->sk);
220
221 /*
222 * An embryo cannot be in-flight, so it's safe
223 * to use the list link.
224 */
225 BUG_ON(!list_empty(&u->link));
226 list_add_tail(&u->link, &embryos);
227 }
228 spin_unlock(&x->sk_receive_queue.lock);
229
230 while (!list_empty(&embryos)) {
231 u = list_entry(embryos.next, struct unix_sock, link);
232 scan_inflight(&u->sk, func, hitlist);
233 list_del_init(&u->link);
234 }
235 }
154} 236}
155 237
156static void maybe_unmark_and_push(struct sock *x) 238static void dec_inflight(struct sock *sk)
157{ 239{
158 struct unix_sock *u = unix_sk(x); 240 atomic_dec(&unix_sk(sk)->inflight);
241}
159 242
160 if (u->gc_tree != GC_ORPHAN) 243static void inc_inflight(struct sock *sk)
161 return; 244{
162 sock_hold(x); 245 atomic_inc(&unix_sk(sk)->inflight);
163 u->gc_tree = gc_current;
164 gc_current = x;
165} 246}
166 247
248static void inc_inflight_move_tail(struct sock *sk)
249{
250 struct unix_sock *u = unix_sk(sk);
251
252 atomic_inc(&u->inflight);
253 /*
254 * If this is still a candidate, move it to the end of the
255 * list, so that it's checked even if it was already passed
256 * over
257 */
258 if (u->gc_candidate)
259 list_move_tail(&u->link, &gc_candidates);
260}
167 261
168/* The external entry point: unix_gc() */ 262/* The external entry point: unix_gc() */
169 263
170void unix_gc(void) 264void unix_gc(void)
171{ 265{
172 static DEFINE_MUTEX(unix_gc_sem); 266 static bool gc_in_progress = false;
173 int i;
174 struct sock *s;
175 struct sk_buff_head hitlist;
176 struct sk_buff *skb;
177 267
178 /* 268 struct unix_sock *u;
179 * Avoid a recursive GC. 269 struct unix_sock *next;
180 */ 270 struct sk_buff_head hitlist;
271 struct list_head cursor;
181 272
182 if (!mutex_trylock(&unix_gc_sem)) 273 spin_lock(&unix_gc_lock);
183 return;
184 274
185 spin_lock(&unix_table_lock); 275 /* Avoid a recursive GC. */
276 if (gc_in_progress)
277 goto out;
186 278
187 forall_unix_sockets(i, s) 279 gc_in_progress = true;
188 {
189 unix_sk(s)->gc_tree = GC_ORPHAN;
190 }
191 /* 280 /*
192 * Everything is now marked 281 * First, select candidates for garbage collection. Only
193 */ 282 * in-flight sockets are considered, and from those only ones
194 283 * which don't have any external reference.
195 /* Invariant to be maintained: 284 *
196 - everything unmarked is either: 285 * Holding unix_gc_lock will protect these candidates from
197 -- (a) on the stack, or 286 * being detached, and hence from gaining an external
198 -- (b) has all of its children unmarked 287 * reference. This also means, that since there are no
199 - everything on the stack is always unmarked 288 * possible receivers, the receive queues of these sockets are
200 - nothing is ever pushed onto the stack twice, because: 289 * static during the GC, even though the dequeue is done
201 -- nothing previously unmarked is ever pushed on the stack 290 * before the detach without atomicity guarantees.
202 */ 291 */
292 list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
293 int total_refs;
294 int inflight_refs;
295
296 total_refs = file_count(u->sk.sk_socket->file);
297 inflight_refs = atomic_read(&u->inflight);
298
299 BUG_ON(inflight_refs < 1);
300 BUG_ON(total_refs < inflight_refs);
301 if (total_refs == inflight_refs) {
302 list_move_tail(&u->link, &gc_candidates);
303 u->gc_candidate = 1;
304 }
305 }
203 306
204 /* 307 /*
205 * Push root set 308 * Now remove all internal in-flight reference to children of
309 * the candidates.
206 */ 310 */
207 311 list_for_each_entry(u, &gc_candidates, link)
208 forall_unix_sockets(i, s) 312 scan_children(&u->sk, dec_inflight, NULL);
209 {
210 int open_count = 0;
211
212 /*
213 * If all instances of the descriptor are not
214 * in flight we are in use.
215 *
216 * Special case: when socket s is embrion, it may be
217 * hashed but still not in queue of listening socket.
218 * In this case (see unix_create1()) we set artificial
219 * negative inflight counter to close race window.
220 * It is trick of course and dirty one.
221 */
222 if (s->sk_socket && s->sk_socket->file)
223 open_count = file_count(s->sk_socket->file);
224 if (open_count > atomic_read(&unix_sk(s)->inflight))
225 maybe_unmark_and_push(s);
226 }
227 313
228 /* 314 /*
229 * Mark phase 315 * Restore the references for children of all candidates,
316 * which have remaining references. Do this recursively, so
317 * only those remain, which form cyclic references.
318 *
319 * Use a "cursor" link, to make the list traversal safe, even
320 * though elements might be moved about.
230 */ 321 */
322 list_add(&cursor, &gc_candidates);
323 while (cursor.next != &gc_candidates) {
324 u = list_entry(cursor.next, struct unix_sock, link);
231 325
232 while (!empty_stack()) 326 /* Move cursor to after the current position. */
233 { 327 list_move(&cursor, &u->link);
234 struct sock *x = pop_stack();
235 struct sock *sk;
236
237 spin_lock(&x->sk_receive_queue.lock);
238 skb = skb_peek(&x->sk_receive_queue);
239
240 /*
241 * Loop through all but first born
242 */
243 328
244 while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) { 329 if (atomic_read(&u->inflight) > 0) {
245 /* 330 list_move_tail(&u->link, &gc_inflight_list);
246 * Do we have file descriptors ? 331 u->gc_candidate = 0;
247 */ 332 scan_children(&u->sk, inc_inflight_move_tail, NULL);
248 if(UNIXCB(skb).fp)
249 {
250 /*
251 * Process the descriptors of this socket
252 */
253 int nfd=UNIXCB(skb).fp->count;
254 struct file **fp = UNIXCB(skb).fp->fp;
255 while(nfd--)
256 {
257 /*
258 * Get the socket the fd matches if
259 * it indeed does so
260 */
261 if((sk=unix_get_socket(*fp++))!=NULL)
262 {
263 maybe_unmark_and_push(sk);
264 }
265 }
266 }
267 /* We have to scan not-yet-accepted ones too */
268 if (x->sk_state == TCP_LISTEN)
269 maybe_unmark_and_push(skb->sk);
270 skb=skb->next;
271 } 333 }
272 spin_unlock(&x->sk_receive_queue.lock);
273 sock_put(x);
274 } 334 }
335 list_del(&cursor);
275 336
337 /*
338 * Now gc_candidates contains only garbage. Restore original
339 * inflight counters for these as well, and remove the skbuffs
340 * which are creating the cycle(s).
341 */
276 skb_queue_head_init(&hitlist); 342 skb_queue_head_init(&hitlist);
343 list_for_each_entry(u, &gc_candidates, link)
344 scan_children(&u->sk, inc_inflight, &hitlist);
277 345
278 forall_unix_sockets(i, s) 346 spin_unlock(&unix_gc_lock);
279 {
280 struct unix_sock *u = unix_sk(s);
281 347
282 if (u->gc_tree == GC_ORPHAN) { 348 /* Here we are. Hitlist is filled. Die. */
283 struct sk_buff *nextsk; 349 __skb_queue_purge(&hitlist);
284 350
285 spin_lock(&s->sk_receive_queue.lock); 351 spin_lock(&unix_gc_lock);
286 skb = skb_peek(&s->sk_receive_queue);
287 while (skb &&
288 skb != (struct sk_buff *)&s->sk_receive_queue) {
289 nextsk = skb->next;
290 /*
291 * Do we have file descriptors ?
292 */
293 if (UNIXCB(skb).fp) {
294 __skb_unlink(skb,
295 &s->sk_receive_queue);
296 __skb_queue_tail(&hitlist, skb);
297 }
298 skb = nextsk;
299 }
300 spin_unlock(&s->sk_receive_queue.lock);
301 }
302 u->gc_tree = GC_ORPHAN;
303 }
304 spin_unlock(&unix_table_lock);
305 352
306 /* 353 /* All candidates should have been detached by now. */
307 * Here we are. Hitlist is filled. Die. 354 BUG_ON(!list_empty(&gc_candidates));
308 */ 355 gc_in_progress = false;
309 356
310 __skb_queue_purge(&hitlist); 357 out:
311 mutex_unlock(&unix_gc_sem); 358 spin_unlock(&unix_gc_lock);
312} 359}
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 205106521ecb..236e7eaf1b7f 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -164,14 +164,14 @@ static int status_show(struct seq_file *m, void *v)
164 return 0; 164 return 0;
165} 165}
166 166
167static struct seq_operations config_op = { 167static const struct seq_operations config_op = {
168 .start = r_start, 168 .start = r_start,
169 .next = r_next, 169 .next = r_next,
170 .stop = r_stop, 170 .stop = r_stop,
171 .show = config_show, 171 .show = config_show,
172}; 172};
173 173
174static struct seq_operations status_op = { 174static const struct seq_operations status_op = {
175 .start = r_start, 175 .start = r_start,
176 .next = r_next, 176 .next = r_next,
177 .stop = r_stop, 177 .stop = r_stop,
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 96001f0c64fc..7405b9c5b7f2 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -234,21 +234,21 @@ out:
234 return 0; 234 return 0;
235} 235}
236 236
237static struct seq_operations x25_seq_route_ops = { 237static const struct seq_operations x25_seq_route_ops = {
238 .start = x25_seq_route_start, 238 .start = x25_seq_route_start,
239 .next = x25_seq_route_next, 239 .next = x25_seq_route_next,
240 .stop = x25_seq_route_stop, 240 .stop = x25_seq_route_stop,
241 .show = x25_seq_route_show, 241 .show = x25_seq_route_show,
242}; 242};
243 243
244static struct seq_operations x25_seq_socket_ops = { 244static const struct seq_operations x25_seq_socket_ops = {
245 .start = x25_seq_socket_start, 245 .start = x25_seq_socket_start,
246 .next = x25_seq_socket_next, 246 .next = x25_seq_socket_next,
247 .stop = x25_seq_socket_stop, 247 .stop = x25_seq_socket_stop,
248 .show = x25_seq_socket_show, 248 .show = x25_seq_socket_show,
249}; 249};
250 250
251static struct seq_operations x25_seq_forward_ops = { 251static const struct seq_operations x25_seq_forward_ops = {
252 .start = x25_seq_forward_start, 252 .start = x25_seq_forward_start,
253 .next = x25_seq_forward_next, 253 .next = x25_seq_forward_next,
254 .stop = x25_seq_forward_stop, 254 .stop = x25_seq_forward_stop,
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dfacb9c2a6e3..e070c3f938fb 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -686,6 +686,37 @@ out:
686 return x; 686 return x;
687} 687}
688 688
689struct xfrm_state *
690xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
691 unsigned short family, u8 mode, u8 proto, u32 reqid)
692{
693 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
694 struct xfrm_state *rx = NULL, *x = NULL;
695 struct hlist_node *entry;
696
697 spin_lock(&xfrm_state_lock);
698 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
699 if (x->props.family == family &&
700 x->props.reqid == reqid &&
701 !(x->props.flags & XFRM_STATE_WILDRECV) &&
702 xfrm_state_addr_check(x, daddr, saddr, family) &&
703 mode == x->props.mode &&
704 proto == x->id.proto &&
705 x->km.state == XFRM_STATE_VALID) {
706 rx = x;
707 break;
708 }
709 }
710
711 if (rx)
712 xfrm_state_hold(rx);
713 spin_unlock(&xfrm_state_lock);
714
715
716 return rx;
717}
718EXPORT_SYMBOL(xfrm_stateonly_find);
719
689static void __xfrm_state_insert(struct xfrm_state *x) 720static void __xfrm_state_insert(struct xfrm_state *x)
690{ 721{
691 unsigned int h; 722 unsigned int h;