author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 14:38:13 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 14:38:13 -0500
commit    cb18eccff48ef3986d1072964590bce6fec705fb (patch)
tree      777fb1d15e0281341e1e02c9803d989538d346f2 /net
parent    c827ba4cb49a30ce581201fd0ba2be77cde412c7 (diff)
parent    5ef213f6842277ee1df5659f59fac0ffc9beb411 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (45 commits)
  [IPV4]: Restore multipath routing after rt_next changes.
  [XFRM] IPV6: Fix outbound RO transformation which is broken by IPsec tunnel patch.
  [NET]: Reorder fields of struct dst_entry
  [DECNET]: Convert decnet route to use the new dst_entry 'next' pointer
  [IPV6]: Convert ipv6 route to use the new dst_entry 'next' pointer
  [IPV4]: Convert ipv4 route to use the new dst_entry 'next' pointer
  [NET]: Introduce union in struct dst_entry to hold 'next' pointer
  [DECNET]: fix misannotation of linkinfo_dn
  [DECNET]: FRA_{DST,SRC} are le16 for decnet
  [UDP]: UDP can use sk_hash to speedup lookups
  [NET]: Fix whitespace errors.
  [NET] XFRM: Fix whitespace errors.
  [NET] X25: Fix whitespace errors.
  [NET] WANROUTER: Fix whitespace errors.
  [NET] UNIX: Fix whitespace errors.
  [NET] TIPC: Fix whitespace errors.
  [NET] SUNRPC: Fix whitespace errors.
  [NET] SCTP: Fix whitespace errors.
  [NET] SCHED: Fix whitespace errors.
  [NET] RXRPC: Fix whitespace errors.
  ...
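Note: the headline change in this merge is the dst_entry 'next' pointer rework listed above. The following is an illustrative sketch only, not the exact upstream struct layout (all other members are elided); it shows the idea of the "[NET]: Introduce union in struct dst_entry to hold 'next' pointer" commit: the protocol route types chain through one shared field instead of each embedding its own next pointer.

struct dst_entry {
	/* ... other members elided ... */
	union {
		struct dst_entry *next;      /* generic hash-chain link */
		struct rtable    *rt_next;   /* IPv4 routing cache      */
		struct rt6_info  *rt6_next;  /* IPv6 routing cache      */
		struct dn_route  *dn_next;   /* DECnet routing cache    */
	};
};

With a shared field like this, the per-protocol conversion commits in the list above can keep their existing chain-walking code while the generic code walks the same chain through dst_entry.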
Diffstat (limited to 'net')
-rw-r--r--net/802/fc.c24
-rw-r--r--net/802/fddi.c26
-rw-r--r--net/802/hippi.c20
-rw-r--r--net/802/psnap.c2
-rw-r--r--net/802/tr.c136
-rw-r--r--net/8021q/vlan.c52
-rw-r--r--net/8021q/vlan.h12
-rw-r--r--net/8021q/vlan_dev.c42
-rw-r--r--net/8021q/vlanproc.c36
-rw-r--r--net/appletalk/aarp.c14
-rw-r--r--net/appletalk/atalk_proc.c2
-rw-r--r--net/appletalk/ddp.c218
-rw-r--r--net/appletalk/dev.c8
-rw-r--r--net/atm/atm_sysfs.c14
-rw-r--r--net/atm/br2684.c4
-rw-r--r--net/atm/common.c34
-rw-r--r--net/atm/common.h2
-rw-r--r--net/atm/ioctl.c2
-rw-r--r--net/atm/lec.c20
-rw-r--r--net/atm/lec.h4
-rw-r--r--net/atm/mpc.c130
-rw-r--r--net/atm/mpc.h44
-rw-r--r--net/atm/mpoa_caches.c4
-rw-r--r--net/atm/mpoa_caches.h98
-rw-r--r--net/atm/mpoa_proc.c88
-rw-r--r--net/atm/proc.c58
-rw-r--r--net/atm/pvc.c4
-rw-r--r--net/atm/raw.c6
-rw-r--r--net/atm/resources.c16
-rw-r--r--net/atm/signaling.h4
-rw-r--r--net/atm/svc.c48
-rw-r--r--net/ax25/af_ax25.c114
-rw-r--r--net/ax25/ax25_addr.c6
-rw-r--r--net/ax25/ax25_ip.c94
-rw-r--r--net/ax25/ax25_route.c8
-rw-r--r--net/ax25/ax25_std_timer.c2
-rw-r--r--net/ax25/ax25_subr.c4
-rw-r--r--net/ax25/ax25_uid.c2
-rw-r--r--net/bluetooth/af_bluetooth.c16
-rw-r--r--net/bluetooth/bnep/bnep.h18
-rw-r--r--net/bluetooth/bnep/core.c52
-rw-r--r--net/bluetooth/bnep/netdev.c34
-rw-r--r--net/bluetooth/bnep/sock.c26
-rw-r--r--net/bluetooth/cmtp/capi.c14
-rw-r--r--net/bluetooth/cmtp/cmtp.h12
-rw-r--r--net/bluetooth/cmtp/core.c14
-rw-r--r--net/bluetooth/cmtp/sock.c14
-rw-r--r--net/bluetooth/hci_conn.c20
-rw-r--r--net/bluetooth/hci_core.c36
-rw-r--r--net/bluetooth/hci_event.c26
-rw-r--r--net/bluetooth/hci_sock.c28
-rw-r--r--net/bluetooth/hidp/core.c14
-rw-r--r--net/bluetooth/hidp/hidp.h12
-rw-r--r--net/bluetooth/hidp/sock.c14
-rw-r--r--net/bluetooth/l2cap.c26
-rw-r--r--net/bluetooth/lib.c12
-rw-r--r--net/bluetooth/rfcomm/core.c56
-rw-r--r--net/bluetooth/rfcomm/sock.c20
-rw-r--r--net/bluetooth/rfcomm/tty.c46
-rw-r--r--net/bluetooth/sco.c30
-rw-r--r--net/bridge/br_device.c8
-rw-r--r--net/bridge/br_fdb.c34
-rw-r--r--net/bridge/br_forward.c4
-rw-r--r--net/bridge/br_if.c26
-rw-r--r--net/bridge/br_input.c4
-rw-r--r--net/bridge/br_ioctl.c18
-rw-r--r--net/bridge/br_netfilter.c14
-rw-r--r--net/bridge/br_notify.c10
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/br_stp.c24
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_stp_if.c4
-rw-r--r--net/bridge/br_stp_timer.c20
-rw-r--r--net/bridge/br_sysfs_br.c8
-rw-r--r--net/bridge/netfilter/ebt_802_3.c6
-rw-r--r--net/bridge/netfilter/ebt_among.c6
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c2
-rw-r--r--net/bridge/netfilter/ebt_ip.c8
-rw-r--r--net/bridge/netfilter/ebt_log.c6
-rw-r--r--net/bridge/netfilter/ebt_mark_m.c2
-rw-r--r--net/bridge/netfilter/ebt_stp.c4
-rw-r--r--net/bridge/netfilter/ebt_ulog.c16
-rw-r--r--net/bridge/netfilter/ebt_vlan.c6
-rw-r--r--net/bridge/netfilter/ebtable_filter.c4
-rw-r--r--net/bridge/netfilter/ebtables.c16
-rw-r--r--net/compat.c16
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c96
-rw-r--r--net/core/dev_mcast.c28
-rw-r--r--net/core/dst.c6
-rw-r--r--net/core/ethtool.c10
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/gen_estimator.c6
-rw-r--r--net/core/gen_stats.c2
-rw-r--r--net/core/iovec.c10
-rw-r--r--net/core/link_watch.c4
-rw-r--r--net/core/neighbour.c50
-rw-r--r--net/core/net-sysfs.c10
-rw-r--r--net/core/netpoll.c18
-rw-r--r--net/core/pktgen.c62
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/scm.c12
-rw-r--r--net/core/skbuff.c14
-rw-r--r--net/core/sock.c148
-rw-r--r--net/core/stream.c4
-rw-r--r--net/core/user_dma.c8
-rw-r--r--net/core/utils.c4
-rw-r--r--net/core/wireless.c20
-rw-r--r--net/dccp/ccids/ccid2.c68
-rw-r--r--net/dccp/ccids/ccid3.c14
-rw-r--r--net/dccp/ccids/lib/packet_history.h4
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c18
-rw-r--r--net/dccp/dccp.h8
-rw-r--r--net/dccp/feat.c8
-rw-r--r--net/dccp/feat.h2
-rw-r--r--net/dccp/input.c14
-rw-r--r--net/dccp/ipv4.c6
-rw-r--r--net/dccp/ipv6.c2
-rw-r--r--net/dccp/options.c4
-rw-r--r--net/dccp/output.c4
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/decnet/af_decnet.c130
-rw-r--r--net/decnet/dn_dev.c102
-rw-r--r--net/decnet/dn_fib.c132
-rw-r--r--net/decnet/dn_neigh.c24
-rw-r--r--net/decnet/dn_nsp_in.c74
-rw-r--r--net/decnet/dn_nsp_out.c56
-rw-r--r--net/decnet/dn_route.c204
-rw-r--r--net/decnet/dn_table.c238
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c2
-rw-r--r--net/decnet/sysctl_net_decnet.c84
-rw-r--r--net/econet/af_econet.c64
-rw-r--r--net/ethernet/eth.c8
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c8
-rw-r--r--net/ieee80211/ieee80211_rx.c6
-rw-r--r--net/ieee80211/ieee80211_tx.c6
-rw-r--r--net/ieee80211/ieee80211_wx.c4
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_assoc.c20
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_auth.c78
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_event.c12
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_io.c64
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_module.c48
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_priv.h12
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_scan.c14
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c18
-rw-r--r--net/ipv4/af_inet.c72
-rw-r--r--net/ipv4/ah4.c20
-rw-r--r--net/ipv4/arp.c66
-rw-r--r--net/ipv4/datagram.c14
-rw-r--r--net/ipv4/devinet.c46
-rw-r--r--net/ipv4/esp4.c8
-rw-r--r--net/ipv4/fib_frontend.c22
-rw-r--r--net/ipv4/fib_hash.c10
-rw-r--r--net/ipv4/fib_semantics.c14
-rw-r--r--net/ipv4/fib_trie.c98
-rw-r--r--net/ipv4/icmp.c16
-rw-r--r--net/ipv4/igmp.c46
-rw-r--r--net/ipv4/inet_connection_sock.c6
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_hashtables.c104
-rw-r--r--net/ipv4/ip_forward.c22
-rw-r--r--net/ipv4/ip_fragment.c18
-rw-r--r--net/ipv4/ip_gre.c16
-rw-r--r--net/ipv4/ip_input.c28
-rw-r--r--net/ipv4/ip_options.c14
-rw-r--r--net/ipv4/ip_output.c42
-rw-r--r--net/ipv4/ip_sockglue.c82
-rw-r--r--net/ipv4/ipcomp.c32
-rw-r--r--net/ipv4/ipconfig.c30
-rw-r--r--net/ipv4/ipip.c14
-rw-r--r--net/ipv4/ipmr.c126
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c10
-rw-r--r--net/ipv4/ipvs/ip_vs_core.c20
-rw-r--r--net/ipv4/ipvs/ip_vs_ftp.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_lblc.c12
-rw-r--r--net/ipv4/ipvs/ip_vs_lblcr.c8
-rw-r--r--net/ipv4/ipvs/ip_vs_rr.c2
-rw-r--r--net/ipv4/multipath_drr.c6
-rw-r--r--net/ipv4/multipath_random.c4
-rw-r--r--net/ipv4/multipath_rr.c2
-rw-r--r--net/ipv4/multipath_wrandom.c6
-rw-r--r--net/ipv4/netfilter.c6
-rw-r--r--net/ipv4/netfilter/arp_tables.c6
-rw-r--r--net/ipv4/netfilter/arpt_mangle.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_amanda.c4
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c76
-rw-r--r--net/ipv4/netfilter/ip_conntrack_ftp.c12
-rw-r--r--net/ipv4/netfilter/ip_conntrack_helper_h323.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_helper_pptp.c6
-rw-r--r--net/ipv4/netfilter/ip_conntrack_irc.c18
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netbios_ns.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c118
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_icmp.c12
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_sctp.c94
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_tcp.c276
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_udp.c8
-rw-r--r--net/ipv4/netfilter/ip_conntrack_sip.c10
-rw-r--r--net/ipv4/netfilter/ip_conntrack_standalone.c34
-rw-r--r--net/ipv4/netfilter/ip_conntrack_tftp.c4
-rw-r--r--net/ipv4/netfilter/ip_nat_core.c22
-rw-r--r--net/ipv4/netfilter/ip_nat_ftp.c6
-rw-r--r--net/ipv4/netfilter/ip_nat_helper.c34
-rw-r--r--net/ipv4/netfilter/ip_nat_helper_pptp.c10
-rw-r--r--net/ipv4/netfilter/ip_nat_irc.c4
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_icmp.c2
-rw-r--r--net/ipv4/netfilter/ip_nat_rule.c6
-rw-r--r--net/ipv4/netfilter/ip_nat_sip.c14
-rw-r--r--net/ipv4/netfilter/ip_nat_snmp_basic.c362
-rw-r--r--net/ipv4/netfilter/ip_nat_standalone.c22
-rw-r--r--net/ipv4/netfilter/ip_queue.c150
-rw-r--r--net/ipv4/netfilter/ip_tables.c10
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c42
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c10
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c10
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c4
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c4
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c2
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c48
-rw-r--r--net/ipv4/netfilter/ipt_SAME.c16
-rw-r--r--net/ipv4/netfilter/ipt_TOS.c4
-rw-r--r--net/ipv4/netfilter/ipt_TTL.c8
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c30
-rw-r--r--net/ipv4/netfilter/ipt_addrtype.c2
-rw-r--r--net/ipv4/netfilter/ipt_ah.c4
-rw-r--r--net/ipv4/netfilter/ipt_iprange.c4
-rw-r--r--net/ipv4/netfilter/ipt_owner.c6
-rw-r--r--net/ipv4/netfilter/ipt_ttl.c4
-rw-r--r--net/ipv4/netfilter/iptable_filter.c2
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c6
-rw-r--r--net/ipv4/netfilter/iptable_raw.c26
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c24
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c8
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c30
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c16
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c10
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c10
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_icmp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c18
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c14
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c98
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c20
-rw-r--r--net/ipv4/proc.c10
-rw-r--r--net/ipv4/protocol.c2
-rw-r--r--net/ipv4/raw.c26
-rw-r--r--net/ipv4/route.c164
-rw-r--r--net/ipv4/syncookies.c58
-rw-r--r--net/ipv4/sysctl_net_ipv4.c20
-rw-r--r--net/ipv4/tcp.c12
-rw-r--r--net/ipv4/tcp_cong.c38
-rw-r--r--net/ipv4/tcp_cubic.c50
-rw-r--r--net/ipv4/tcp_highspeed.c4
-rw-r--r--net/ipv4/tcp_htcp.c2
-rw-r--r--net/ipv4/tcp_input.c116
-rw-r--r--net/ipv4/tcp_ipv4.c20
-rw-r--r--net/ipv4/tcp_minisocks.c12
-rw-r--r--net/ipv4/tcp_output.c62
-rw-r--r--net/ipv4/tcp_timer.c12
-rw-r--r--net/ipv4/tcp_vegas.c4
-rw-r--r--net/ipv4/tcp_westwood.c24
-rw-r--r--net/ipv4/udp.c109
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/xfrm4_input.c6
-rw-r--r--net/ipv4/xfrm4_output.c8
-rw-r--r--net/ipv4/xfrm4_policy.c8
-rw-r--r--net/ipv4/xfrm4_tunnel.c2
-rw-r--r--net/ipv6/addrconf.c198
-rw-r--r--net/ipv6/af_inet6.c40
-rw-r--r--net/ipv6/ah6.c40
-rw-r--r--net/ipv6/anycast.c4
-rw-r--r--net/ipv6/datagram.c52
-rw-r--r--net/ipv6/esp6.c30
-rw-r--r--net/ipv6/exthdrs.c14
-rw-r--r--net/ipv6/exthdrs_core.c16
-rw-r--r--net/ipv6/icmp.c38
-rw-r--r--net/ipv6/inet6_hashtables.c98
-rw-r--r--net/ipv6/ip6_fib.c66
-rw-r--r--net/ipv6/ip6_flowlabel.c4
-rw-r--r--net/ipv6/ip6_input.c18
-rw-r--r--net/ipv6/ip6_output.c24
-rw-r--r--net/ipv6/ip6_tunnel.c84
-rw-r--r--net/ipv6/ipcomp6.c22
-rw-r--r--net/ipv6/ipv6_sockglue.c12
-rw-r--r--net/ipv6/mcast.c36
-rw-r--r--net/ipv6/mip6.c2
-rw-r--r--net/ipv6/ndisc.c138
-rw-r--r--net/ipv6/netfilter.c4
-rw-r--r--net/ipv6/netfilter/ip6_queue.c152
-rw-r--r--net/ipv6/netfilter/ip6_tables.c40
-rw-r--r--net/ipv6/netfilter/ip6t_HL.c12
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c10
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c36
-rw-r--r--net/ipv6/netfilter/ip6t_ah.c22
-rw-r--r--net/ipv6/netfilter/ip6t_hl.c2
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c2
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c10
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c16
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c50
-rw-r--r--net/ipv6/proc.c22
-rw-r--r--net/ipv6/protocol.c2
-rw-r--r--net/ipv6/raw.c78
-rw-r--r--net/ipv6/reassembly.c32
-rw-r--r--net/ipv6/route.c108
-rw-r--r--net/ipv6/sit.c10
-rw-r--r--net/ipv6/sysctl_net_ipv6.c4
-rw-r--r--net/ipv6/tcp_ipv6.c98
-rw-r--r--net/ipv6/udp.c64
-rw-r--r--net/ipv6/udp_impl.h6
-rw-r--r--net/ipv6/xfrm6_input.c18
-rw-r--r--net/ipv6/xfrm6_output.c8
-rw-r--r--net/ipv6/xfrm6_policy.c14
-rw-r--r--net/ipv6/xfrm6_state.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c22
-rw-r--r--net/ipx/af_ipx.c84
-rw-r--r--net/ipx/ipx_proc.c2
-rw-r--r--net/ipx/ipx_route.c4
-rw-r--r--net/ipx/sysctl_net_ipx.c2
-rw-r--r--net/irda/af_irda.c12
-rw-r--r--net/irda/discovery.c58
-rw-r--r--net/irda/ircomm/ircomm_core.c98
-rw-r--r--net/irda/ircomm/ircomm_event.c72
-rw-r--r--net/irda/ircomm/ircomm_lmp.c110
-rw-r--r--net/irda/ircomm/ircomm_param.c94
-rw-r--r--net/irda/ircomm/ircomm_ttp.c88
-rw-r--r--net/irda/ircomm/ircomm_tty.c288
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c236
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c86
-rw-r--r--net/irda/irda_device.c22
-rw-r--r--net/irda/iriap.c20
-rw-r--r--net/irda/irias_object.c6
-rw-r--r--net/irda/irlan/irlan_client.c150
-rw-r--r--net/irda/irlan/irlan_client_event.c190
-rw-r--r--net/irda/irlan/irlan_common.c318
-rw-r--r--net/irda/irlan/irlan_eth.c102
-rw-r--r--net/irda/irlan/irlan_event.c26
-rw-r--r--net/irda/irlan/irlan_filter.c48
-rw-r--r--net/irda/irlan/irlan_provider.c102
-rw-r--r--net/irda/irlan/irlan_provider_event.c68
-rw-r--r--net/irda/irlap.c14
-rw-r--r--net/irda/irlap_event.c6
-rw-r--r--net/irda/irlap_frame.c6
-rw-r--r--net/irda/irlmp.c16
-rw-r--r--net/irda/irlmp_event.c2
-rw-r--r--net/irda/irlmp_frame.c122
-rw-r--r--net/irda/irmod.c42
-rw-r--r--net/irda/irnet/irnet.h4
-rw-r--r--net/irda/irnet/irnet_irda.c38
-rw-r--r--net/irda/irnet/irnet_irda.h2
-rw-r--r--net/irda/irnet/irnet_ppp.c10
-rw-r--r--net/irda/irproc.c40
-rw-r--r--net/irda/irqueue.c106
-rw-r--r--net/irda/irsysctl.c24
-rw-r--r--net/irda/irttp.c40
-rw-r--r--net/irda/parameters.c2
-rw-r--r--net/irda/qos.c150
-rw-r--r--net/irda/timer.c62
-rw-r--r--net/irda/wrapper.c2
-rw-r--r--net/key/af_key.c156
-rw-r--r--net/lapb/lapb_iface.c6
-rw-r--r--net/lapb/lapb_out.c4
-rw-r--r--net/lapb/lapb_subr.c16
-rw-r--r--net/lapb/lapb_timer.c2
-rw-r--r--net/llc/af_llc.c18
-rw-r--r--net/llc/llc_conn.c2
-rw-r--r--net/llc/llc_input.c8
-rw-r--r--net/llc/llc_output.c2
-rw-r--r--net/llc/llc_pdu.c2
-rw-r--r--net/llc/llc_proc.c16
-rw-r--r--net/llc/llc_s_st.c2
-rw-r--r--net/llc/llc_sap.c6
-rw-r--r--net/llc/llc_station.c22
-rw-r--r--net/llc/sysctl_net_llc.c8
-rw-r--r--net/netlabel/netlabel_cipso_v4.c8
-rw-r--r--net/netlink/af_netlink.c44
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/netrom/af_netrom.c38
-rw-r--r--net/netrom/nr_route.c18
-rw-r--r--net/netrom/nr_subr.c2
-rw-r--r--net/netrom/nr_timer.c2
-rw-r--r--net/netrom/sysctl_net_netrom.c26
-rw-r--r--net/packet/af_packet.c78
-rw-r--r--net/rose/af_rose.c12
-rw-r--r--net/rose/rose_route.c36
-rw-r--r--net/rose/sysctl_net_rose.c20
-rw-r--r--net/rxrpc/connection.c10
-rw-r--r--net/rxrpc/internal.h2
-rw-r--r--net/rxrpc/krxiod.c2
-rw-r--r--net/rxrpc/krxsecd.c2
-rw-r--r--net/rxrpc/main.c44
-rw-r--r--net/rxrpc/sysctl.c12
-rw-r--r--net/rxrpc/transport.c6
-rw-r--r--net/sched/act_api.c34
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_ipt.c4
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c12
-rw-r--r--net/sched/act_simple.c6
-rw-r--r--net/sched/cls_api.c14
-rw-r--r--net/sched/cls_basic.c6
-rw-r--r--net/sched/cls_fw.c4
-rw-r--r--net/sched/cls_rsvp.h8
-rw-r--r--net/sched/cls_tcindex.c12
-rw-r--r--net/sched/cls_u32.c4
-rw-r--r--net/sched/em_cmp.c2
-rw-r--r--net/sched/em_meta.c12
-rw-r--r--net/sched/em_nbyte.c4
-rw-r--r--net/sched/em_text.c4
-rw-r--r--net/sched/em_u32.c6
-rw-r--r--net/sched/ematch.c12
-rw-r--r--net/sched/sch_api.c18
-rw-r--r--net/sched/sch_atm.c22
-rw-r--r--net/sched/sch_cbq.c32
-rw-r--r--net/sched/sch_dsmark.c16
-rw-r--r--net/sched/sch_generic.c28
-rw-r--r--net/sched/sch_gred.c4
-rw-r--r--net/sched/sch_hfsc.c26
-rw-r--r--net/sched/sch_htb.c28
-rw-r--r--net/sched/sch_ingress.c30
-rw-r--r--net/sched/sch_netem.c26
-rw-r--r--net/sched/sch_prio.c6
-rw-r--r--net/sched/sch_red.c2
-rw-r--r--net/sched/sch_sfq.c6
-rw-r--r--net/sched/sch_tbf.c6
-rw-r--r--net/sched/sch_teql.c8
-rw-r--r--net/sctp/associola.c10
-rw-r--r--net/sctp/bind_addr.c12
-rw-r--r--net/sctp/crc32c.c38
-rw-r--r--net/sctp/debug.c32
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/input.c16
-rw-r--r--net/sctp/inqueue.c34
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/objcnt.c34
-rw-r--r--net/sctp/output.c12
-rw-r--r--net/sctp/outqueue.c30
-rw-r--r--net/sctp/primitive.c4
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/protocol.c24
-rw-r--r--net/sctp/sm_make_chunk.c88
-rw-r--r--net/sctp/sm_sideeffect.c68
-rw-r--r--net/sctp/sm_statefuns.c110
-rw-r--r--net/sctp/sm_statetable.c2
-rw-r--r--net/sctp/socket.c114
-rw-r--r--net/sctp/transport.c14
-rw-r--r--net/sctp/tsnmap.c2
-rw-r--r--net/sctp/ulpevent.c22
-rw-r--r--net/sctp/ulpqueue.c60
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c20
-rw-r--r--net/sunrpc/auth_gss/gss_generic_token.c10
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c4
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c4
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c48
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_unseal.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c6
-rw-r--r--net/sunrpc/cache.c64
-rw-r--r--net/sunrpc/clnt.c28
-rw-r--r--net/sunrpc/pmap_clnt.c4
-rw-r--r--net/sunrpc/sched.c8
-rw-r--r--net/sunrpc/svc.c24
-rw-r--r--net/sunrpc/svcauth.c4
-rw-r--r--net/sunrpc/svcauth_unix.c12
-rw-r--r--net/sunrpc/svcsock.c36
-rw-r--r--net/sunrpc/sysctl.c10
-rw-r--r--net/sunrpc/xdr.c6
-rw-r--r--net/sunrpc/xprt.c4
-rw-r--r--net/tipc/addr.c14
-rw-r--r--net/tipc/addr.h6
-rw-r--r--net/tipc/bcast.c98
-rw-r--r--net/tipc/bcast.h6
-rw-r--r--net/tipc/bearer.c58
-rw-r--r--net/tipc/bearer.h22
-rw-r--r--net/tipc/cluster.c22
-rw-r--r--net/tipc/cluster.h4
-rw-r--r--net/tipc/config.c16
-rw-r--r--net/tipc/config.h8
-rw-r--r--net/tipc/core.c18
-rw-r--r--net/tipc/core.h40
-rw-r--r--net/tipc/dbg.c56
-rw-r--r--net/tipc/dbg.h2
-rw-r--r--net/tipc/discover.c50
-rw-r--r--net/tipc/discover.h2
-rw-r--r--net/tipc/eth_media.c58
-rw-r--r--net/tipc/handler.c6
-rw-r--r--net/tipc/link.c394
-rw-r--r--net/tipc/link.h32
-rw-r--r--net/tipc/msg.c2
-rw-r--r--net/tipc/msg.h138
-rw-r--r--net/tipc/name_distr.c64
-rw-r--r--net/tipc/name_distr.h2
-rw-r--r--net/tipc/name_table.c106
-rw-r--r--net/tipc/name_table.h10
-rw-r--r--net/tipc/net.c74
-rw-r--r--net/tipc/net.h4
-rw-r--r--net/tipc/netlink.c16
-rw-r--r--net/tipc/node.c220
-rw-r--r--net/tipc/node.h8
-rw-r--r--net/tipc/node_subscr.c6
-rw-r--r--net/tipc/node_subscr.h2
-rw-r--r--net/tipc/port.c226
-rw-r--r--net/tipc/port.h28
-rw-r--r--net/tipc/ref.c24
-rw-r--r--net/tipc/ref.h8
-rw-r--r--net/tipc/socket.c426
-rw-r--r--net/tipc/subscr.c82
-rw-r--r--net/tipc/subscr.h16
-rw-r--r--net/tipc/user_reg.c22
-rw-r--r--net/tipc/user_reg.h2
-rw-r--r--net/tipc/zone.c4
-rw-r--r--net/tipc/zone.h4
-rw-r--r--net/unix/af_unix.c46
-rw-r--r--net/unix/garbage.c14
-rw-r--r--net/wanrouter/af_wanpipe.c514
-rw-r--r--net/wanrouter/wanmain.c18
-rw-r--r--net/wanrouter/wanproc.c4
-rw-r--r--net/x25/af_x25.c64
-rw-r--r--net/x25/sysctl_net_x25.c10
-rw-r--r--net/x25/x25_dev.c6
-rw-r--r--net/x25/x25_facilities.c10
-rw-r--r--net/x25/x25_in.c8
-rw-r--r--net/x25/x25_link.c6
-rw-r--r--net/x25/x25_out.c10
-rw-r--r--net/x25/x25_proc.c8
-rw-r--r--net/x25/x25_route.c2
-rw-r--r--net/x25/x25_timer.c6
-rw-r--r--net/xfrm/xfrm_algo.c94
-rw-r--r--net/xfrm/xfrm_input.c2
-rw-r--r--net/xfrm/xfrm_policy.c40
-rw-r--r--net/xfrm/xfrm_state.c18
-rw-r--r--net/xfrm/xfrm_user.c6
535 files changed, 9037 insertions, 9034 deletions
diff --git a/net/802/fc.c b/net/802/fc.c
index 2a27e37bc4cb..d64e6a502958 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -1,6 +1,6 @@
 /*
  * NET3: Fibre Channel device handling subroutines
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -31,18 +31,18 @@
 #include <net/arp.h>

 /*
  * Put the headers on a Fibre Channel packet.
  */

 static int fc_header(struct sk_buff *skb, struct net_device *dev,
  unsigned short type,
  void *daddr, void *saddr, unsigned len)
 {
  struct fch_hdr *fch;
  int hdr_len;

  /*
   * Add the 802.2 SNAP header if IP as the IPv4 code calls
   * dev->hard_header directly.
   */
  if (type == ETH_P_IP || type == ETH_P_ARP)
@@ -60,7 +60,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev,
  else
  {
   hdr_len = sizeof(struct fch_hdr);
   fch = (struct fch_hdr *)skb_push(skb, hdr_len);
  }

  if(saddr)
@@ -68,20 +68,20 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev,
  else
   memcpy(fch->saddr,dev->dev_addr,dev->addr_len);

  if(daddr)
  {
   memcpy(fch->daddr,daddr,dev->addr_len);
   return(hdr_len);
  }
  return -hdr_len;
 }

 /*
  * A neighbour discovery of some species (eg arp) has completed. We
  * can now send the packet.
  */

 static int fc_rebuild_header(struct sk_buff *skb)
 {
  struct fch_hdr *fch=(struct fch_hdr *)skb->data;
  struct fcllc *fcllc=(struct fcllc *)(skb->data+sizeof(struct fch_hdr));
@@ -100,7 +100,7 @@ static void fc_setup(struct net_device *dev)
 {
  dev->hard_header = fc_header;
  dev->rebuild_header = fc_rebuild_header;

  dev->type = ARPHRD_IEEE802;
  dev->hard_header_len = FC_HLEN;
  dev->mtu = 2024;
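Aside (illustrative, not part of the patch): the fc_header() hunk above relies on the legacy hard_header return convention. A hypothetical, simplified sketch of that convention follows; example_header() is a made-up name, not a real kernel function.

/*
 * Return the header length when the frame is ready to send, and the
 * negated length when the destination is still unresolved, so that
 * rebuild_header() runs once neighbour discovery (e.g. ARP) completes.
 */
static int example_header(const void *daddr, int hdr_len)
{
	if (daddr)
		return hdr_len;   /* header complete, frame can go out  */
	return -hdr_len;          /* resolve daddr first, rebuild later */
}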
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 797c6d961deb..0b98fe2fa2f6 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -15,7 +15,7 @@
  * Mark Evans, <evansmp@uhura.aston.ac.uk>
  * Florian La Roche, <rzsfl@rz.uni-sb.de>
  * Alan Cox, <gw4pts@gw4pts.ampr.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -25,7 +25,7 @@
  * Alan Cox : New arp/rebuild header
  * Maciej W. Rozycki : IPv6 support
  */

 #include <linux/module.h>
 #include <asm/system.h>
 #include <linux/types.h>
@@ -57,7 +57,7 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
 {
  int hl = FDDI_K_SNAP_HLEN;
  struct fddihdr *fddi;

  if(type != ETH_P_IP && type != ETH_P_IPV6 && type != ETH_P_ARP)
   hl=FDDI_K_8022_HLEN-3;
  fddi = (struct fddihdr *)skb_push(skb, hl);
@@ -74,7 +74,7 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
  }

  /* Set the source and destination hardware addresses */

  if (saddr != NULL)
   memcpy(fddi->saddr, saddr, dev->addr_len);
  else
@@ -95,7 +95,7 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
  * (or in future other address resolution) has completed on
  * this sk_buff. We now let ARP fill in the other fields.
  */

 static int fddi_rebuild_header(struct sk_buff *skb)
 {
  struct fddihdr *fddi = (struct fddihdr *)skb->data;
@@ -105,7 +105,7 @@ static int fddi_rebuild_header(struct sk_buff *skb)
   /* Try to get ARP to resolve the header and fill destination address */
   return arp_find(fddi->daddr, skb);
  else
 #endif
  {
   printk("%s: Don't know how to resolve type %04X addresses.\n",
    skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
@@ -120,19 +120,19 @@ static int fddi_rebuild_header(struct sk_buff *skb)
  * up. It's used to fill in specific skb fields and to set
  * the proper pointer to the start of packet data (skb->data).
  */

 __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
  struct fddihdr *fddi = (struct fddihdr *)skb->data;
  __be16 type;

  /*
   * Set mac.raw field to point to FC byte, set data field to point
   * to start of packet data. Assume 802.2 SNAP frames for now.
   */

  skb->mac.raw = skb->data; /* point to frame control (FC) */

  if(fddi->hdr.llc_8022_1.dsap==0xe0)
  {
   skb_pull(skb, FDDI_K_8022_HLEN-3);
@@ -143,9 +143,9 @@ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
   skb_pull(skb, FDDI_K_SNAP_HLEN); /* adjust for 21 byte header */
   type=fddi->hdr.llc_snap.ethertype;
  }

  /* Set packet type based on destination address and flag settings */

  if (*fddi->daddr & 0x01)
  {
   if (memcmp(fddi->daddr, dev->broadcast, FDDI_K_ALEN) == 0)
@@ -153,7 +153,7 @@ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
   else
    skb->pkt_type = PACKET_MULTICAST;
  }

  else if (dev->flags & IFF_PROMISC)
  {
   if (memcmp(fddi->daddr, dev->dev_addr, FDDI_K_ALEN))
@@ -187,7 +187,7 @@ static void fddi_setup(struct net_device *dev)
  dev->addr_len = FDDI_K_ALEN;
  dev->tx_queue_len = 100; /* Long queues on FDDI */
  dev->flags = IFF_BROADCAST | IFF_MULTICAST;

  memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
 }

diff --git a/net/802/hippi.c b/net/802/hippi.c
index 579e2ddf5ebe..be0da59323b3 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -39,7 +39,7 @@
 #include <asm/system.h>

 /*
  * Create the HIPPI MAC header for an arbitrary protocol layer
  *
  * saddr=NULL means use device source address
  * daddr=NULL means leave destination address (eg unresolved arp)
@@ -104,8 +104,8 @@ static int hippi_rebuild_header(struct sk_buff *skb)
  /*
   * Only IP is currently supported
   */

  if(hip->snap.ethertype != __constant_htons(ETH_P_IP))
  {
   printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype));
   return 0;
@@ -122,11 +122,11 @@ static int hippi_rebuild_header(struct sk_buff *skb)
 /*
  * Determine the packet's protocol ID.
  */

 __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
  struct hippi_hdr *hip;

  hip = (struct hippi_hdr *) skb->data;

  /*
@@ -173,10 +173,10 @@ static int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
 {
  /* Never send broadcast/multicast ARP messages */
  p->mcast_probes = 0;

  /* In IPv6 unicast probes are valid even on NBMA,
   * because they are encapsulated in normal IPv6 protocol.
   * Should be a generic flag.
   */
  if (p->tbl->family != AF_INET6)
   p->ucast_probes = 0;
@@ -193,7 +193,7 @@ static void hippi_setup(struct net_device *dev)
  dev->hard_header_parse = NULL;
  dev->hard_header_cache = NULL;
  dev->header_cache_update = NULL;
  dev->neigh_setup = hippi_neigh_setup_dev;

  /*
   * We don't support HIPPI `ARP' for the time being, and probably
@@ -210,9 +210,9 @@ static void hippi_setup(struct net_device *dev)

  /*
   * HIPPI doesn't support broadcast+multicast and we only use
   * static ARP tables. ARP is disabled by hippi_neigh_setup_dev.
   */
  dev->flags = 0;
 }

 /**
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 270b9d2cae65..6e7c2120b83f 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -117,7 +117,7 @@ module_exit(snap_exit);
  */
 struct datalink_proto *register_snap_client(unsigned char *desc,
  int (*rcvfunc)(struct sk_buff *,
   struct net_device *,
   struct packet_type *,
   struct net_device *))
 {
diff --git a/net/802/tr.c b/net/802/tr.c
index 829deb41ce81..31509f613401 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -1,6 +1,6 @@
 /*
  * NET3: Token ring device handling subroutines
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -12,7 +12,7 @@
  * 22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
  * tr_header and tr_type_trans to handle passing IPX SNAP and
  * 802.2 through the correct layers. Eliminated tr_reformat.
  *
  */

 #include <asm/uaccess.h>
@@ -45,7 +45,7 @@ static void rif_check_expire(unsigned long dummy);
 /*
  * Each RIF entry we learn is kept this way
  */

 struct rif_cache {
  unsigned char addr[TR_ALEN];
  int iface;
@@ -62,7 +62,7 @@ struct rif_cache {
  * We hash the RIF cache 32 ways. We do after all have to look it
  * up a lot.
  */

 static struct rif_cache *rif_table[RIF_TABLE_SIZE];

 static DEFINE_SPINLOCK(rif_lock);
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(rif_lock);
 /*
  * Garbage disposal timer.
  */

 static struct timer_list rif_timer;

 int sysctl_tr_rif_timeout = 60*10*HZ;
@@ -96,16 +96,16 @@ static inline unsigned long rif_hash(const unsigned char *addr)
  * Put the headers on a token ring packet. Token ring source routing
  * makes this a little more exciting than on ethernet.
  */

 static int tr_header(struct sk_buff *skb, struct net_device *dev,
  unsigned short type,
  void *daddr, void *saddr, unsigned len)
 {
  struct trh_hdr *trh;
  int hdr_len;

  /*
   * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
   * dev->hard_header directly.
   */
  if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
@@ -123,7 +123,7 @@ static int tr_header(struct sk_buff *skb, struct net_device *dev,
  else
  {
   hdr_len = sizeof(struct trh_hdr);
   trh = (struct trh_hdr *)skb_push(skb, hdr_len);
  }

  trh->ac=AC;
@@ -137,8 +137,8 @@ static int tr_header(struct sk_buff *skb, struct net_device *dev,
  /*
   * Build the destination and then source route the frame
   */

  if(daddr)
  {
   memcpy(trh->daddr,daddr,dev->addr_len);
   tr_source_route(skb,trh,dev);
@@ -147,13 +147,13 @@ static int tr_header(struct sk_buff *skb, struct net_device *dev,

  return -hdr_len;
 }

 /*
  * A neighbour discovery of some species (eg arp) has completed. We
  * can now send the packet.
  */

 static int tr_rebuild_header(struct sk_buff *skb)
 {
  struct trh_hdr *trh=(struct trh_hdr *)skb->data;
  struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
@@ -162,7 +162,7 @@ static int tr_rebuild_header(struct sk_buff *skb)
  /*
   * FIXME: We don't yet support IPv6 over token rings
   */

  if(trllc->ethertype != htons(ETH_P_IP)) {
   printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
   return 0;
@@ -172,39 +172,39 @@ static int tr_rebuild_header(struct sk_buff *skb)
  if(arp_find(trh->daddr, skb)) {
   return 1;
  }
  else
 #endif
  {
   tr_source_route(skb,trh,dev);
   return 0;
  }
 }

 /*
  * Some of this is a bit hackish. We intercept RIF information
  * used for source routing. We also grab IP directly and don't feed
  * it via SNAP.
  */

 __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
 {

  struct trh_hdr *trh=(struct trh_hdr *)skb->data;
  struct trllc *trllc;
  unsigned riflen=0;

  skb->mac.raw = skb->data;

  if(trh->saddr[0] & TR_RII)
   riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

  trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

  skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

  if(*trh->daddr & 0x80)
  {
   if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
    skb->pkt_type=PACKET_BROADCAST;
   else
    skb->pkt_type=PACKET_MULTICAST;
@@ -213,7 +213,7 @@ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
  {
   skb->pkt_type=PACKET_MULTICAST;
  }
  else if(dev->flags & IFF_PROMISC)
  {
   if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
    skb->pkt_type=PACKET_OTHERHOST;
@@ -221,10 +221,10 @@ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)

  if ((skb->pkt_type != PACKET_BROADCAST) &&
      (skb->pkt_type != PACKET_MULTICAST))
   tr_add_rif_info(trh,dev) ;

  /*
   * Strip the SNAP header from ARP packets since we don't
   * pass them through to the 802.2/SNAP layers.
   */

@@ -241,32 +241,32 @@ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
 }

 /*
  * We try to do source routing...
  */

 void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *dev)
 {
  int slack;
  unsigned int hash;
  struct rif_cache *entry;
  unsigned char *olddata;
  unsigned long flags;
  static const unsigned char mcast_func_addr[]
   = {0xC0,0x00,0x00,0x04,0x00,0x00};

  spin_lock_irqsave(&rif_lock, flags);

  /*
   * Broadcasts are single route as stated in RFC 1042
   */
  if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
      (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) )
  {
   trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
    | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
   trh->saddr[0]|=TR_RII;
  }
  else
  {
   hash = rif_hash(trh->daddr);
   /*
@@ -277,7 +277,7 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
   /*
    * If we found an entry we can route the frame.
    */
   if(entry)
   {
 #if TR_SR_DEBUG
 printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
@@ -287,7 +287,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
    {
     trh->rcf=entry->rcf;
     memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
     trh->rcf^=htons(TR_RCF_DIR_BIT);
     trh->rcf&=htons(0x1fff); /* Issam Chehab <ichehab@madge1.demon.co.uk> */

     trh->saddr[0]|=TR_RII;
@@ -301,14 +301,14 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
    }
    entry->last_used=jiffies;
   }
   else
   {
    /*
     * Without the information we simply have to shout
     * on the wire. The replies should rapidly clean this
     * situation up.
     */
    trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
     | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
    trh->saddr[0]|=TR_RII;
 #if TR_SR_DEBUG
@@ -320,7 +320,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
  /* Compress the RIF here so we don't have to do it in the driver(s) */
  if (!(trh->saddr[0] & 0x80))
   slack = 18;
  else
   slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
  olddata = skb->data;
  spin_unlock_irqrestore(&rif_lock, flags);
@@ -333,7 +333,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
  * We have learned some new RIF information for our source
  * routing.
  */

 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 {
  unsigned int hash, rii_p = 0;
@@ -343,29 +343,29 @@ static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)

  spin_lock_irqsave(&rif_lock, flags);
  saddr0 = trh->saddr[0];

  /*
   * Firstly see if the entry exists
   */

  if(trh->saddr[0] & TR_RII)
  {
   trh->saddr[0]&=0x7f;
   if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
   {
    rii_p = 1;
   }
  }

  hash = rif_hash(trh->saddr);
  for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);

  if(entry==NULL)
  {
 #if TR_SR_DEBUG
 printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
  trh->saddr[0],trh->saddr[1],trh->saddr[2],
  trh->saddr[3],trh->saddr[4],trh->saddr[5],
  ntohs(trh->rcf));
 #endif
   /*
@@ -377,7 +377,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
    */
   entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);

   if(!entry)
   {
    printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
    spin_unlock_irqrestore(&rif_lock, flags);
@@ -400,13 +400,13 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
   {
    entry->local_ring = 1;
   }
  }
  else /* Y. Tahara added */
  {
   /*
    * Update existing entries
    */
   if (!entry->local_ring)
    if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
        !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
    {
@@ -417,9 +417,9 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
  ntohs(trh->rcf));
 #endif
     entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
     memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
    }
   entry->last_used=jiffies;
  }
  trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
  spin_unlock_irqrestore(&rif_lock, flags);
@@ -429,16 +429,16 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
  * Scan the cache with a timer and see what we need to throw out.
  */

 static void rif_check_expire(unsigned long dummy)
 {
  int i;
  unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;

  spin_lock_irqsave(&rif_lock, flags);

  for(i =0; i < RIF_TABLE_SIZE; i++) {
   struct rif_cache *entry, **pentry;

   pentry = rif_table+i;
   while((entry=*pentry) != NULL) {
    unsigned long expires
@@ -455,7 +455,7 @@ static void rif_check_expire(unsigned long dummy)
    }
   }
  }

  spin_unlock_irqrestore(&rif_lock, flags);

  mod_timer(&rif_timer, next_interval);
@@ -466,7 +466,7 @@ static void rif_check_expire(unsigned long dummy)
  * Generate the /proc/net information for the token ring RIF
  * routing.
  */

 #ifdef CONFIG_PROC_FS

 static struct rif_cache *rif_get_idx(loff_t pos)
@@ -475,7 +475,7 @@ static struct rif_cache *rif_get_idx(loff_t pos)
  struct rif_cache *entry;
  loff_t off = 0;

  for(i = 0; i < RIF_TABLE_SIZE; i++)
   for(entry = rif_table[i]; entry; entry = entry->next) {
    if (off == pos)
     return entry;
@@ -504,7 +504,7 @@ static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
   goto scan;
  }

  if (ent->next)
   return ent->next;

  i = rif_hash(ent->addr);
@@ -541,13 +541,13 @@ static int rif_seq_show(struct seq_file *seq, void *v)
   ttl/HZ);

  if (entry->local_ring)
   seq_puts(seq, "local\n");
  else {

   seq_printf(seq, "%04X", ntohs(entry->rcf));
   rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
   if (rcf_len)
    rcf_len >>= 1;
   for(j = 1; j < rcf_len; j++) {
    if(j==1) {
     segment=ntohs(entry->rseg[j-1])>>4;
@@ -559,7 +559,7 @@ static int rif_seq_show(struct seq_file *seq, void *v)
   }
   seq_putc(seq, '\n');
  }
  }
  return 0;
 }

@@ -591,7 +591,7 @@ static void tr_setup(struct net_device *dev)
  /*
   * Configure and register
   */

  dev->hard_header = tr_header;
  dev->rebuild_header = tr_rebuild_header;

@@ -600,7 +600,7 @@ static void tr_setup(struct net_device *dev)
  dev->mtu = 2000;
  dev->addr_len = TR_ALEN;
  dev->tx_queue_len = 100; /* Long queues on tr */

  memset(dev->broadcast,0xFF, TR_ALEN);

  /* New-style flags. */
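Aside (illustrative, not part of the patch): the tr.c hunks above show the RIF cache lookup pattern, a small chained hash table keyed on the 6-byte MAC address, with a timer expiring stale entries. The following is a self-contained sketch; the field layout and rif_hash() below are assumptions for the example (the real struct also carries iface, rcf, rseg[], last_used and local_ring, and uses its own hash function).

#include <string.h>

#define TR_ALEN        6
#define RIF_TABLE_SIZE 32

struct rif_cache {
	unsigned char addr[TR_ALEN];    /* learned source MAC          */
	struct rif_cache *next;         /* chain within a hash bucket  */
};

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static unsigned int rif_hash(const unsigned char *a)
{
	/* placeholder mix; any cheap hash of the six address bytes works */
	return (a[0] ^ a[1] ^ a[2] ^ a[3] ^ a[4] ^ a[5]) & (RIF_TABLE_SIZE - 1);
}

static struct rif_cache *rif_lookup(const unsigned char *addr)
{
	struct rif_cache *entry;

	for (entry = rif_table[rif_hash(addr)];
	     entry && memcmp(entry->addr, addr, TR_ALEN);
	     entry = entry->next)
		;
	/* NULL means the route to this station hasn't been learned yet;
	 * tr_source_route() then falls back to a limited broadcast. */
	return entry;
}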
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 18fcb9fa518d..c1c205fad4fb 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -5,7 +5,7 @@
  * Authors: Ben Greear <greearb@candelatech.com>
  * Please send support related email to: vlan@scry.wanfear.com
  * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
  *
  * Fixes:
  * Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
  * Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
@@ -72,7 +72,7 @@ static struct packet_type vlan_packet_type = {
 /*
  * Function vlan_proto_init (pro)
  *
  * Initialize VLAN protocol layer,
  *
  */
 static int __init vlan_proto_init(void)
@@ -87,7 +87,7 @@ static int __init vlan_proto_init(void)
  /* proc file system initialization */
  err = vlan_proc_init();
  if (err < 0) {
   printk(KERN_ERR
    "%s %s: can't create entry in proc filesystem!\n",
    __FUNCTION__, VLAN_NAME);
   return err;
@@ -108,7 +108,7 @@ static int __init vlan_proto_init(void)
  return 0;
 }

 /* Cleanup all vlan devices
  * Note: devices that have been registered that but not
  * brought up will exist but have no module ref count.
  */
@@ -132,7 +132,7 @@ static void __exit vlan_cleanup_devices(void)
 /*
  * Module 'remove' entry point.
  * o delete /proc/net/router directory and static entries.
  */
 static void __exit vlan_cleanup_module(void)
 {
  int i;
@@ -184,7 +184,7 @@ struct net_device *__find_vlan_dev(struct net_device *real_dev,
  struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);

  if (grp)
   return grp->vlan_devices[VID];

  return NULL;
 }
@@ -269,7 +269,7 @@ static int unregister_vlan_dev(struct net_device *real_dev,
   }
  }

  return ret;
 }

 static int unregister_vlan_device(const char *vlan_IF_name)
@@ -295,7 +295,7 @@ static int unregister_vlan_device(const char *vlan_IF_name)
   if (ret == 1)
    ret = 0;
  } else {
   printk(VLAN_ERR
    "%s: ERROR: Tried to remove a non-vlan device "
    "with VLAN code, name: %s priv_flags: %hX\n",
    __FUNCTION__, dev->name, dev->priv_flags);
@@ -315,7 +315,7 @@ static int unregister_vlan_device(const char *vlan_IF_name)
 static void vlan_setup(struct net_device *new_dev)
 {
  SET_MODULE_OWNER(new_dev);

  /* new_dev->ifindex = 0; it will be set when added to
   * the global list.
   * iflink is set as well.
@@ -324,7 +324,7 @@ static void vlan_setup(struct net_device *new_dev)

  /* Make this thing known as a VLAN device */
  new_dev->priv_flags |= IFF_802_1Q_VLAN;

  /* Set us up to have no queue, as the underlying Hardware device
   * can do all the queueing we could want.
   */
@@ -461,7 +461,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
  default:
   snprintf(name, IFNAMSIZ, "vlan%.4i", VLAN_ID);
  };

  new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
   vlan_setup);

@@ -477,7 +477,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,

  new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
   (1<<__LINK_STATE_DORMANT))) |
   (1<<__LINK_STATE_PRESENT);

  /* need 4 bytes for extra VLAN header info,
   * hope the underlying device can handle it.
   */
@@ -496,7 +496,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
  VLAN_MEM_DBG("new_dev->priv malloc, addr: %p size: %i\n",
   new_dev->priv,
   sizeof(struct vlan_dev_info));

  memcpy(new_dev->broadcast, real_dev->broadcast, real_dev->addr_len);
501 memcpy(new_dev->dev_addr, real_dev->dev_addr, real_dev->addr_len); 501 memcpy(new_dev->dev_addr, real_dev->dev_addr, real_dev->addr_len);
502 new_dev->addr_len = real_dev->addr_len; 502 new_dev->addr_len = real_dev->addr_len;
@@ -521,7 +521,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
521 printk(VLAN_DBG "About to go find the group for idx: %i\n", 521 printk(VLAN_DBG "About to go find the group for idx: %i\n",
522 real_dev->ifindex); 522 real_dev->ifindex);
523#endif 523#endif
524 524
525 if (register_netdevice(new_dev)) 525 if (register_netdevice(new_dev))
526 goto out_free_newdev; 526 goto out_free_newdev;
527 527
@@ -543,22 +543,22 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
543 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); 543 grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
544 if (!grp) 544 if (!grp)
545 goto out_free_unregister; 545 goto out_free_unregister;
546 546
547 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 547 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
548 grp->real_dev_ifindex = real_dev->ifindex; 548 grp->real_dev_ifindex = real_dev->ifindex;
549 549
550 hlist_add_head_rcu(&grp->hlist, 550 hlist_add_head_rcu(&grp->hlist,
551 &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]); 551 &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]);
552 552
553 if (real_dev->features & NETIF_F_HW_VLAN_RX) 553 if (real_dev->features & NETIF_F_HW_VLAN_RX)
554 real_dev->vlan_rx_register(real_dev, grp); 554 real_dev->vlan_rx_register(real_dev, grp);
555 } 555 }
556 556
557 grp->vlan_devices[VLAN_ID] = new_dev; 557 grp->vlan_devices[VLAN_ID] = new_dev;
558 558
559 if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */ 559 if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */
560 printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n", 560 printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
561 new_dev->name); 561 new_dev->name);
562 562
563 if (real_dev->features & NETIF_F_HW_VLAN_FILTER) 563 if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
564 real_dev->vlan_rx_add_vid(real_dev, VLAN_ID); 564 real_dev->vlan_rx_add_vid(real_dev, VLAN_ID);
@@ -635,7 +635,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
635 vlandev = grp->vlan_devices[i]; 635 vlandev = grp->vlan_devices[i];
636 if (!vlandev) 636 if (!vlandev)
637 continue; 637 continue;
638 638
639 flgs = vlandev->flags; 639 flgs = vlandev->flags;
640 if (flgs & IFF_UP) 640 if (flgs & IFF_UP)
641 continue; 641 continue;
@@ -643,7 +643,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
643 dev_change_flags(vlandev, flgs | IFF_UP); 643 dev_change_flags(vlandev, flgs | IFF_UP);
644 } 644 }
645 break; 645 break;
646 646
647 case NETDEV_UNREGISTER: 647 case NETDEV_UNREGISTER:
648 /* Delete all VLANs for this dev. */ 648 /* Delete all VLANs for this dev. */
649 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 649 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
@@ -755,8 +755,8 @@ static int vlan_ioctl_handler(void __user *arg)
755 /* TODO: Implement 755 /* TODO: Implement
756 err = vlan_dev_get_ingress_priority(args); 756 err = vlan_dev_get_ingress_priority(args);
757 if (copy_to_user((void*)arg, &args, 757 if (copy_to_user((void*)arg, &args,
758 sizeof(struct vlan_ioctl_args))) { 758 sizeof(struct vlan_ioctl_args))) {
759 err = -EFAULT; 759 err = -EFAULT;
760 } 760 }
761 */ 761 */
762 err = -EINVAL; 762 err = -EINVAL;
@@ -765,8 +765,8 @@ static int vlan_ioctl_handler(void __user *arg)
765 /* TODO: Implement 765 /* TODO: Implement
766 err = vlan_dev_get_egress_priority(args.device1, &(args.args); 766 err = vlan_dev_get_egress_priority(args.device1, &(args.args);
767 if (copy_to_user((void*)arg, &args, 767 if (copy_to_user((void*)arg, &args,
768 sizeof(struct vlan_ioctl_args))) { 768 sizeof(struct vlan_ioctl_args))) {
769 err = -EFAULT; 769 err = -EFAULT;
770 } 770 }
771 */ 771 */
772 err = -EINVAL; 772 err = -EINVAL;
@@ -788,7 +788,7 @@ static int vlan_ioctl_handler(void __user *arg)
788 args.u.VID = vid; 788 args.u.VID = vid;
789 if (copy_to_user(arg, &args, 789 if (copy_to_user(arg, &args,
790 sizeof(struct vlan_ioctl_args))) { 790 sizeof(struct vlan_ioctl_args))) {
791 err = -EFAULT; 791 err = -EFAULT;
792 } 792 }
793 break; 793 break;
794 794
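The vlan.c hunks above hinge on one data structure: each real device gets a vlan_group, the group is found by hashing the real device's ifindex (vlan_grp_hashfn / hlist_add_head_rcu in register_vlan_device), and the VLAN device itself sits in the group's vlan_devices array indexed by VLAN ID (__find_vlan_dev). The standalone sketch below reproduces that two-step lookup in userspace; the struct layout, table sizes and helper names are illustrative stand-ins, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sizes; the kernel uses its own constants (e.g. VLAN_GROUP_ARRAY_LEN). */
#define VLAN_ID_MAX   4096
#define GRP_HASH_SIZE 32

struct vlan_group_sketch {
	int real_dev_ifindex;                  /* key: ifindex of the underlying device */
	const char *vlan_devices[VLAN_ID_MAX]; /* one slot per VLAN ID (device name here) */
	struct vlan_group_sketch *next;        /* hash-chain link */
};

static struct vlan_group_sketch *grp_hash[GRP_HASH_SIZE];

static unsigned int grp_hashfn(int ifindex)
{
	return (unsigned int)ifindex % GRP_HASH_SIZE;
}

/* Analogue of __vlan_find_group(): walk the chain for this ifindex. */
static struct vlan_group_sketch *find_group(int ifindex)
{
	struct vlan_group_sketch *g;

	for (g = grp_hash[grp_hashfn(ifindex)]; g; g = g->next)
		if (g->real_dev_ifindex == ifindex)
			return g;
	return NULL;
}

/* Analogue of __find_vlan_dev(): group lookup, then index by VLAN ID. */
static const char *find_vlan_dev(int ifindex, int vid)
{
	struct vlan_group_sketch *g = find_group(ifindex);

	return g ? g->vlan_devices[vid] : NULL;
}

int main(void)
{
	struct vlan_group_sketch *g = calloc(1, sizeof(*g));

	g->real_dev_ifindex = 2;            /* pretend the real device has ifindex 2 */
	g->vlan_devices[5] = "eth0.5";      /* register VLAN ID 5 on it */
	g->next = grp_hash[grp_hashfn(2)];
	grp_hash[grp_hashfn(2)] = g;

	printf("lookup(2, 5) -> %s\n", find_vlan_dev(2, 5));
	free(g);
	return 0;
}

Keeping an array slot per VLAN ID makes the per-packet lookup constant-time once the group for the receiving device has been found.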
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9ae3a14dd016..1976cdba8f72 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -9,8 +9,8 @@
9#define VLAN_ERR KERN_ERR 9#define VLAN_ERR KERN_ERR
10#define VLAN_INF KERN_INFO 10#define VLAN_INF KERN_INFO
11#define VLAN_DBG KERN_ALERT /* change these... to debug, having a hard time 11#define VLAN_DBG KERN_ALERT /* change these... to debug, having a hard time
12 * changing the log level at run-time..for some reason. 12 * changing the log level at run-time..for some reason.
13 */ 13 */
14 14
15/* 15/*
16 16
@@ -24,7 +24,7 @@ I'll bet they might prove useful again... --Ben
24*/ 24*/
25 25
26/* This way they don't do anything! */ 26/* This way they don't do anything! */
27#define VLAN_MEM_DBG(x, y, z) 27#define VLAN_MEM_DBG(x, y, z)
28#define VLAN_FMEM_DBG(x, y) 28#define VLAN_FMEM_DBG(x, y)
29 29
30 30
@@ -51,10 +51,10 @@ struct net_device *__find_vlan_dev(struct net_device* real_dev,
51/* found in vlan_dev.c */ 51/* found in vlan_dev.c */
52int vlan_dev_rebuild_header(struct sk_buff *skb); 52int vlan_dev_rebuild_header(struct sk_buff *skb);
53int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, 53int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
54 struct packet_type *ptype, struct net_device *orig_dev); 54 struct packet_type *ptype, struct net_device *orig_dev);
55int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, 55int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
56 unsigned short type, void *daddr, void *saddr, 56 unsigned short type, void *daddr, void *saddr,
57 unsigned len); 57 unsigned len);
58int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); 58int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
59int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); 59int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
60int vlan_dev_change_mtu(struct net_device *dev, int new_mtu); 60int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 60a508eb1945..2fc8fe2cb366 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -5,14 +5,14 @@
5 * Authors: Ben Greear <greearb@candelatech.com> 5 * Authors: Ben Greear <greearb@candelatech.com>
6 * Please send support related email to: vlan@scry.wanfear.com 6 * Please send support related email to: vlan@scry.wanfear.com
7 * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html 7 * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
8 * 8 *
9 * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com> 9 * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
10 * - reset skb->pkt_type on incoming packets when MAC was changed 10 * - reset skb->pkt_type on incoming packets when MAC was changed
11 * - see that changed MAC is saddr for outgoing packets 11 * - see that changed MAC is saddr for outgoing packets
12 * Oct 20, 2001: Ard van Breeman: 12 * Oct 20, 2001: Ard van Breeman:
13 * - Fix MC-list, finally. 13 * - Fix MC-list, finally.
14 * - Flush MC-list on VLAN destroy. 14 * - Flush MC-list on VLAN destroy.
15 * 15 *
16 * 16 *
17 * This program is free software; you can redistribute it and/or 17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License 18 * modify it under the terms of the GNU General Public License
@@ -58,12 +58,12 @@ int vlan_dev_rebuild_header(struct sk_buff *skb)
58 58
59 /* TODO: Confirm this will work with VLAN headers... */ 59 /* TODO: Confirm this will work with VLAN headers... */
60 return arp_find(veth->h_dest, skb); 60 return arp_find(veth->h_dest, skb);
61#endif 61#endif
62 default: 62 default:
63 printk(VLAN_DBG 63 printk(VLAN_DBG
64 "%s: unable to resolve type %X addresses.\n", 64 "%s: unable to resolve type %X addresses.\n",
65 dev->name, ntohs(veth->h_vlan_encapsulated_proto)); 65 dev->name, ntohs(veth->h_vlan_encapsulated_proto));
66 66
67 memcpy(veth->h_source, dev->dev_addr, ETH_ALEN); 67 memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
68 break; 68 break;
69 }; 69 };
@@ -91,7 +91,7 @@ static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
91} 91}
92 92
93/* 93/*
94 * Determine the packet's protocol ID. The rule here is that we 94 * Determine the packet's protocol ID. The rule here is that we
95 * assume 802.3 if the type field is short enough to be a length. 95 * assume 802.3 if the type field is short enough to be a length.
96 * This is normal practice and works for any 'now in use' protocol. 96 * This is normal practice and works for any 'now in use' protocol.
97 * 97 *
@@ -113,7 +113,7 @@ static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
113 * 113 *
114 */ 114 */
115int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, 115int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
116 struct packet_type* ptype, struct net_device *orig_dev) 116 struct packet_type* ptype, struct net_device *orig_dev)
117{ 117{
118 unsigned char *rawp = NULL; 118 unsigned char *rawp = NULL;
119 struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data); 119 struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
@@ -175,8 +175,8 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
175 175
176#ifdef VLAN_DEBUG 176#ifdef VLAN_DEBUG
177 printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n", 177 printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
178 __FUNCTION__, skb, dev->name, 178 __FUNCTION__, skb, dev->name,
179 VLAN_DEV_INFO(skb->dev)->real_dev->name, 179 VLAN_DEV_INFO(skb->dev)->real_dev->name,
180 skb->dev->name); 180 skb->dev->name);
181#endif 181#endif
182 kfree_skb(skb); 182 kfree_skb(skb);
@@ -191,7 +191,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
191 191
192#ifdef VLAN_DEBUG 192#ifdef VLAN_DEBUG
193 printk(VLAN_DBG "%s: priority: %lu for TCI: %hu (hbo)\n", 193 printk(VLAN_DBG "%s: priority: %lu for TCI: %hu (hbo)\n",
194 __FUNCTION__, (unsigned long)(skb->priority), 194 __FUNCTION__, (unsigned long)(skb->priority),
195 ntohs(vhdr->h_vlan_TCI)); 195 ntohs(vhdr->h_vlan_TCI));
196#endif 196#endif
197 197
@@ -207,7 +207,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
207 stats->multicast++; 207 stats->multicast++;
208 break; 208 break;
209 209
210 case PACKET_OTHERHOST: 210 case PACKET_OTHERHOST:
211 /* Our lower layer thinks this is not local, let's make sure. 211 /* Our lower layer thinks this is not local, let's make sure.
212 * This allows the VLAN to have a different MAC than the underlying 212 * This allows the VLAN to have a different MAC than the underlying
213 * device, and still route correctly. 213 * device, and still route correctly.
@@ -319,7 +319,7 @@ static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev
319} 319}
320 320
321/* 321/*
322 * Create the VLAN header for an arbitrary protocol layer 322 * Create the VLAN header for an arbitrary protocol layer
323 * 323 *
324 * saddr=NULL means use device source address 324 * saddr=NULL means use device source address
325 * daddr=NULL means leave destination address (eg unresolved arp) 325 * daddr=NULL means leave destination address (eg unresolved arp)
@@ -328,8 +328,8 @@ static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev
328 * physical devices. 328 * physical devices.
329 */ 329 */
330int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, 330int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
331 unsigned short type, void *daddr, void *saddr, 331 unsigned short type, void *daddr, void *saddr,
332 unsigned len) 332 unsigned len)
333{ 333{
334 struct vlan_hdr *vhdr; 334 struct vlan_hdr *vhdr;
335 unsigned short veth_TCI = 0; 335 unsigned short veth_TCI = 0;
@@ -346,7 +346,7 @@ int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
346 * fixes some programs that get confused when they see a VLAN device 346 * fixes some programs that get confused when they see a VLAN device
347 * sending a frame that is VLAN encoded (the consensus is that the VLAN 347 * sending a frame that is VLAN encoded (the consensus is that the VLAN
348 * device should look completely like an Ethernet device when the 348 * device should look completely like an Ethernet device when the
349 * REORDER_HEADER flag is set) The drawback to this is some extra 349 * REORDER_HEADER flag is set) The drawback to this is some extra
350 * header shuffling in the hard_start_xmit. Users can turn off this 350 * header shuffling in the hard_start_xmit. Users can turn off this
351 * REORDER behaviour with the vconfig tool. 351 * REORDER behaviour with the vconfig tool.
352 */ 352 */
@@ -553,7 +553,7 @@ int vlan_dev_set_egress_priority(char *dev_name, __u32 skb_prio, short vlan_prio
553 struct net_device *dev = dev_get_by_name(dev_name); 553 struct net_device *dev = dev_get_by_name(dev_name);
554 struct vlan_priority_tci_mapping *mp = NULL; 554 struct vlan_priority_tci_mapping *mp = NULL;
555 struct vlan_priority_tci_mapping *np; 555 struct vlan_priority_tci_mapping *np;
556 556
557 if (dev) { 557 if (dev) {
558 if (dev->priv_flags & IFF_802_1Q_VLAN) { 558 if (dev->priv_flags & IFF_802_1Q_VLAN) {
559 /* See if a priority mapping exists.. */ 559 /* See if a priority mapping exists.. */
@@ -610,13 +610,13 @@ int vlan_dev_set_vlan_flag(char *dev_name, __u32 flag, short flag_val)
610 return -EINVAL; 610 return -EINVAL;
611 } 611 }
612 } else { 612 } else {
613 printk(KERN_ERR 613 printk(KERN_ERR
614 "%s: %s is not a vlan device, priv_flags: %hX.\n", 614 "%s: %s is not a vlan device, priv_flags: %hX.\n",
615 __FUNCTION__, dev->name, dev->priv_flags); 615 __FUNCTION__, dev->name, dev->priv_flags);
616 dev_put(dev); 616 dev_put(dev);
617 } 617 }
618 } else { 618 } else {
619 printk(KERN_ERR "%s: Could not find device: %s\n", 619 printk(KERN_ERR "%s: Could not find device: %s\n",
620 __FUNCTION__, dev_name); 620 __FUNCTION__, dev_name);
621 } 621 }
622 622
@@ -700,7 +700,7 @@ int vlan_dev_set_mac_address(struct net_device *dev, void *addr_struct_p)
700} 700}
701 701
702static inline int vlan_dmi_equals(struct dev_mc_list *dmi1, 702static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
703 struct dev_mc_list *dmi2) 703 struct dev_mc_list *dmi2)
704{ 704{
705 return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) && 705 return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
706 (memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0)); 706 (memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
@@ -810,7 +810,7 @@ int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
810 case SIOCGMIIPHY: 810 case SIOCGMIIPHY:
811 case SIOCGMIIREG: 811 case SIOCGMIIREG:
812 case SIOCSMIIREG: 812 case SIOCSMIIREG:
813 if (real_dev->do_ioctl && netif_device_present(real_dev)) 813 if (real_dev->do_ioctl && netif_device_present(real_dev))
814 err = real_dev->do_ioctl(real_dev, &ifrr, cmd); 814 err = real_dev->do_ioctl(real_dev, &ifrr, cmd);
815 break; 815 break;
816 816
@@ -818,7 +818,7 @@ int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
818 err = dev_ethtool(&ifrr); 818 err = dev_ethtool(&ifrr);
819 } 819 }
820 820
821 if (!err) 821 if (!err)
822 ifr->ifr_ifru = ifrr.ifr_ifru; 822 ifr->ifr_ifru = ifrr.ifr_ifru;
823 823
824 return err; 824 return err;
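vlan_dev_hard_header(), whose comment above describes creating the VLAN header for an arbitrary protocol layer, writes four extra bytes: a 16-bit TCI built from the egress priority and the VLAN ID, followed by the encapsulated protocol. The sketch below shows that packing under the standard 802.1Q layout (3-bit priority, 1-bit CFI, 12-bit ID); the helper and struct names are illustrative, and in the kernel the mapping from skb priority to TCI bits goes through vlan_dev_get_egress_qos_mask() rather than a raw shift.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htons(), ntohs() */

/*
 * Layout of the 4 extra bytes of VLAN encapsulation: 16-bit TCI followed by
 * the encapsulated protocol, both in network byte order. Field names follow
 * the vlan_hdr usage visible in the diff; this helper is an illustration,
 * not the kernel function.
 */
struct vlan_hdr_sketch {
	uint16_t h_vlan_TCI;
	uint16_t h_vlan_encapsulated_proto;
};

static struct vlan_hdr_sketch build_vlan_hdr(unsigned prio, unsigned vid,
					     uint16_t encap_proto)
{
	struct vlan_hdr_sketch vh;
	uint16_t tci = (uint16_t)(((prio & 0x7) << 13) | (vid & 0x0FFF));

	vh.h_vlan_TCI = htons(tci);
	vh.h_vlan_encapsulated_proto = htons(encap_proto);
	return vh;
}

int main(void)
{
	/* Priority 5, VLAN ID 100, carrying IPv4 (0x0800). */
	struct vlan_hdr_sketch vh = build_vlan_hdr(5, 100, 0x0800);

	printf("TCI=0x%04X proto=0x%04X\n",
	       ntohs(vh.h_vlan_TCI), ntohs(vh.h_vlan_encapsulated_proto));
	return 0;
}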
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index a8fc0de1f969..1b72c9854d6f 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -51,7 +51,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *v);
51 51
52 52
53/* 53/*
54 * Names of the proc directory entries 54 * Names of the proc directory entries
55 */ 55 */
56 56
57static const char name_root[] = "vlan"; 57static const char name_root[] = "vlan";
@@ -66,7 +66,7 @@ static const char name_conf[] = "config";
66 */ 66 */
67 67
68/* 68/*
69 * Generic /proc/net/vlan/<file> file and inode operations 69 * Generic /proc/net/vlan/<file> file and inode operations
70 */ 70 */
71 71
72static struct seq_operations vlan_seq_ops = { 72static struct seq_operations vlan_seq_ops = {
@@ -111,13 +111,13 @@ static struct file_operations vlandev_fops = {
111 */ 111 */
112 112
113/* 113/*
114 * /proc/net/vlan 114 * /proc/net/vlan
115 */ 115 */
116 116
117static struct proc_dir_entry *proc_vlan_dir; 117static struct proc_dir_entry *proc_vlan_dir;
118 118
119/* 119/*
120 * /proc/net/vlan/config 120 * /proc/net/vlan/config
121 */ 121 */
122 122
123static struct proc_dir_entry *proc_vlan_conf; 123static struct proc_dir_entry *proc_vlan_conf;
@@ -238,15 +238,15 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
238 */ 238 */
239 239
240/* starting at dev, find a VLAN device */ 240/* starting at dev, find a VLAN device */
241static struct net_device *vlan_skip(struct net_device *dev) 241static struct net_device *vlan_skip(struct net_device *dev)
242{ 242{
243 while (dev && !(dev->priv_flags & IFF_802_1Q_VLAN)) 243 while (dev && !(dev->priv_flags & IFF_802_1Q_VLAN))
244 dev = dev->next; 244 dev = dev->next;
245 245
246 return dev; 246 return dev;
247} 247}
248 248
249/* start read of /proc/net/vlan/config */ 249/* start read of /proc/net/vlan/config */
250static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) 250static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
251{ 251{
252 struct net_device *dev; 252 struct net_device *dev;
@@ -256,19 +256,19 @@ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
256 256
257 if (*pos == 0) 257 if (*pos == 0)
258 return SEQ_START_TOKEN; 258 return SEQ_START_TOKEN;
259 259
260 for (dev = vlan_skip(dev_base); dev && i < *pos; 260 for (dev = vlan_skip(dev_base); dev && i < *pos;
261 dev = vlan_skip(dev->next), ++i); 261 dev = vlan_skip(dev->next), ++i);
262 262
263 return (i == *pos) ? dev : NULL; 263 return (i == *pos) ? dev : NULL;
264} 264}
265 265
266static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) 266static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
267{ 267{
268 ++*pos; 268 ++*pos;
269 269
270 return vlan_skip((v == SEQ_START_TOKEN) 270 return vlan_skip((v == SEQ_START_TOKEN)
271 ? dev_base 271 ? dev_base
272 : ((struct net_device *)v)->next); 272 : ((struct net_device *)v)->next);
273} 273}
274 274
@@ -287,13 +287,13 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
287 if (vlan_name_type < ARRAY_SIZE(vlan_name_type_str)) 287 if (vlan_name_type < ARRAY_SIZE(vlan_name_type_str))
288 nmtype = vlan_name_type_str[vlan_name_type]; 288 nmtype = vlan_name_type_str[vlan_name_type];
289 289
290 seq_printf(seq, "Name-Type: %s\n", 290 seq_printf(seq, "Name-Type: %s\n",
291 nmtype ? nmtype : "UNKNOWN" ); 291 nmtype ? nmtype : "UNKNOWN" );
292 } else { 292 } else {
293 const struct net_device *vlandev = v; 293 const struct net_device *vlandev = v;
294 const struct vlan_dev_info *dev_info = VLAN_DEV_INFO(vlandev); 294 const struct vlan_dev_info *dev_info = VLAN_DEV_INFO(vlandev);
295 295
296 seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, 296 seq_printf(seq, "%-15s| %d | %s\n", vlandev->name,
297 dev_info->vlan_id, dev_info->real_dev->name); 297 dev_info->vlan_id, dev_info->real_dev->name);
298 } 298 }
299 return 0; 299 return 0;
@@ -323,13 +323,13 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
323 seq_puts(seq, "\n"); 323 seq_puts(seq, "\n");
324 seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets); 324 seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets);
325 seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes); 325 seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes);
326 seq_printf(seq, fmt, "total headroom inc", 326 seq_printf(seq, fmt, "total headroom inc",
327 dev_info->cnt_inc_headroom_on_tx); 327 dev_info->cnt_inc_headroom_on_tx);
328 seq_printf(seq, fmt, "total encap on xmit", 328 seq_printf(seq, fmt, "total encap on xmit",
329 dev_info->cnt_encap_on_xmit); 329 dev_info->cnt_encap_on_xmit);
330 seq_printf(seq, "Device: %s", dev_info->real_dev->name); 330 seq_printf(seq, "Device: %s", dev_info->real_dev->name);
331 /* now show all PRIORITY mappings relating to this VLAN */ 331 /* now show all PRIORITY mappings relating to this VLAN */
332 seq_printf(seq, 332 seq_printf(seq,
333 "\nINGRESS priority mappings: 0:%lu 1:%lu 2:%lu 3:%lu 4:%lu 5:%lu 6:%lu 7:%lu\n", 333 "\nINGRESS priority mappings: 0:%lu 1:%lu 2:%lu 3:%lu 4:%lu 5:%lu 6:%lu 7:%lu\n",
334 dev_info->ingress_priority_map[0], 334 dev_info->ingress_priority_map[0],
335 dev_info->ingress_priority_map[1], 335 dev_info->ingress_priority_map[1],
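The /proc/net/vlan/config iterator above (vlan_skip, vlan_seq_start, vlan_seq_next) is a plain walk of the global device list that skips anything without the IFF_802_1Q_VLAN flag and counts entries until it reaches the requested position. A minimal userspace version of the same pattern, with illustrative names:

#include <stdio.h>

#define IFF_VLAN 0x1   /* stand-in for IFF_802_1Q_VLAN in priv_flags */

struct dev_sketch {
	const char *name;
	unsigned int priv_flags;
	struct dev_sketch *next;
};

/* Analogue of vlan_skip(): starting at dev, find the next VLAN device. */
static struct dev_sketch *vlan_skip(struct dev_sketch *dev)
{
	while (dev && !(dev->priv_flags & IFF_VLAN))
		dev = dev->next;
	return dev;
}

/* Analogue of vlan_seq_start(): return the pos'th VLAN device, 0-based here. */
static struct dev_sketch *vlan_at(struct dev_sketch *base, long pos)
{
	struct dev_sketch *dev;
	long i = 0;

	for (dev = vlan_skip(base); dev && i < pos;
	     dev = vlan_skip(dev->next), ++i)
		;
	return (i == pos) ? dev : NULL;
}

int main(void)
{
	struct dev_sketch v1 = { "eth0.5", IFF_VLAN, NULL };
	struct dev_sketch e0 = { "eth0",   0,        &v1  };
	struct dev_sketch v0 = { "eth1.2", IFF_VLAN, &e0  };
	long pos;

	for (pos = 0; pos < 3; pos++) {
		struct dev_sketch *d = vlan_at(&v0, pos);

		printf("pos %ld: %s\n", pos, d ? d->name : "none");
	}
	return 0;
}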
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index f3777ec5bcb9..27e845d260a9 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -895,7 +895,7 @@ struct aarp_iter_state {
895 895
896/* 896/*
897 * Get the aarp entry that is in the chain described 897 * Get the aarp entry that is in the chain described
898 * by the iterator. 898 * by the iterator.
899 * If pos is set then skip till that index. 899 * If pos is set then skip till that index.
900 * pos = 1 is the first entry 900 * pos = 1 is the first entry
901 */ 901 */
@@ -905,7 +905,7 @@ static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos)
905 struct aarp_entry **table = iter->table; 905 struct aarp_entry **table = iter->table;
906 loff_t off = 0; 906 loff_t off = 0;
907 struct aarp_entry *entry; 907 struct aarp_entry *entry;
908 908
909 rescan: 909 rescan:
910 while(ct < AARP_HASH_SIZE) { 910 while(ct < AARP_HASH_SIZE) {
911 for (entry = table[ct]; entry; entry = entry->next) { 911 for (entry = table[ct]; entry; entry = entry->next) {
@@ -950,9 +950,9 @@ static void *aarp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
950 ++*pos; 950 ++*pos;
951 951
952 /* first line after header */ 952 /* first line after header */
953 if (v == SEQ_START_TOKEN) 953 if (v == SEQ_START_TOKEN)
954 entry = iter_next(iter, NULL); 954 entry = iter_next(iter, NULL);
955 955
956 /* next entry in current bucket */ 956 /* next entry in current bucket */
957 else if (entry->next) 957 else if (entry->next)
958 entry = entry->next; 958 entry = entry->next;
@@ -986,7 +986,7 @@ static int aarp_seq_show(struct seq_file *seq, void *v)
986 unsigned long now = jiffies; 986 unsigned long now = jiffies;
987 987
988 if (v == SEQ_START_TOKEN) 988 if (v == SEQ_START_TOKEN)
989 seq_puts(seq, 989 seq_puts(seq,
990 "Address Interface Hardware Address" 990 "Address Interface Hardware Address"
991 " Expires LastSend Retry Status\n"); 991 " Expires LastSend Retry Status\n");
992 else { 992 else {
@@ -1014,7 +1014,7 @@ static int aarp_seq_show(struct seq_file *seq, void *v)
1014 : (iter->table == unresolved) ? "unresolved" 1014 : (iter->table == unresolved) ? "unresolved"
1015 : (iter->table == proxies) ? "proxies" 1015 : (iter->table == proxies) ? "proxies"
1016 : "unknown"); 1016 : "unknown");
1017 } 1017 }
1018 return 0; 1018 return 0;
1019} 1019}
1020 1020
@@ -1030,7 +1030,7 @@ static int aarp_seq_open(struct inode *inode, struct file *file)
1030 struct seq_file *seq; 1030 struct seq_file *seq;
1031 int rc = -ENOMEM; 1031 int rc = -ENOMEM;
1032 struct aarp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1032 struct aarp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1033 1033
1034 if (!s) 1034 if (!s)
1035 goto out; 1035 goto out;
1036 1036
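The aarp_iter_state machinery above resumes iteration across hash-bucket tables (resolved, unresolved, proxies) from a saved bucket, optionally skipping entries until a given position (pos = 1 is the first entry, per the comment). The sketch below keeps the same bucket-walk-plus-offset idea but, for brevity, over a single table; names and sizes are illustrative.

#include <stdio.h>

#define HASH_SIZE 4

struct entry_sketch {
	const char *name;
	struct entry_sketch *next;
};

struct iter_sketch {
	struct entry_sketch **table;  /* current hash table */
	int bucket;                   /* current bucket index */
};

/*
 * Analogue of iter_next(): walk every bucket from the saved position and,
 * when an offset is given, skip that many entries before returning one.
 * (The kernel version additionally falls through to the next table.)
 */
static struct entry_sketch *iter_next(struct iter_sketch *it, long *pos)
{
	long off = 0;
	int ct;

	for (ct = it->bucket; ct < HASH_SIZE; ct++) {
		struct entry_sketch *e;

		for (e = it->table[ct]; e; e = e->next) {
			if (!pos || ++off == *pos) {
				it->bucket = ct;
				return e;
			}
		}
	}
	return NULL;
}

int main(void)
{
	struct entry_sketch b = { "entry-b", NULL };
	struct entry_sketch a = { "entry-a", &b };
	struct entry_sketch c = { "entry-c", NULL };
	struct entry_sketch *table[HASH_SIZE] = { &a, NULL, &c, NULL };
	struct iter_sketch it = { table, 0 };
	long pos = 3;   /* pos = 1 is the first entry */
	struct entry_sketch *e = iter_next(&it, &pos);

	printf("entry #%ld: %s\n", pos, e ? e->name : "none");
	return 0;
}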
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 7ae4916cd26d..bc3015f277ba 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -292,7 +292,7 @@ int __init atalk_proc_init(void)
292 p->proc_fops = &atalk_seq_socket_fops; 292 p->proc_fops = &atalk_seq_socket_fops;
293 293
294 p = create_proc_entry("arp", S_IRUGO, atalk_proc_dir); 294 p = create_proc_entry("arp", S_IRUGO, atalk_proc_dir);
295 if (!p) 295 if (!p)
296 goto out_arp; 296 goto out_arp;
297 p->proc_fops = &atalk_seq_arp_fops; 297 p->proc_fops = &atalk_seq_arp_fops;
298 298
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 3a7052207708..113c175f1715 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -28,16 +28,16 @@
28 * Bradford Johnson : IP-over-DDP (experimental) 28 * Bradford Johnson : IP-over-DDP (experimental)
29 * Jay Schulist : Moved IP-over-DDP to its own 29 * Jay Schulist : Moved IP-over-DDP to its own
30 * driver file. (ipddp.c & ipddp.h) 30 * driver file. (ipddp.c & ipddp.h)
31 * Jay Schulist : Made work as module with 31 * Jay Schulist : Made work as module with
32 * AppleTalk drivers, cleaned it. 32 * AppleTalk drivers, cleaned it.
33 * Rob Newberry : Added proxy AARP and AARP 33 * Rob Newberry : Added proxy AARP and AARP
34 * procfs, moved probing to AARP 34 * procfs, moved probing to AARP
35 * module. 35 * module.
36 * Adrian Sun/ 36 * Adrian Sun/
37 * Michael Zuelsdorff : fix for net.0 packets. don't 37 * Michael Zuelsdorff : fix for net.0 packets. don't
38 * allow illegal ether/tokentalk 38 * allow illegal ether/tokentalk
39 * port assignment. we lose a 39 * port assignment. we lose a
40 * valid localtalk port as a 40 * valid localtalk port as a
41 * result. 41 * result.
42 * Arnaldo C. de Melo : Cleanup, in preparation for 42 * Arnaldo C. de Melo : Cleanup, in preparation for
43 * shared skb support 8) 43 * shared skb support 8)
@@ -48,7 +48,7 @@
48 * modify it under the terms of the GNU General Public License 48 * modify it under the terms of the GNU General Public License
49 * as published by the Free Software Foundation; either version 49 * as published by the Free Software Foundation; either version
50 * 2 of the License, or (at your option) any later version. 50 * 2 of the License, or (at your option) any later version.
51 * 51 *
52 */ 52 */
53 53
54#include <linux/capability.h> 54#include <linux/capability.h>
@@ -100,17 +100,17 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
100 if (to->sat_port != at->src_port) 100 if (to->sat_port != at->src_port)
101 continue; 101 continue;
102 102
103 if (to->sat_addr.s_net == ATADDR_ANYNET && 103 if (to->sat_addr.s_net == ATADDR_ANYNET &&
104 to->sat_addr.s_node == ATADDR_BCAST) 104 to->sat_addr.s_node == ATADDR_BCAST)
105 goto found; 105 goto found;
106 106
107 if (to->sat_addr.s_net == at->src_net && 107 if (to->sat_addr.s_net == at->src_net &&
108 (to->sat_addr.s_node == at->src_node || 108 (to->sat_addr.s_node == at->src_node ||
109 to->sat_addr.s_node == ATADDR_BCAST || 109 to->sat_addr.s_node == ATADDR_BCAST ||
110 to->sat_addr.s_node == ATADDR_ANYNODE)) 110 to->sat_addr.s_node == ATADDR_ANYNODE))
111 goto found; 111 goto found;
112 112
113 /* XXXX.0 -- we got a request for this router. make sure 113 /* XXXX.0 -- we got a request for this router. make sure
114 * that the node is appropriately set. */ 114 * that the node is appropriately set. */
115 if (to->sat_addr.s_node == ATADDR_ANYNODE && 115 if (to->sat_addr.s_node == ATADDR_ANYNODE &&
116 to->sat_addr.s_net != ATADDR_ANYNET && 116 to->sat_addr.s_net != ATADDR_ANYNET &&
@@ -314,7 +314,7 @@ static int atif_proxy_probe_device(struct atalk_iface *atif,
314 314
315 if (probe_node == ATADDR_ANYNODE) 315 if (probe_node == ATADDR_ANYNODE)
316 probe_node = jiffies & 0xFF; 316 probe_node = jiffies & 0xFF;
317 317
318 /* Scan the networks */ 318 /* Scan the networks */
319 for (netct = 0; netct <= netrange; netct++) { 319 for (netct = 0; netct <= netrange; netct++) {
320 /* Sweep the available nodes from a given start */ 320 /* Sweep the available nodes from a given start */
@@ -417,7 +417,7 @@ static struct atalk_iface *atalk_find_interface(__be16 net, int node)
417 if (node == ATADDR_ANYNODE && net != ATADDR_ANYNET && 417 if (node == ATADDR_ANYNODE && net != ATADDR_ANYNET &&
418 ntohs(iface->nets.nr_firstnet) <= ntohs(net) && 418 ntohs(iface->nets.nr_firstnet) <= ntohs(net) &&
419 ntohs(net) <= ntohs(iface->nets.nr_lastnet)) 419 ntohs(net) <= ntohs(iface->nets.nr_lastnet))
420 break; 420 break;
421 } 421 }
422 read_unlock_bh(&atalk_interfaces_lock); 422 read_unlock_bh(&atalk_interfaces_lock);
423 return iface; 423 return iface;
@@ -432,13 +432,13 @@ static struct atalk_iface *atalk_find_interface(__be16 net, int node)
432static struct atalk_route *atrtr_find(struct atalk_addr *target) 432static struct atalk_route *atrtr_find(struct atalk_addr *target)
433{ 433{
434 /* 434 /*
435 * we must search through all routes unless we find a 435 * we must search through all routes unless we find a
436 * host route, because some host routes might overlap 436 * host route, because some host routes might overlap
437 * network routes 437 * network routes
438 */ 438 */
439 struct atalk_route *net_route = NULL; 439 struct atalk_route *net_route = NULL;
440 struct atalk_route *r; 440 struct atalk_route *r;
441 441
442 read_lock_bh(&atalk_routes_lock); 442 read_lock_bh(&atalk_routes_lock);
443 for (r = atalk_routes; r; r = r->next) { 443 for (r = atalk_routes; r; r = r->next) {
444 if (!(r->flags & RTF_UP)) 444 if (!(r->flags & RTF_UP))
@@ -460,8 +460,8 @@ static struct atalk_route *atrtr_find(struct atalk_addr *target)
460 net_route = r; 460 net_route = r;
461 } 461 }
462 } 462 }
463 463
464 /* 464 /*
465 * if we found a network route but not a direct host 465 * if we found a network route but not a direct host
466 * route, then return it 466 * route, then return it
467 */ 467 */
@@ -540,15 +540,15 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
540 for (iface = atalk_interfaces; iface; iface = iface->next) { 540 for (iface = atalk_interfaces; iface; iface = iface->next) {
541 if (!riface && 541 if (!riface &&
542 ntohs(ga->sat_addr.s_net) >= 542 ntohs(ga->sat_addr.s_net) >=
543 ntohs(iface->nets.nr_firstnet) && 543 ntohs(iface->nets.nr_firstnet) &&
544 ntohs(ga->sat_addr.s_net) <= 544 ntohs(ga->sat_addr.s_net) <=
545 ntohs(iface->nets.nr_lastnet)) 545 ntohs(iface->nets.nr_lastnet))
546 riface = iface; 546 riface = iface;
547 547
548 if (ga->sat_addr.s_net == iface->address.s_net && 548 if (ga->sat_addr.s_net == iface->address.s_net &&
549 ga->sat_addr.s_node == iface->address.s_node) 549 ga->sat_addr.s_node == iface->address.s_node)
550 riface = iface; 550 riface = iface;
551 } 551 }
552 read_unlock_bh(&atalk_interfaces_lock); 552 read_unlock_bh(&atalk_interfaces_lock);
553 553
554 retval = -ENETUNREACH; 554 retval = -ENETUNREACH;
@@ -649,7 +649,7 @@ static int ddp_device_event(struct notifier_block *this, unsigned long event,
649{ 649{
650 if (event == NETDEV_DOWN) 650 if (event == NETDEV_DOWN)
651 /* Discard any use of this */ 651 /* Discard any use of this */
652 atalk_dev_down(ptr); 652 atalk_dev_down(ptr);
653 653
654 return NOTIFY_DONE; 654 return NOTIFY_DONE;
655} 655}
@@ -701,13 +701,13 @@ static int atif_ioctl(int cmd, void __user *arg)
701 */ 701 */
702 if ((dev->flags & IFF_POINTOPOINT) && 702 if ((dev->flags & IFF_POINTOPOINT) &&
703 atalk_find_interface(sa->sat_addr.s_net, 703 atalk_find_interface(sa->sat_addr.s_net,
704 sa->sat_addr.s_node)) { 704 sa->sat_addr.s_node)) {
705 printk(KERN_DEBUG "AppleTalk: point-to-point " 705 printk(KERN_DEBUG "AppleTalk: point-to-point "
706 "interface added with " 706 "interface added with "
707 "existing address\n"); 707 "existing address\n");
708 add_route = 0; 708 add_route = 0;
709 } 709 }
710 710
711 /* 711 /*
712 * Phase 1 is fine on LocalTalk but we don't do 712 * Phase 1 is fine on LocalTalk but we don't do
713 * EtherTalk phase 1. Anyone wanting to add it go ahead. 713 * EtherTalk phase 1. Anyone wanting to add it go ahead.
@@ -797,78 +797,78 @@ static int atif_ioctl(int cmd, void __user *arg)
797 sa->sat_addr.s_node = ATADDR_BCAST; 797 sa->sat_addr.s_node = ATADDR_BCAST;
798 break; 798 break;
799 799
800 case SIOCATALKDIFADDR: 800 case SIOCATALKDIFADDR:
801 case SIOCDIFADDR: 801 case SIOCDIFADDR:
802 if (!capable(CAP_NET_ADMIN)) 802 if (!capable(CAP_NET_ADMIN))
803 return -EPERM; 803 return -EPERM;
804 if (sa->sat_family != AF_APPLETALK) 804 if (sa->sat_family != AF_APPLETALK)
805 return -EINVAL; 805 return -EINVAL;
806 atalk_dev_down(dev); 806 atalk_dev_down(dev);
807 break; 807 break;
808 808
809 case SIOCSARP: 809 case SIOCSARP:
810 if (!capable(CAP_NET_ADMIN)) 810 if (!capable(CAP_NET_ADMIN))
811 return -EPERM; 811 return -EPERM;
812 if (sa->sat_family != AF_APPLETALK) 812 if (sa->sat_family != AF_APPLETALK)
813 return -EINVAL; 813 return -EINVAL;
814 if (!atif) 814 if (!atif)
815 return -EADDRNOTAVAIL; 815 return -EADDRNOTAVAIL;
816 816
817 /* 817 /*
818 * for now, we only support proxy AARP on ELAP; 818 * for now, we only support proxy AARP on ELAP;
819 * we should be able to do it for LocalTalk, too. 819 * we should be able to do it for LocalTalk, too.
820 */ 820 */
821 if (dev->type != ARPHRD_ETHER) 821 if (dev->type != ARPHRD_ETHER)
822 return -EPROTONOSUPPORT; 822 return -EPROTONOSUPPORT;
823 823
824 /* 824 /*
825 * atif points to the current interface on this network; 825 * atif points to the current interface on this network;
826 * we aren't concerned about its current status (at 826 * we aren't concerned about its current status (at
827 * least for now), but it has all the settings about 827 * least for now), but it has all the settings about
828 * the network we're going to probe. Consequently, it 828 * the network we're going to probe. Consequently, it
829 * must exist. 829 * must exist.
830 */ 830 */
831 if (!atif) 831 if (!atif)
832 return -EADDRNOTAVAIL; 832 return -EADDRNOTAVAIL;
833 833
834 nr = (struct atalk_netrange *)&(atif->nets); 834 nr = (struct atalk_netrange *)&(atif->nets);
835 /* 835 /*
836 * Phase 1 is fine on Localtalk but we don't do 836 * Phase 1 is fine on Localtalk but we don't do
837 * Ethertalk phase 1. Anyone wanting to add it go ahead. 837 * Ethertalk phase 1. Anyone wanting to add it go ahead.
838 */ 838 */
839 if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2) 839 if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
840 return -EPROTONOSUPPORT; 840 return -EPROTONOSUPPORT;
841 841
842 if (sa->sat_addr.s_node == ATADDR_BCAST || 842 if (sa->sat_addr.s_node == ATADDR_BCAST ||
843 sa->sat_addr.s_node == 254) 843 sa->sat_addr.s_node == 254)
844 return -EINVAL; 844 return -EINVAL;
845 845
 846 /* 846 /*
 847 * Check if the chosen address is used. If so we 847 * Check if the chosen address is used. If so we
 848 * error and ATCP will try another. 848 * error and ATCP will try another.
 849 */ 849 */
 850 if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0) 850 if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0)
 851 return -EADDRINUSE; 851 return -EADDRINUSE;
 852 852
 853 /* 853 /*
 854 * We now have an address on the local network, and 854 * We now have an address on the local network, and
855 * the AARP code will defend it for us until we take it 855 * the AARP code will defend it for us until we take it
856 * down. We don't set up any routes right now, because 856 * down. We don't set up any routes right now, because
857 * ATCP will install them manually via SIOCADDRT. 857 * ATCP will install them manually via SIOCADDRT.
858 */ 858 */
859 break; 859 break;
860 860
861 case SIOCDARP: 861 case SIOCDARP:
862 if (!capable(CAP_NET_ADMIN)) 862 if (!capable(CAP_NET_ADMIN))
863 return -EPERM; 863 return -EPERM;
864 if (sa->sat_family != AF_APPLETALK) 864 if (sa->sat_family != AF_APPLETALK)
865 return -EINVAL; 865 return -EINVAL;
866 if (!atif) 866 if (!atif)
867 return -EADDRNOTAVAIL; 867 return -EADDRNOTAVAIL;
868 868
869 /* give to aarp module to remove proxy entry */ 869 /* give to aarp module to remove proxy entry */
870 aarp_proxy_remove(atif->dev, &(sa->sat_addr)); 870 aarp_proxy_remove(atif->dev, &(sa->sat_addr));
871 return 0; 871 return 0;
872 } 872 }
873 873
874 return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0; 874 return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0;
@@ -899,7 +899,7 @@ static int atrtr_ioctl(unsigned int cmd, void __user *arg)
899 dev = __dev_get_by_name(name); 899 dev = __dev_get_by_name(name);
900 if (!dev) 900 if (!dev)
901 return -ENODEV; 901 return -ENODEV;
902 } 902 }
903 return atrtr_create(&rt, dev); 903 return atrtr_create(&rt, dev);
904 } 904 }
905 } 905 }
@@ -917,7 +917,7 @@ static int atrtr_ioctl(unsigned int cmd, void __user *arg)
917 * Checksum: This is 'optional'. It's quite likely also a good 917 * Checksum: This is 'optional'. It's quite likely also a good
918 * candidate for assembler hackery 8) 918 * candidate for assembler hackery 8)
919 */ 919 */
920static unsigned long atalk_sum_partial(const unsigned char *data, 920static unsigned long atalk_sum_partial(const unsigned char *data,
921 int len, unsigned long sum) 921 int len, unsigned long sum)
922{ 922{
923 /* This ought to be unwrapped neatly. I'll trust gcc for now */ 923 /* This ought to be unwrapped neatly. I'll trust gcc for now */
@@ -945,7 +945,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
945 if (copy > len) 945 if (copy > len)
946 copy = len; 946 copy = len;
947 sum = atalk_sum_partial(skb->data + offset, copy, sum); 947 sum = atalk_sum_partial(skb->data + offset, copy, sum);
948 if ( (len -= copy) == 0) 948 if ( (len -= copy) == 0)
949 return sum; 949 return sum;
950 950
951 offset += copy; 951 offset += copy;
@@ -1031,7 +1031,7 @@ static int atalk_create(struct socket *sock, int protocol)
1031 1031
1032 /* 1032 /*
1033 * We permit SOCK_DGRAM and RAW is an extension. It is trivial to do 1033 * We permit SOCK_DGRAM and RAW is an extension. It is trivial to do
1034 * and gives you the full ELAP frame. Should be handy for CAP 8) 1034 * and gives you the full ELAP frame. Should be handy for CAP 8)
1035 */ 1035 */
1036 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) 1036 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1037 goto out; 1037 goto out;
@@ -1196,14 +1196,14 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
1196 1196
1197 if (addr->sat_addr.s_node == ATADDR_BCAST && 1197 if (addr->sat_addr.s_node == ATADDR_BCAST &&
1198 !sock_flag(sk, SOCK_BROADCAST)) { 1198 !sock_flag(sk, SOCK_BROADCAST)) {
1199#if 1 1199#if 1
1200 printk(KERN_WARNING "%s is broken and did not set " 1200 printk(KERN_WARNING "%s is broken and did not set "
1201 "SO_BROADCAST. It will break when 2.2 is " 1201 "SO_BROADCAST. It will break when 2.2 is "
1202 "released.\n", 1202 "released.\n",
1203 current->comm); 1203 current->comm);
1204#else 1204#else
1205 return -EACCES; 1205 return -EACCES;
1206#endif 1206#endif
1207 } 1207 }
1208 1208
1209 if (sock_flag(sk, SOCK_ZAPPED)) 1209 if (sock_flag(sk, SOCK_ZAPPED))
@@ -1260,27 +1260,27 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
1260#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE) 1260#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
1261static __inline__ int is_ip_over_ddp(struct sk_buff *skb) 1261static __inline__ int is_ip_over_ddp(struct sk_buff *skb)
1262{ 1262{
1263 return skb->data[12] == 22; 1263 return skb->data[12] == 22;
1264} 1264}
1265 1265
1266static int handle_ip_over_ddp(struct sk_buff *skb) 1266static int handle_ip_over_ddp(struct sk_buff *skb)
1267{ 1267{
1268 struct net_device *dev = __dev_get_by_name("ipddp0"); 1268 struct net_device *dev = __dev_get_by_name("ipddp0");
1269 struct net_device_stats *stats; 1269 struct net_device_stats *stats;
1270 1270
1271 /* This needs to be able to handle ipddp"N" devices */ 1271 /* This needs to be able to handle ipddp"N" devices */
1272 if (!dev) 1272 if (!dev)
1273 return -ENODEV; 1273 return -ENODEV;
1274 1274
1275 skb->protocol = htons(ETH_P_IP); 1275 skb->protocol = htons(ETH_P_IP);
1276 skb_pull(skb, 13); 1276 skb_pull(skb, 13);
1277 skb->dev = dev; 1277 skb->dev = dev;
1278 skb->h.raw = skb->data; 1278 skb->h.raw = skb->data;
1279 1279
1280 stats = dev->priv; 1280 stats = dev->priv;
1281 stats->rx_packets++; 1281 stats->rx_packets++;
1282 stats->rx_bytes += skb->len + 13; 1282 stats->rx_bytes += skb->len + 13;
1283 netif_rx(skb); /* Send the SKB up to a higher place. */ 1283 netif_rx(skb); /* Send the SKB up to a higher place. */
1284 return 0; 1284 return 0;
1285} 1285}
1286#else 1286#else
@@ -1298,7 +1298,7 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
1298 1298
1299 /* 1299 /*
1300 * Don't route multicast, etc., packets, or packets sent to "this 1300 * Don't route multicast, etc., packets, or packets sent to "this
1301 * network" 1301 * network"
1302 */ 1302 */
1303 if (skb->pkt_type != PACKET_HOST || !ddp->deh_dnet) { 1303 if (skb->pkt_type != PACKET_HOST || !ddp->deh_dnet) {
1304 /* 1304 /*
@@ -1335,8 +1335,8 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
1335 ta.s_node = rt->gateway.s_node; 1335 ta.s_node = rt->gateway.s_node;
1336 } 1336 }
1337 1337
1338 /* Fix up skb->len field */ 1338 /* Fix up skb->len field */
1339 skb_trim(skb, min_t(unsigned int, origlen, 1339 skb_trim(skb, min_t(unsigned int, origlen,
1340 (rt->dev->hard_header_len + 1340 (rt->dev->hard_header_len +
1341 ddp_dl->header_length + (len_hops & 1023)))); 1341 ddp_dl->header_length + (len_hops & 1023))));
1342 1342
@@ -1358,12 +1358,12 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
1358 /* 22 bytes - 12 ether, 2 len, 3 802.2 5 snap */ 1358 /* 22 bytes - 12 ether, 2 len, 3 802.2 5 snap */
1359 struct sk_buff *nskb = skb_realloc_headroom(skb, 32); 1359 struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
1360 kfree_skb(skb); 1360 kfree_skb(skb);
1361 if (!nskb) 1361 if (!nskb)
1362 goto out; 1362 goto out;
1363 skb = nskb; 1363 skb = nskb;
1364 } else 1364 } else
1365 skb = skb_unshare(skb, GFP_ATOMIC); 1365 skb = skb_unshare(skb, GFP_ATOMIC);
1366 1366
1367 /* 1367 /*
1368 * If the buffer didn't vanish into the lack of space bitbucket we can 1368 * If the buffer didn't vanish into the lack of space bitbucket we can
1369 * send it. 1369 * send it.
@@ -1395,13 +1395,13 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1395 struct sock *sock; 1395 struct sock *sock;
1396 struct atalk_iface *atif; 1396 struct atalk_iface *atif;
1397 struct sockaddr_at tosat; 1397 struct sockaddr_at tosat;
1398 int origlen; 1398 int origlen;
1399 __u16 len_hops; 1399 __u16 len_hops;
1400 1400
1401 /* Don't mangle buffer if shared */ 1401 /* Don't mangle buffer if shared */
1402 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 1402 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
1403 goto out; 1403 goto out;
1404 1404
1405 /* Size check and make sure header is contiguous */ 1405 /* Size check and make sure header is contiguous */
1406 if (!pskb_may_pull(skb, sizeof(*ddp))) 1406 if (!pskb_may_pull(skb, sizeof(*ddp)))
1407 goto freeit; 1407 goto freeit;
@@ -1490,7 +1490,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
1490 goto freeit; 1490 goto freeit;
1491 1491
1492 /* Don't mangle buffer if shared */ 1492 /* Don't mangle buffer if shared */
1493 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 1493 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
1494 return 0; 1494 return 0;
1495 1495
1496 /* 1496 /*
@@ -1501,11 +1501,11 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
1501 1501
1502 /* Now fill in the long header */ 1502 /* Now fill in the long header */
1503 1503
1504 /* 1504 /*
1505 * These two first. The mac overlays the new source/dest 1505 * These two first. The mac overlays the new source/dest
1506 * network information so we MUST copy these before 1506 * network information so we MUST copy these before
1507 * we write the network numbers ! 1507 * we write the network numbers !
1508 */ 1508 */
1509 1509
1510 ddp->deh_dnode = skb->mac.raw[0]; /* From physical header */ 1510 ddp->deh_dnode = skb->mac.raw[0]; /* From physical header */
1511 ddp->deh_snode = skb->mac.raw[1]; /* From physical header */ 1511 ddp->deh_snode = skb->mac.raw[1]; /* From physical header */
@@ -1605,7 +1605,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1605 skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); 1605 skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
1606 if (!skb) 1606 if (!skb)
1607 return err; 1607 return err;
1608 1608
1609 skb->sk = sk; 1609 skb->sk = sk;
1610 skb_reserve(skb, ddp_dl->header_length); 1610 skb_reserve(skb, ddp_dl->header_length);
1611 skb_reserve(skb, dev->hard_header_len); 1611 skb_reserve(skb, dev->hard_header_len);
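The atrtr_find() comment above explains why the whole route list must be scanned: host routes may overlap network routes, so an exact host match is preferred and the first matching network route is only kept as a fallback. A compact userspace rendering of that preference logic follows; the real matching also masks nets and checks gateway/node details, so this is just the control flow, with illustrative types and flag values.

#include <stdio.h>

#define RTF_UP    0x1
#define RTF_HOST  0x2   /* route to a single host rather than a whole network */

struct at_addr_sketch {
	unsigned short s_net;
	unsigned char  s_node;
};

struct at_route_sketch {
	struct at_addr_sketch target;
	unsigned int flags;
	const char *name;
	struct at_route_sketch *next;
};

/*
 * Analogue of atrtr_find(): prefer an exact host route; otherwise remember
 * the first matching network route and fall back to it after the scan.
 */
static struct at_route_sketch *route_find(struct at_route_sketch *routes,
					  struct at_addr_sketch *target)
{
	struct at_route_sketch *net_route = NULL;
	struct at_route_sketch *r;

	for (r = routes; r; r = r->next) {
		if (!(r->flags & RTF_UP))
			continue;
		if (r->flags & RTF_HOST) {
			if (r->target.s_net == target->s_net &&
			    r->target.s_node == target->s_node)
				return r;        /* exact host match wins immediately */
		} else if (!net_route && r->target.s_net == target->s_net) {
			net_route = r;           /* remember, but keep looking for a host route */
		}
	}
	return net_route;
}

int main(void)
{
	struct at_route_sketch host = { { 10, 7 }, RTF_UP | RTF_HOST, "host 10.7", NULL  };
	struct at_route_sketch net  = { { 10, 0 }, RTF_UP,            "net 10",    &host };
	struct at_addr_sketch want  = { 10, 7 };
	struct at_route_sketch *r = route_find(&net, &want);

	/* Picks the host route even though the network route comes first. */
	printf("chose: %s\n", r ? r->name : "none");
	return 0;
}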
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c
index 1237e208e246..9e4dffc1e423 100644
--- a/net/appletalk/dev.c
+++ b/net/appletalk/dev.c
@@ -15,14 +15,14 @@ static int ltalk_change_mtu(struct net_device *dev, int mtu)
15} 15}
16 16
17static int ltalk_mac_addr(struct net_device *dev, void *addr) 17static int ltalk_mac_addr(struct net_device *dev, void *addr)
18{ 18{
19 return -EINVAL; 19 return -EINVAL;
20} 20}
21 21
22static void ltalk_setup(struct net_device *dev) 22static void ltalk_setup(struct net_device *dev)
23{ 23{
24 /* Fill in the fields of the device structure with localtalk-generic values. */ 24 /* Fill in the fields of the device structure with localtalk-generic values. */
25 25
26 dev->change_mtu = ltalk_change_mtu; 26 dev->change_mtu = ltalk_change_mtu;
27 dev->hard_header = NULL; 27 dev->hard_header = NULL;
28 dev->rebuild_header = NULL; 28 dev->rebuild_header = NULL;
@@ -34,8 +34,8 @@ static void ltalk_setup(struct net_device *dev)
34 dev->hard_header_len = LTALK_HLEN; 34 dev->hard_header_len = LTALK_HLEN;
35 dev->mtu = LTALK_MTU; 35 dev->mtu = LTALK_MTU;
36 dev->addr_len = LTALK_ALEN; 36 dev->addr_len = LTALK_ALEN;
37 dev->tx_queue_len = 10; 37 dev->tx_queue_len = 10;
38 38
39 dev->broadcast[0] = 0xFF; 39 dev->broadcast[0] = 0xFF;
40 40
41 dev->flags = IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP; 41 dev->flags = IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP;
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index 62f6ed1f2f98..f094a0879c16 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -30,15 +30,15 @@ static ssize_t show_address(struct class_device *cdev, char *buf)
30 30
31static ssize_t show_atmaddress(struct class_device *cdev, char *buf) 31static ssize_t show_atmaddress(struct class_device *cdev, char *buf)
32{ 32{
33 unsigned long flags; 33 unsigned long flags;
34 char *pos = buf; 34 char *pos = buf;
35 struct atm_dev *adev = to_atm_dev(cdev); 35 struct atm_dev *adev = to_atm_dev(cdev);
36 struct atm_dev_addr *aaddr; 36 struct atm_dev_addr *aaddr;
37 int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin; 37 int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
38 int i, j; 38 int i, j;
39 39
40 spin_lock_irqsave(&adev->lock, flags); 40 spin_lock_irqsave(&adev->lock, flags);
41 list_for_each_entry(aaddr, &adev->local, entry) { 41 list_for_each_entry(aaddr, &adev->local, entry) {
42 for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 42 for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
43 if (j == *fmt) { 43 if (j == *fmt) {
44 pos += sprintf(pos, "."); 44 pos += sprintf(pos, ".");
@@ -49,7 +49,7 @@ static ssize_t show_atmaddress(struct class_device *cdev, char *buf)
49 } 49 }
50 pos += sprintf(pos, "\n"); 50 pos += sprintf(pos, "\n");
51 } 51 }
52 spin_unlock_irqrestore(&adev->lock, flags); 52 spin_unlock_irqrestore(&adev->lock, flags);
53 53
54 return pos - buf; 54 return pos - buf;
55} 55}
@@ -61,7 +61,7 @@ static ssize_t show_carrier(struct class_device *cdev, char *buf)
61 61
62 pos += sprintf(pos, "%d\n", 62 pos += sprintf(pos, "%d\n",
63 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1); 63 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
64 64
65 return pos - buf; 65 return pos - buf;
66} 66}
67 67
@@ -86,7 +86,7 @@ static ssize_t show_link_rate(struct class_device *cdev, char *buf)
86 link_rate = adev->link_rate * 8 * 53; 86 link_rate = adev->link_rate * 8 * 53;
87 } 87 }
88 pos += sprintf(pos, "%d\n", link_rate); 88 pos += sprintf(pos, "%d\n", link_rate);
89 89
90 return pos - buf; 90 return pos - buf;
91} 91}
92 92
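show_atmaddress() above formats the 20-byte ATM end-system address with separators after byte groups of 1, 2, 10, 6 and 1 bytes, driven by the bin[] array. The sketch below reproduces that grouping loop; printing each byte as two hex digits is an assumption here, since the per-byte print is not part of the hunk.

#include <stdio.h>

#define ATM_ESA_LEN 20

/*
 * Analogue of the formatting loop in show_atmaddress(): emit the 20-byte
 * address as hex, inserting a dot after byte groups of 1, 2, 10, 6 and 1.
 */
static void print_atm_address(const unsigned char *addr)
{
	int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
	int i, j;

	for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
		if (j == *fmt) {
			putchar('.');
			++fmt;          /* advance to the next group size */
			j = 0;
		}
		printf("%02x", addr[i]);
	}
	putchar('\n');
}

int main(void)
{
	unsigned char esa[ATM_ESA_LEN];
	int i;

	for (i = 0; i < ATM_ESA_LEN; i++)
		esa[i] = (unsigned char)i;   /* dummy address 00 01 02 ... 13 */

	/* Prints 00.0102.030405060708090a0b0c.0d0e0f101112.13 */
	print_atm_address(esa);
	return 0;
}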
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 83a1c1b1d6cd..f949b5c74ec1 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -182,7 +182,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
182 ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc; 182 ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
183 DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev); 183 DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
184 if (!atm_may_send(atmvcc, skb->truesize)) { 184 if (!atm_may_send(atmvcc, skb->truesize)) {
185 /* we free this here for now, because we cannot know in a higher 185 /* we free this here for now, because we cannot know in a higher
186 layer whether the skb point it supplied wasn't freed yet. 186 layer whether the skb point it supplied wasn't freed yet.
187 now, it always is. 187 now, it always is.
188 */ 188 */
@@ -718,7 +718,7 @@ static void *br2684_seq_next(struct seq_file *seq, void *v, loff_t *pos)
718 718
719 ++*pos; 719 ++*pos;
720 720
721 brd = list_entry(brd->br2684_devs.next, 721 brd = list_entry(brd->br2684_devs.next,
722 struct br2684_dev, br2684_devs); 722 struct br2684_dev, br2684_devs);
723 return (&brd->br2684_devs != &br2684_devs) ? brd : NULL; 723 return (&brd->br2684_devs != &br2684_devs) ? brd : NULL;
724} 724}
diff --git a/net/atm/common.c b/net/atm/common.c
index a2878e92c3ab..282d761454ba 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -109,11 +109,11 @@ static inline int vcc_writable(struct sock *sk)
109 struct atm_vcc *vcc = atm_sk(sk);
110
111 return (vcc->qos.txtp.max_sdu +
112 atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
113}
114
115static void vcc_write_space(struct sock *sk)
116{
117 read_lock(&sk->sk_callback_lock);
118
119 if (vcc_writable(sk)) {
@@ -131,7 +131,7 @@ static struct proto vcc_proto = {
131 .owner = THIS_MODULE,
132 .obj_size = sizeof(struct atm_vcc),
133};
134
135int vcc_create(struct socket *sock, int protocol, int family)
136{
137 struct sock *sk;
@@ -359,7 +359,7 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
359 return error;
360 vcc->dev = dev;
361 write_lock_irq(&vcc_sklist_lock);
362 if (test_bit(ATM_DF_REMOVED, &dev->flags) ||
363 (error = find_ci(vcc, &vpi, &vci))) {
364 write_unlock_irq(&vcc_sklist_lock);
365 goto fail_module_put;
@@ -494,20 +494,20 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
494 if (!skb)
495 return error;
496
497 copied = skb->len;
498 if (copied > size) {
499 copied = size;
500 msg->msg_flags |= MSG_TRUNC;
501 }
502
503 error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
504 if (error)
505 return error;
506 sock_recv_timestamp(msg, sk, skb);
507 DPRINTK("RcvM %d -= %d\n", atomic_read(&sk->rmem_alloc), skb->truesize);
508 atm_return(vcc, skb->truesize);
509 skb_free_datagram(sk, skb);
510 return copied;
511}
512
513
@@ -675,7 +675,7 @@ static int check_qos(struct atm_qos *qos)
675 int error;
676
677 if (!qos->txtp.traffic_class && !qos->rxtp.traffic_class)
678 return -EINVAL;
679 if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
680 qos->txtp.traffic_class && qos->rxtp.traffic_class &&
681 qos->txtp.traffic_class != ATM_ANYCLASS &&
@@ -786,11 +786,11 @@ static int __init atm_init(void)
786 printk(KERN_ERR "atmsvc_init() failed with %d\n", error);
787 goto out_atmpvc_exit;
788 }
789 if ((error = atm_proc_init()) < 0) {
790 printk(KERN_ERR "atm_proc_init() failed with %d\n",error);
791 goto out_atmsvc_exit;
792 }
793 if ((error = atm_sysfs_init()) < 0) {
794 printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error);
795 goto out_atmproc_exit;
796 }
diff --git a/net/atm/common.h b/net/atm/common.h
index a422da7788fb..ad78c9e1117d 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -1,5 +1,5 @@
1/* net/atm/common.h - ATM sockets (common part for PVC and SVC) */
2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4
5
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 8c2022c3e81d..8ccee4591f65 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -76,7 +76,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
76 }
77 skb = skb_peek(&sk->sk_receive_queue);
78 error = put_user(skb ? skb->len : 0,
79 (int __user *)argp) ? -EFAULT : 0;
80 goto done;
81 }
82 case SIOCGSTAMP: /* borrowed from IP */
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 3fc0abeeaf34..57dc2ab1b65d 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1,5 +1,5 @@
1/*
2 * lec.c: Lan Emulation driver
3 *
4 * Marko Kiiskila <mkiiskila@yahoo.com>
5 */
@@ -1457,7 +1457,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
1457static void lec_arp_check_expire(struct work_struct *work);
1458static void lec_arp_expire_arp(unsigned long data);
1459
1460/*
1461 * Arp table funcs
1462 */
1463
@@ -1473,9 +1473,9 @@ static void lec_arp_init(struct lec_priv *priv)
1473 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1474 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1475 }
1476 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1477 INIT_HLIST_HEAD(&priv->lec_no_forward);
1478 INIT_HLIST_HEAD(&priv->mcast_fwds);
1479 spin_lock_init(&priv->lec_arp_lock);
1480 INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
1481 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
@@ -1770,7 +1770,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1770 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1771}
1772
1773/*
1774 * Find entry by mac_address
1775 */
1776static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
@@ -1949,7 +1949,7 @@ restart:
1949
1950/*
1951 * Try to find vcc where mac_address is attached.
1952 *
1953 */
1954static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1955 unsigned char *mac_to_find, int is_rdesc,
@@ -2075,7 +2075,7 @@ lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
2075}
2076
2077/*
2078 * Notifies: Response to arp_request (atm_addr != NULL)
2079 */
2080static void
2081lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr,
@@ -2176,7 +2176,7 @@ out:
2176}
2177
2178/*
2179 * Notifies: Vcc setup ready
2180 */
2181static void
2182lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data,
@@ -2380,7 +2380,7 @@ lec_set_flush_tran_id(struct lec_priv *priv,
2380 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2381 entry->flush_tran_id = tran_id;
2382 DPRINTK("Set flush transaction id to %lx for %p\n",
2383 tran_id, entry);
2384 }
2385 }
2386 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 99136babd535..b41cda7ea1e1 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -52,12 +52,12 @@ struct lane2_ops {
52
53/*
54 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
55 * frames.
56 *
57 * 1. Dix Ethernet EtherType frames encoded by placing EtherType
58 * field in h_type field. Data follows immediatelly after header.
59 * 2. LLC Data frames whose total length, including LLC field and data,
60 * but not padding required to meet the minimum data frame length,
61 * is less than 1536(0x0600) MUST be encoded by placing that length
62 * in the h_type field. The LLC field follows header immediatelly.
63 * 3. LLC data frames longer than this maximum MUST be encoded by placing
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index c18f73715ef9..cb3c004ff022 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -32,7 +32,7 @@
32#include "resources.h"
33
34/*
35 * mpc.c: Implementation of MPOA client kernel part
36 */
37
38#if 0
@@ -80,17 +80,17 @@ static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
80 0xaa, 0xaa, 0x03,
81 {0x00, 0x00, 0x5e},
82 {0x00, 0x03} /* For MPOA control PDUs */
83};
84static struct llc_snap_hdr llc_snap_mpoa_data = {
85 0xaa, 0xaa, 0x03,
86 {0x00, 0x00, 0x00},
87 {0x08, 0x00} /* This is for IP PDUs only */
88};
89static struct llc_snap_hdr llc_snap_mpoa_data_tagged = {
90 0xaa, 0xaa, 0x03,
91 {0x00, 0x00, 0x00},
92 {0x88, 0x4c} /* This is for tagged data PDUs */
93};
94
95static struct notifier_block mpoa_notifier = {
96 mpoa_event_listener,
@@ -106,12 +106,12 @@ static DEFINE_TIMER(mpc_timer, NULL, 0, 0);
106static struct mpoa_client *find_mpc_by_itfnum(int itf)
107{
108 struct mpoa_client *mpc;
109
110 mpc = mpcs; /* our global linked list */
111 while (mpc != NULL) {
112 if (mpc->dev_num == itf)
113 return mpc;
114 mpc = mpc->next;
115 }
116
117 return NULL; /* not found */
@@ -120,7 +120,7 @@ static struct mpoa_client *find_mpc_by_itfnum(int itf)
120static struct mpoa_client *find_mpc_by_vcc(struct atm_vcc *vcc)
121{
122 struct mpoa_client *mpc;
123
124 mpc = mpcs; /* our global linked list */
125 while (mpc != NULL) {
126 if (mpc->mpoad_vcc == vcc)
@@ -134,7 +134,7 @@ static struct mpoa_client *find_mpc_by_vcc(struct atm_vcc *vcc)
134static struct mpoa_client *find_mpc_by_lec(struct net_device *dev)
135{
136 struct mpoa_client *mpc;
137
138 mpc = mpcs; /* our global linked list */
139 while (mpc != NULL) {
140 if (mpc->dev == dev)
@@ -190,7 +190,7 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
190 }
191
192 return qos;
193}
194
195/*
196 * Returns 0 for failure
@@ -245,7 +245,7 @@ static struct net_device *find_lec_by_itfnum(int itf)
245
246 sprintf(name, "lec%d", itf);
247 dev = dev_get_by_name(name);
248
249 return dev;
250}
251
@@ -265,25 +265,25 @@ static struct mpoa_client *alloc_mpc(void)
265 mpc->parameters.mpc_p2 = MPC_P2;
266 memset(mpc->parameters.mpc_p3,0,sizeof(mpc->parameters.mpc_p3));
267 mpc->parameters.mpc_p4 = MPC_P4;
268 mpc->parameters.mpc_p5 = MPC_P5;
269 mpc->parameters.mpc_p6 = MPC_P6;
270
271 mpcs = mpc;
272
273 return mpc;
274}
275
276/*
277 *
278 * start_mpc() puts the MPC on line. All the packets destined
279 * to the lec underneath us are now being monitored and
280 * shortcuts will be established.
281 *
282 */
283static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
284{
285
286 dprintk("mpoa: (%s) start_mpc:\n", mpc->dev->name);
287 if (dev->hard_start_xmit == NULL) {
288 printk("mpoa: (%s) start_mpc: dev->hard_start_xmit == NULL, not starting\n",
289 dev->name);
@@ -297,8 +297,8 @@ static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
297
298static void stop_mpc(struct mpoa_client *mpc)
299{
300
301 dprintk("mpoa: (%s) stop_mpc:", mpc->dev->name);
302
303 /* Lets not nullify lec device's dev->hard_start_xmit */
304 if (mpc->dev->hard_start_xmit != mpc_send_packet) {
@@ -309,7 +309,7 @@ static void stop_mpc(struct mpoa_client *mpc)
309 mpc->dev->hard_start_xmit = mpc->old_hard_start_xmit;
310 mpc->old_hard_start_xmit = NULL;
311 /* close_shortcuts(mpc); ??? FIXME */
312
313 return;
314}
315
@@ -358,7 +358,7 @@ static void lane2_assoc_ind(struct net_device *dev, uint8_t *mac_addr,
358 uint8_t length, mpoa_device_type, number_of_mps_macs;
359 uint8_t *end_of_tlvs;
360 struct mpoa_client *mpc;
361
362 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
363 dprintk("mpoa: (%s) lane2_assoc_ind: received TLV(s), ", dev->name);
364 dprintk("total length of all TLVs %d\n", sizeoftlvs);
@@ -377,7 +377,7 @@ static void lane2_assoc_ind(struct net_device *dev, uint8_t *mac_addr,
377 printk("TLV value extends past its buffer, aborting parse\n");
378 return;
379 }
380
381 if (type == 0) {
382 printk("mpoa: (%s) lane2_assoc_ind: TLV type was 0, returning\n", dev->name);
383 return;
@@ -412,10 +412,10 @@ static void lane2_assoc_ind(struct net_device *dev, uint8_t *mac_addr,
412 continue; /* someone should read the spec */
413 }
414 dprintk("this MPS has %d MAC addresses\n", number_of_mps_macs);
415
416 /* ok, now we can go and tell our daemon the control address of MPS */
417 send_set_mps_ctrl_addr(tlvs, mpc);
418
419 tlvs = copy_macs(mpc, mac_addr, tlvs, number_of_mps_macs, mpoa_device_type);
420 if (tlvs == NULL) return;
421 }
@@ -474,7 +474,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
474 iph = (struct iphdr *)buff;
475 ipaddr = iph->daddr;
476
477 ddprintk("mpoa: (%s) send_via_shortcut: ipaddr 0x%x\n", mpc->dev->name, ipaddr);
478
479 entry = mpc->in_ops->get(ipaddr, mpc);
480 if (entry == NULL) {
@@ -483,15 +483,15 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
483 return 1;
484 }
485 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN){ /* threshold not exceeded or VCC not ready */
486 ddprintk("mpoa: (%s) send_via_shortcut: cache_hit: returns != OPEN\n", mpc->dev->name);
487 mpc->in_ops->put(entry);
488 return 1;
489 }
490
491 ddprintk("mpoa: (%s) send_via_shortcut: using shortcut\n", mpc->dev->name);
492 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
493 if (iph->ttl <= 1) {
494 ddprintk("mpoa: (%s) send_via_shortcut: IP ttl = %u, using LANE\n", mpc->dev->name, iph->ttl);
495 mpc->in_ops->put(entry);
496 return 1;
497 }
@@ -529,7 +529,7 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
529 struct mpoa_client *mpc;
530 struct ethhdr *eth;
531 int i = 0;
532
533 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
534 if(mpc == NULL) {
535 printk("mpoa: (%s) mpc_send_packet: no MPC found\n", dev->name);
@@ -549,7 +549,7 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
549
550 non_ip:
551 retval = mpc->old_hard_start_xmit(skb,dev);
552
553 return retval;
554}
555
@@ -569,11 +569,11 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
569 ipaddr = ioc_data.ipaddr;
570 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
571 return -EINVAL;
572
573 mpc = find_mpc_by_itfnum(ioc_data.dev_num);
574 if (mpc == NULL)
575 return -EINVAL;
576
577 if (ioc_data.type == MPC_SOCKET_INGRESS) {
578 in_entry = mpc->in_ops->get(ipaddr, mpc);
579 if (in_entry == NULL || in_entry->entry_state < INGRESS_RESOLVED) {
@@ -604,7 +604,7 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
604 struct mpoa_client *mpc;
605 in_cache_entry *in_entry;
606 eg_cache_entry *eg_entry;
607
608 mpc = find_mpc_by_lec(dev);
609 if (mpc == NULL) {
610 printk("mpoa: (%s) mpc_vcc_close: close for unknown MPC\n", dev->name);
@@ -640,14 +640,14 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
640 struct mpoa_client *mpc;
641 __be32 tag;
642 char *tmp;
643
644 ddprintk("mpoa: (%s) mpc_push:\n", dev->name);
645 if (skb == NULL) {
646 dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name);
647 mpc_vcc_close(vcc, dev);
648 return;
649 }
650
651 skb->dev = dev;
652 if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) {
653 struct sock *sk = sk_atm(vcc);
@@ -693,11 +693,11 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
693 dev_kfree_skb_any(skb);
694 return;
695 }
696
697 /*
698 * See if ingress MPC is using shortcut we opened as a return channel.
699 * This means we have a bi-directional vcc opened by us.
700 */
701 if (eg->shortcut == NULL) {
702 eg->shortcut = vcc;
703 printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name);
@@ -743,7 +743,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
743 struct mpoa_client *mpc;
744 struct lec_priv *priv;
745 int err;
746
747 if (mpcs == NULL) {
748 init_timer(&mpc_timer);
749 mpc_timer_refresh();
@@ -755,7 +755,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
755 return err;
756 }
757 }
758
759 mpc = find_mpc_by_itfnum(arg);
760 if (mpc == NULL) {
761 dprintk("mpoa: mpoad_attach: allocating new mpc for itf %d\n", arg);
@@ -776,7 +776,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
776 dev_put(mpc->dev);
777 mpc->dev = NULL;
778 } else
779 priv->lane2_ops->associate_indicator = lane2_assoc_ind;
780 }
781
782 mpc->mpoad_vcc = vcc;
@@ -788,7 +788,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
788 if (mpc->dev) {
789 char empty[ATM_ESA_LEN];
790 memset(empty, 0, ATM_ESA_LEN);
791
792 start_mpc(mpc, mpc->dev);
793 /* set address if mpcd e.g. gets killed and restarted.
794 * If we do not do it now we have to wait for the next LE_ARP
@@ -806,7 +806,7 @@ static void send_set_mps_ctrl_addr(char *addr, struct mpoa_client *mpc)
806 struct k_message mesg;
807
808 memcpy (mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
809
810 mesg.type = SET_MPS_CTRL_ADDR;
811 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
812 msg_to_mpoad(&mesg, mpc);
@@ -828,7 +828,7 @@ static void mpoad_close(struct atm_vcc *vcc)
828 printk("mpoa: mpoad_close: close for non-present mpoad\n");
829 return;
830 }
831
832 mpc->mpoad_vcc = NULL;
833 if (mpc->dev) {
834 struct lec_priv *priv = (struct lec_priv *)mpc->dev->priv;
@@ -844,7 +844,7 @@ static void mpoad_close(struct atm_vcc *vcc)
844 atm_return(vcc, skb->truesize);
845 kfree_skb(skb);
846 }
847
848 printk("mpoa: (%s) going down\n",
849 (mpc->dev) ? mpc->dev->name : "<unknown>");
850 module_put(THIS_MODULE);
@@ -857,11 +857,11 @@ static void mpoad_close(struct atm_vcc *vcc)
857 */
858static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
859{
860
861 struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
862 struct k_message *mesg = (struct k_message*)skb->data;
863 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
864
865 if (mpc == NULL) {
866 printk("mpoa: msg_from_mpoad: no mpc found\n");
867 return 0;
@@ -938,7 +938,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
938 skb_put(skb, sizeof(struct k_message));
939 memcpy(skb->data, mesg, sizeof(struct k_message));
940 atm_force_charge(mpc->mpoad_vcc, skb->truesize);
941
942 sk = sk_atm(mpc->mpoad_vcc);
943 skb_queue_tail(&sk->sk_receive_queue, skb);
944 sk->sk_data_ready(sk, skb->len);
@@ -955,7 +955,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
955 dev = (struct net_device *)dev_ptr;
956 if (dev->name == NULL || strncmp(dev->name, "lec", 3))
957 return NOTIFY_DONE; /* we are only interested in lec:s */
958
959 switch (event) {
960 case NETDEV_REGISTER: /* a new lec device was allocated */
961 priv = (struct lec_priv *)dev->priv;
@@ -1043,7 +1043,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1043 mpc->in_ops->put(entry);
1044 return;
1045 }
1046
1047 if(entry->entry_state == INGRESS_INVALID){
1048 entry->entry_state = INGRESS_RESOLVING;
1049 msg->type = SND_MPOA_RES_RQST;
@@ -1053,7 +1053,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1053 mpc->in_ops->put(entry);
1054 return;
1055 }
1056
1057 printk("mpoa: (%s) MPOA_trigger_rcvd: entry already in resolving state\n",
1058 (mpc->dev) ? mpc->dev->name : "<unknown>");
1059 mpc->in_ops->put(entry);
@@ -1062,7 +1062,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1062
1063/*
1064 * Things get complicated because we have to check if there's an egress
1065 * shortcut with suitable traffic parameters we could use.
1066 */
1067static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry)
1068{
@@ -1079,7 +1079,7 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
1079 else if(eg_entry->shortcut->qos.txtp.max_pcr > 0)
1080 entry->shortcut = eg_entry->shortcut;
1081 }
1082 if(entry->shortcut){
1083 dprintk("mpoa: (%s) using egress SVC to reach %u.%u.%u.%u\n",client->dev->name, NIPQUAD(dst_ip));
1084 client->eg_ops->put(eg_entry);
1085 return;
@@ -1094,7 +1094,7 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
1094 {
1095 msg->qos = qos->qos;
1096 printk("mpoa: (%s) trying to get a CBR shortcut\n",client->dev->name);
1097 }
1098 else memset(&msg->qos,0,sizeof(struct atm_qos));
1099 msg_to_mpoad(msg, client);
1100 return;
@@ -1111,7 +1111,7 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1111 printk("\nmpoa: (%s) ARGH, received res. reply for an entry that doesn't exist.\n", mpc->dev->name);
1112 return;
1113 }
1114 ddprintk(" entry_state = %d ", entry->entry_state);
1115
1116 if (entry->entry_state == INGRESS_RESOLVED) {
1117 printk("\nmpoa: (%s) MPOA_res_reply_rcvd for RESOLVED entry!\n", mpc->dev->name);
@@ -1126,7 +1126,7 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1126 ddprintk("entry->shortcut = %p\n", entry->shortcut);
1127
1128 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL){
1129 entry->entry_state = INGRESS_RESOLVED;
1130 mpc->in_ops->put(entry);
1131 return; /* Shortcut already open... */
1132 }
@@ -1137,7 +1137,7 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1137 mpc->in_ops->put(entry);
1138 return;
1139 }
1140
1141 check_qos_and_open_shortcut(msg, mpc, entry);
1142 entry->entry_state = INGRESS_RESOLVED;
1143 mpc->in_ops->put(entry);
@@ -1169,13 +1169,13 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1169 } while (entry != NULL);
1170
1171 return;
1172}
1173
1174static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1175{
1176 __be32 cache_id = msg->content.eg_info.cache_id;
1177 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
1178
1179 if (entry == NULL) {
1180 dprintk("mpoa: (%s) egress_purge_rcvd: purge for a non-existing entry\n", mpc->dev->name);
1181 return;
@@ -1188,7 +1188,7 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1188 mpc->eg_ops->put(entry);
1189
1190 return;
1191}
1192
1193static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1194{
@@ -1259,7 +1259,7 @@ static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client *
1259{
1260 uint16_t holding_time;
1261 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);
1262
1263 holding_time = msg->content.eg_info.holding_time;
1264 dprintk("mpoa: (%s) MPOA_cache_impos_rcvd: entry = %p, holding_time = %u\n",
1265 mpc->dev->name, entry, holding_time);
@@ -1272,13 +1272,13 @@ static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client *
1272 mpc->eg_ops->update(entry, holding_time);
1273 return;
1274 }
1275
1276 write_lock_irq(&mpc->egress_lock);
1277 mpc->eg_ops->remove_entry(entry, mpc);
1278 write_unlock_irq(&mpc->egress_lock);
1279
1280 mpc->eg_ops->put(entry);
1281
1282 return;
1283}
1284
@@ -1328,7 +1328,7 @@ static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *cli
1328 return;
1329 }
1330 client->number_of_mps_macs = 1;
1331
1332 return;
1333}
1334
@@ -1364,7 +1364,7 @@ static void mpc_timer_refresh(void)
1364 mpc_timer.data = mpc_timer.expires;
1365 mpc_timer.function = mpc_cache_check;
1366 add_timer(&mpc_timer);
1367
1368 return;
1369}
1370
@@ -1373,7 +1373,7 @@ static void mpc_cache_check( unsigned long checking_time )
1373 struct mpoa_client *mpc = mpcs;
1374 static unsigned long previous_resolving_check_time;
1375 static unsigned long previous_refresh_time;
1376
1377 while( mpc != NULL ){
1378 mpc->in_ops->clear_count(mpc);
1379 mpc->eg_ops->clear_expired(mpc);
@@ -1388,7 +1388,7 @@ static void mpc_cache_check( unsigned long checking_time )
1388 mpc = mpc->next;
1389 }
1390 mpc_timer_refresh();
1391
1392 return;
1393}
1394
diff --git a/net/atm/mpc.h b/net/atm/mpc.h
index 51f460d005c3..24c386c35f57 100644
--- a/net/atm/mpc.h
+++ b/net/atm/mpc.h
@@ -12,32 +12,32 @@
12int msg_to_mpoad(struct k_message *msg, struct mpoa_client *mpc);
13
14struct mpoa_client {
15 struct mpoa_client *next;
16 struct net_device *dev; /* lec in question */
17 int dev_num; /* e.g. 2 for lec2 */
18 int (*old_hard_start_xmit)(struct sk_buff *skb, struct net_device *dev);
19 struct atm_vcc *mpoad_vcc; /* control channel to mpoad */
20 uint8_t mps_ctrl_addr[ATM_ESA_LEN]; /* MPS control ATM address */
21 uint8_t our_ctrl_addr[ATM_ESA_LEN]; /* MPC's control ATM address */
22
23 rwlock_t ingress_lock;
24 struct in_cache_ops *in_ops; /* ingress cache operations */
25 in_cache_entry *in_cache; /* the ingress cache of this MPC */
26
27 rwlock_t egress_lock;
28 struct eg_cache_ops *eg_ops; /* egress cache operations */
29 eg_cache_entry *eg_cache; /* the egress cache of this MPC */
30
31 uint8_t *mps_macs; /* array of MPS MAC addresses, >=1 */
32 int number_of_mps_macs; /* number of the above MAC addresses */
33 struct mpc_parameters parameters; /* parameters for this client */
34};
35
36
37struct atm_mpoa_qos {
38 struct atm_mpoa_qos *next;
39 __be32 ipaddr;
40 struct atm_qos qos;
41};
42
43
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 697a081533b5..24799e3e78f7 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -369,7 +369,7 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_clie
369 while (entry != NULL){
370 if (entry->shortcut == vcc) {
371 atomic_inc(&entry->use);
372 read_unlock_irqrestore(&mpc->egress_lock, flags);
373 return entry;
374 }
375 entry = entry->next;
@@ -388,7 +388,7 @@ static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client
388 while(entry != NULL){
389 if(entry->latest_ip_addr == ipaddr) {
390 atomic_inc(&entry->use);
391 read_unlock_irq(&mpc->egress_lock);
392 return entry;
393 }
394 entry = entry->next;
diff --git a/net/atm/mpoa_caches.h b/net/atm/mpoa_caches.h
index 84de977def2e..8e5f78cf0be1 100644
--- a/net/atm/mpoa_caches.h
+++ b/net/atm/mpoa_caches.h
@@ -12,66 +12,66 @@ struct mpoa_client;
12void atm_mpoa_init_cache(struct mpoa_client *mpc);
13
14typedef struct in_cache_entry {
15 struct in_cache_entry *next;
16 struct in_cache_entry *prev;
17 struct timeval tv;
18 struct timeval reply_wait;
19 struct timeval hold_down;
20 uint32_t packets_fwded;
21 uint16_t entry_state;
22 uint32_t retry_time;
23 uint32_t refresh_time;
24 uint32_t count;
25 struct atm_vcc *shortcut;
26 uint8_t MPS_ctrl_ATM_addr[ATM_ESA_LEN];
27 struct in_ctrl_info ctrl_info;
28 atomic_t use;
29} in_cache_entry;
30
31struct in_cache_ops{
32 in_cache_entry *(*add_entry)(__be32 dst_ip,
33 struct mpoa_client *client);
34 in_cache_entry *(*get)(__be32 dst_ip, struct mpoa_client *client);
35 in_cache_entry *(*get_with_mask)(__be32 dst_ip,
36 struct mpoa_client *client,
37 __be32 mask);
38 in_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc,
39 struct mpoa_client *client);
40 void (*put)(in_cache_entry *entry);
41 void (*remove_entry)(in_cache_entry *delEntry,
42 struct mpoa_client *client );
43 int (*cache_hit)(in_cache_entry *entry,
44 struct mpoa_client *client);
45 void (*clear_count)(struct mpoa_client *client);
46 void (*check_resolving)(struct mpoa_client *client);
47 void (*refresh)(struct mpoa_client *client);
48 void (*destroy_cache)(struct mpoa_client *mpc);
49};
50
51typedef struct eg_cache_entry{
52 struct eg_cache_entry *next;
53 struct eg_cache_entry *prev;
54 struct timeval tv;
55 uint8_t MPS_ctrl_ATM_addr[ATM_ESA_LEN];
56 struct atm_vcc *shortcut;
57 uint32_t packets_rcvd;
58 uint16_t entry_state;
59 __be32 latest_ip_addr; /* The src IP address of the last packet */
60 struct eg_ctrl_info ctrl_info;
61 atomic_t use;
62} eg_cache_entry;
63
64struct eg_cache_ops{
65 eg_cache_entry *(*add_entry)(struct k_message *msg, struct mpoa_client *client);
66 eg_cache_entry *(*get_by_cache_id)(__be32 cache_id, struct mpoa_client *client);
67 eg_cache_entry *(*get_by_tag)(__be32 cache_id, struct mpoa_client *client);
68 eg_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc, struct mpoa_client *client);
69 eg_cache_entry *(*get_by_src_ip)(__be32 ipaddr, struct mpoa_client *client);
70 void (*put)(eg_cache_entry *entry);
71 void (*remove_entry)(eg_cache_entry *entry, struct mpoa_client *client);
72 void (*update)(eg_cache_entry *entry, uint16_t holding_time);
73 void (*clear_expired)(struct mpoa_client *client);
74 void (*destroy_cache)(struct mpoa_client *mpc);
75};
76
77
@@ -85,7 +85,7 @@ struct eg_cache_ops{
85/* VCC states */
86
87#define OPEN 1
88#define CLOSED 0
89
90/* Egress cache entry states */
91
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 3844c85d602f..43315af10309 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -2,7 +2,7 @@
2#ifdef CONFIG_PROC_FS
3#include <linux/errno.h>
4#include <linux/kernel.h>
5#include <linux/string.h>
6#include <linux/mm.h>
7#include <linux/module.h>
8#include <linux/proc_fs.h>
@@ -16,7 +16,7 @@
16
17/*
18 * mpoa_proc.c: Implementation MPOA client's proc
19 * file system statistics
20 */
21
22#if 1
@@ -32,7 +32,7 @@ extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */
32
33static int proc_mpc_open(struct inode *inode, struct file *file);
34static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
35 size_t nbytes, loff_t *ppos);
36
37static int parse_qos(const char *buff);
38
@@ -52,18 +52,18 @@ static struct file_operations mpc_file_operations = {
52 * Returns the state of an ingress cache entry as a string
53 */
54static const char *ingress_state_string(int state){
55 switch(state) {
56 case INGRESS_RESOLVING:
57 return "resolving ";
58 break;
59 case INGRESS_RESOLVED:
60 return "resolved ";
61 break;
62 case INGRESS_INVALID:
63 return "invalid ";
64 break;
65 case INGRESS_REFRESHING:
66 return "refreshing ";
67 break;
68 default:
69 return "";
@@ -74,15 +74,15 @@ static const char *ingress_state_string(int state){
74 * Returns the state of an egress cache entry as a string
75 */
76static const char *egress_state_string(int state){
77 switch(state) {
78 case EGRESS_RESOLVED:
79 return "resolved ";
80 break;
81 case EGRESS_PURGE:
82 return "purge ";
83 break;
84 case EGRESS_INVALID:
85 return "invalid ";
86 break;
87 default:
88 return "";
@@ -135,7 +135,7 @@ static int mpc_show(struct seq_file *m, void *v)
135 return 0;
136 }
137
138 seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
139 seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
140 do_gettimeofday(&now);
141
@@ -163,7 +163,7 @@ static int mpc_show(struct seq_file *m, void *v)
163 egress_state_string(eg_entry->entry_state),
164 (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)),
165 eg_entry->packets_rcvd);
166
167 /* latest IP address */
168 temp = (unsigned char *)&eg_entry->latest_ip_addr;
169 sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
@@ -190,51 +190,51 @@ static int proc_mpc_open(struct inode *inode, struct file *file)
190}
191
192static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
193 size_t nbytes, loff_t *ppos)
194{
195 char *page, *p;
196 unsigned len;
197
198 if (nbytes == 0)
199 return 0;
200
201 if (nbytes >= PAGE_SIZE)
202 nbytes = PAGE_SIZE-1;
203
204 page = (char *)__get_free_page(GFP_KERNEL);
205 if (!page)
206 return -ENOMEM;
207
208 for (p = page, len = 0; len < nbytes; p++, len++) {
209 if (get_user(*p, buff++)) {
210 free_page((unsigned long)page);
211 return -EFAULT;
212 }
213 if (*p == '\0' || *p == '\n')
214 break;
215 }
216
217 *p = '\0';
218
219 if (!parse_qos(page))
220 printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
221
222 free_page((unsigned long)page);
223
224 return len;
225}
226
227static int parse_qos(const char *buff)
228{
229 /* possible lines look like this
230 * add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu
231 */
232 unsigned char ip[4];
233 int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
234 __be32 ipaddr;
235 struct atm_qos qos;
236
237 memset(&qos, 0, sizeof(struct atm_qos));
238
239 if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
240 ip, ip+1, ip+2, ip+3) == 4) {
@@ -250,14 +250,14 @@ static int parse_qos(const char *buff)
250 ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
251 return 0;
252
253 ipaddr = *(__be32 *)ip;
254 qos.txtp.traffic_class = ATM_CBR;
255 qos.txtp.max_pcr = tx_pcr;
256 qos.txtp.max_sdu = tx_sdu;
257 qos.rxtp.traffic_class = ATM_CBR;
258 qos.rxtp.max_pcr = rx_pcr;
259 qos.rxtp.max_sdu = rx_sdu;
260 qos.aal = ATM_AAL5;
261 dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
262 qos.txtp.max_pcr,
263 qos.txtp.max_sdu,
@@ -276,11 +276,11 @@ int mpc_proc_init(void)
276{
277 struct proc_dir_entry *p;
278
279 p = create_proc_entry(STAT_FILE_NAME, 0, atm_proc_root);
280 if (!p) {
281 printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
282 return -ENOMEM;
283 }
284 p->proc_fops = &mpc_file_operations;
285 p->owner = THIS_MODULE;
286 return 0;
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 739866bfe9e9..190f49ce2cad 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -86,7 +86,7 @@ static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)
86 break;
87 }
88 l--;
89 }
90try_again:
91 for (; sk; sk = sk_next(sk)) {
92 l -= compare_family(sk, family);
@@ -205,7 +205,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
205 seq_printf(seq, "%p ", vcc);
206 if (!vcc->dev)
207 seq_printf(seq, "Unassigned ");
208 else
209 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
210 vcc->vci);
211 switch (sk->sk_family) {
@@ -249,7 +249,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
249 static char atm_dev_banner[] =
250 "Itf Type ESI/\"MAC\"addr "
251 "AAL(TX,err,RX,err,drop) ... [refcnt]\n";
252
253 if (v == (void *)1)
254 seq_puts(seq, atm_dev_banner);
255 else {
@@ -257,21 +257,21 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
257
258 atm_dev_info(seq, dev);
259 }
260 return 0;
261}
262
263static struct seq_operations atm_dev_seq_ops = {
264 .start = atm_dev_seq_start,
265 .next = atm_dev_seq_next,
266 .stop = atm_dev_seq_stop,
267 .show = atm_dev_seq_show,
268};
269
270static int atm_dev_seq_open(struct inode *inode, struct file *file)
271{
272 return seq_open(file, &atm_dev_seq_ops);
273}
274
275static struct file_operations devices_seq_fops = {
276 .open = atm_dev_seq_open,
277 .read = seq_read,
@@ -281,7 +281,7 @@ static struct file_operations devices_seq_fops = {
281
282static int pvc_seq_show(struct seq_file *seq, void *v)
283{
284 static char atm_pvc_banner[] =
285 "Itf VPI VCI AAL RX(PCR,Class) TX(PCR,Class)\n";
286
287 if (v == (void *)1)
@@ -316,31 +316,31 @@ static struct file_operations pvc_seq_fops = {
316
317static int vcc_seq_show(struct seq_file *seq, void *v)
318{
319 if (v == (void *)1) {
320 seq_printf(seq, sizeof(void *) == 4 ? "%-8s%s" : "%-16s%s",
321 "Address ", "Itf VPI VCI Fam Flags Reply "
322 "Send buffer Recv buffer [refcnt]\n");
323 } else {
324 struct vcc_state *state = seq->private;
325 struct atm_vcc *vcc = atm_sk(state->sk);
326
327 vcc_info(seq, vcc);
328 }
329 return 0;
330}
331
332static struct seq_operations vcc_seq_ops = {
333 .start = vcc_seq_start,
334 .next = vcc_seq_next,
335 .stop = vcc_seq_stop,
336 .show = vcc_seq_show,
337};
338
339static int vcc_seq_open(struct inode *inode, struct file *file)
340{
341 return __vcc_seq_open(inode, file, 0, &vcc_seq_ops);
342}
343
344static struct file_operations vcc_seq_fops = {
345 .open = vcc_seq_open,
346 .read = seq_read,
@@ -350,7 +350,7 @@ static struct file_operations vcc_seq_fops = {
350
351static int svc_seq_show(struct seq_file *seq, void *v)
352{
353 static char atm_svc_banner[] =
354 "Itf VPI VCI State Remote\n";
355
356 if (v == (void *)1)
@@ -472,7 +472,7 @@ static void atm_proc_dirs_remove(void)
472 static struct atm_proc_entry *e;
473
474 for (e = atm_proc_ents; e->name; e++) {
475 if (e->dirent)
476 remove_proc_entry(e->name, atm_proc_root);
477 }
478 remove_proc_entry("net/atm", NULL);
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index b2148b43a426..848e6e191cc7 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -72,7 +72,7 @@ static int pvc_setsockopt(struct socket *sock, int level, int optname,
72
73
74static int pvc_getsockopt(struct socket *sock, int level, int optname,
75 char __user *optval, int __user *optlen)
76{
77 struct sock *sk = sock->sk;
78 int error;
@@ -91,7 +91,7 @@ static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr,
91 struct atm_vcc *vcc = ATM_SD(sock);
92
93 if (!vcc->dev || !test_bit(ATM_VF_ADDR,&vcc->flags)) return -ENOTCONN;
94 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
95 addr = (struct sockaddr_atmpvc *) sockaddr;
96 addr->sap_family = AF_ATMPVC;
97 addr->sap_addr.itf = vcc->dev->number;
diff --git a/net/atm/raw.c b/net/atm/raw.c
index 3e57b17ca523..4df7cdd72aa1 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -56,12 +56,12 @@ static int atm_send_aal0(struct atm_vcc *vcc,struct sk_buff *skb)
56 * still work 56 * still work
57 */ 57 */
58 if (!capable(CAP_NET_ADMIN) && 58 if (!capable(CAP_NET_ADMIN) &&
59 (((u32 *) skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) != 59 (((u32 *) skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
60 ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT))) 60 ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT)))
61 { 61 {
62 kfree_skb(skb); 62 kfree_skb(skb);
63 return -EADDRNOTAVAIL; 63 return -EADDRNOTAVAIL;
64 } 64 }
65 return vcc->dev->ops->send(vcc,skb); 65 return vcc->dev->ops->send(vcc,skb);
66} 66}
67 67
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 529f7e64aa2c..1bcf6dc8d409 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -142,8 +142,8 @@ void atm_dev_deregister(struct atm_dev *dev)
142 set_bit(ATM_DF_REMOVED, &dev->flags); 142 set_bit(ATM_DF_REMOVED, &dev->flags);
143 143
144 /* 144 /*
145 * if we remove current device from atm_devs list, new device 145 * if we remove current device from atm_devs list, new device
146 * with same number can appear, such we need deregister proc, 146 * with same number can appear, such we need deregister proc,
147 * release async all vccs and remove them from vccs list too 147 * release async all vccs and remove them from vccs list too
148 */ 148 */
149 mutex_lock(&atm_dev_mutex); 149 mutex_lock(&atm_dev_mutex);
@@ -228,7 +228,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg)
228 *tmp_p++ = dev->number; 228 *tmp_p++ = dev->number;
229 } 229 }
230 mutex_unlock(&atm_dev_mutex); 230 mutex_unlock(&atm_dev_mutex);
231 error = ((copy_to_user(buf, tmp_buf, size)) || 231 error = ((copy_to_user(buf, tmp_buf, size)) ||
232 put_user(size, &iobuf->length)) 232 put_user(size, &iobuf->length))
233 ? -EFAULT : 0; 233 ? -EFAULT : 0;
234 kfree(tmp_buf); 234 kfree(tmp_buf);
@@ -247,7 +247,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg)
247 if (!(dev = try_then_request_module(atm_dev_lookup(number), 247 if (!(dev = try_then_request_module(atm_dev_lookup(number),
248 "atm-device-%d", number))) 248 "atm-device-%d", number)))
249 return -ENODEV; 249 return -ENODEV;
250 250
251 switch (cmd) { 251 switch (cmd) {
252 case ATM_GETTYPE: 252 case ATM_GETTYPE:
253 size = strlen(dev->type) + 1; 253 size = strlen(dev->type) + 1;
@@ -390,7 +390,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg)
390 goto done; 390 goto done;
391 } 391 }
392 } 392 }
393 393
394 if (size) 394 if (size)
395 error = put_user(size, &sioc->length) 395 error = put_user(size, &sioc->length)
396 ? -EFAULT : 0; 396 ? -EFAULT : 0;
@@ -414,15 +414,15 @@ static __inline__ void *dev_get_idx(loff_t left)
414 414
415void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos) 415void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
416{ 416{
417 mutex_lock(&atm_dev_mutex); 417 mutex_lock(&atm_dev_mutex);
418 return *pos ? dev_get_idx(*pos) : (void *) 1; 418 return *pos ? dev_get_idx(*pos) : (void *) 1;
419} 419}
420 420
421void atm_dev_seq_stop(struct seq_file *seq, void *v) 421void atm_dev_seq_stop(struct seq_file *seq, void *v)
422{ 422{
423 mutex_unlock(&atm_dev_mutex); 423 mutex_unlock(&atm_dev_mutex);
424} 424}
425 425
426void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 426void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
427{ 427{
428 ++*pos; 428 ++*pos;
diff --git a/net/atm/signaling.h b/net/atm/signaling.h
index 434ead455714..08b2a69cc572 100644
--- a/net/atm/signaling.h
+++ b/net/atm/signaling.h
@@ -1,7 +1,7 @@
1/* net/atm/signaling.h - ATM signaling */ 1/* net/atm/signaling.h - ATM signaling */
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5 5
6#ifndef NET_ATM_SIGNALING_H 6#ifndef NET_ATM_SIGNALING_H
7#define NET_ATM_SIGNALING_H 7#define NET_ATM_SIGNALING_H
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3a180cfd7b48..876ec7b47a2f 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -89,7 +89,7 @@ static int svc_release(struct socket *sock)
89 clear_bit(ATM_VF_READY, &vcc->flags); 89 clear_bit(ATM_VF_READY, &vcc->flags);
90 /* VCC pointer is used as a reference, so we must not free it 90 /* VCC pointer is used as a reference, so we must not free it
91 (thereby subjecting it to re-use) before all pending connections 91 (thereby subjecting it to re-use) before all pending connections
92 are closed */ 92 are closed */
93 svc_disconnect(vcc); 93 svc_disconnect(vcc);
94 vcc_release(sock); 94 vcc_release(sock);
95 } 95 }
@@ -144,7 +144,7 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
144 error = -EUNATCH; 144 error = -EUNATCH;
145 goto out; 145 goto out;
146 } 146 }
147 if (!sk->sk_err) 147 if (!sk->sk_err)
148 set_bit(ATM_VF_BOUND,&vcc->flags); 148 set_bit(ATM_VF_BOUND,&vcc->flags);
149 error = -sk->sk_err; 149 error = -sk->sk_err;
150out: 150out:
@@ -229,7 +229,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
229 * This is tricky: 229 * This is tricky:
230 * Kernel ---close--> Demon 230 * Kernel ---close--> Demon
231 * Kernel <--close--- Demon 231 * Kernel <--close--- Demon
232 * or 232 * or
233 * Kernel ---close--> Demon 233 * Kernel ---close--> Demon
234 * Kernel <--error--- Demon 234 * Kernel <--error--- Demon
235 * or 235 * or
@@ -470,13 +470,13 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
470 } 470 }
471 set_bit(ATM_VF_HASSAP, &vcc->flags); 471 set_bit(ATM_VF_HASSAP, &vcc->flags);
472 break; 472 break;
473 case SO_MULTIPOINT: 473 case SO_MULTIPOINT:
474 if (level != SOL_ATM || optlen != sizeof(int)) { 474 if (level != SOL_ATM || optlen != sizeof(int)) {
475 error = -EINVAL; 475 error = -EINVAL;
476 goto out; 476 goto out;
477 } 477 }
478 if (get_user(value, (int __user *) optval)) { 478 if (get_user(value, (int __user *) optval)) {
479 error = -EFAULT; 479 error = -EFAULT;
480 goto out; 480 goto out;
481 } 481 }
482 if (value == 1) { 482 if (value == 1) {
@@ -486,7 +486,7 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
486 } else { 486 } else {
487 error = -EINVAL; 487 error = -EINVAL;
488 } 488 }
489 break; 489 break;
490 default: 490 default:
491 error = vcc_setsockopt(sock, level, optname, 491 error = vcc_setsockopt(sock, level, optname,
492 optval, optlen); 492 optval, optlen);
@@ -539,7 +539,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
539 set_bit(ATM_VF_WAITING, &vcc->flags); 539 set_bit(ATM_VF_WAITING, &vcc->flags);
540 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 540 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
541 sigd_enq(vcc, as_addparty, NULL, NULL, 541 sigd_enq(vcc, as_addparty, NULL, NULL,
542 (struct sockaddr_atmsvc *) sockaddr); 542 (struct sockaddr_atmsvc *) sockaddr);
543 if (flags & O_NONBLOCK) { 543 if (flags & O_NONBLOCK) {
544 finish_wait(sk->sk_sleep, &wait); 544 finish_wait(sk->sk_sleep, &wait);
545 error = -EINPROGRESS; 545 error = -EINPROGRESS;
@@ -587,26 +587,26 @@ out:
587 587
588static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 588static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
589{ 589{
590 int error, ep_ref; 590 int error, ep_ref;
591 struct sockaddr_atmsvc sa; 591 struct sockaddr_atmsvc sa;
592 struct atm_vcc *vcc = ATM_SD(sock); 592 struct atm_vcc *vcc = ATM_SD(sock);
593 593
594 switch (cmd) { 594 switch (cmd) {
595 case ATM_ADDPARTY: 595 case ATM_ADDPARTY:
596 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 596 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
597 return -EINVAL; 597 return -EINVAL;
598 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) 598 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
599 return -EFAULT; 599 return -EFAULT;
600 error = svc_addparty(sock, (struct sockaddr *) &sa, sizeof(sa), 0); 600 error = svc_addparty(sock, (struct sockaddr *) &sa, sizeof(sa), 0);
601 break; 601 break;
602 case ATM_DROPPARTY: 602 case ATM_DROPPARTY:
603 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 603 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
604 return -EINVAL; 604 return -EINVAL;
605 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) 605 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
606 return -EFAULT; 606 return -EFAULT;
607 error = svc_dropparty(sock, ep_ref); 607 error = svc_dropparty(sock, ep_ref);
608 break; 608 break;
609 default: 609 default:
610 error = vcc_ioctl(sock, cmd, arg); 610 error = vcc_ioctl(sock, cmd, arg);
611 } 611 }
612 612
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 42233df2b099..9a0b677d1e7f 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -263,9 +263,9 @@ static void ax25_destroy_timer(unsigned long data)
263{ 263{
264 ax25_cb *ax25=(ax25_cb *)data; 264 ax25_cb *ax25=(ax25_cb *)data;
265 struct sock *sk; 265 struct sock *sk;
266 266
267 sk=ax25->sk; 267 sk=ax25->sk;
268 268
269 bh_lock_sock(sk); 269 bh_lock_sock(sk);
270 sock_hold(sk); 270 sock_hold(sk);
271 ax25_destroy_socket(ax25); 271 ax25_destroy_socket(ax25);
@@ -369,57 +369,57 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
369 ax25_disconnect(ax25, ENETRESET); 369 ax25_disconnect(ax25, ENETRESET);
370 break; 370 break;
371 371
372 case AX25_WINDOW: 372 case AX25_WINDOW:
373 if (ax25->modulus == AX25_MODULUS) { 373 if (ax25->modulus == AX25_MODULUS) {
374 if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7) 374 if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
375 return -EINVAL; 375 return -EINVAL;
376 } else { 376 } else {
377 if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63) 377 if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
378 return -EINVAL; 378 return -EINVAL;
379 } 379 }
380 ax25->window = ax25_ctl.arg; 380 ax25->window = ax25_ctl.arg;
381 break; 381 break;
382 382
383 case AX25_T1: 383 case AX25_T1:
384 if (ax25_ctl.arg < 1) 384 if (ax25_ctl.arg < 1)
 385			return -EINVAL;	 385			return -EINVAL;
 386		ax25->rtt = (ax25_ctl.arg * HZ) / 2;	 386		ax25->rtt = (ax25_ctl.arg * HZ) / 2;
 387		ax25->t1 = ax25_ctl.arg * HZ;	 387		ax25->t1 = ax25_ctl.arg * HZ;
 388		break;	 388		break;
 389	 389
 390	case AX25_T2:	 390	case AX25_T2:
 391		if (ax25_ctl.arg < 1)	 391		if (ax25_ctl.arg < 1)
 392			return -EINVAL;	 392			return -EINVAL;
 393		ax25->t2 = ax25_ctl.arg * HZ;	 393		ax25->t2 = ax25_ctl.arg * HZ;
 394		break;	 394		break;
 395	 395
 396	case AX25_N2:	 396	case AX25_N2:
 397		if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)	 397		if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
 398			return -EINVAL;	 398			return -EINVAL;
 399		ax25->n2count = 0;	 399		ax25->n2count = 0;
 400		ax25->n2 = ax25_ctl.arg;	 400		ax25->n2 = ax25_ctl.arg;
 401		break;	 401		break;
 402	 402
 403	case AX25_T3:	 403	case AX25_T3:
 404		if (ax25_ctl.arg < 0)	 404		if (ax25_ctl.arg < 0)
 405			return -EINVAL;	 405			return -EINVAL;
 406		ax25->t3 = ax25_ctl.arg * HZ;	 406		ax25->t3 = ax25_ctl.arg * HZ;
 407		break;	 407		break;
 408	 408
 409	case AX25_IDLE:	 409	case AX25_IDLE:
 410		if (ax25_ctl.arg < 0)	 410		if (ax25_ctl.arg < 0)
 411			return -EINVAL;	 411			return -EINVAL;
 412		ax25->idle = ax25_ctl.arg * 60 * HZ;	 412		ax25->idle = ax25_ctl.arg * 60 * HZ;
 413		break;	 413		break;
 414	 414
 415	case AX25_PACLEN:	 415	case AX25_PACLEN:
 416		if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)	 416		if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
 417			return -EINVAL;	 417			return -EINVAL;
 418		ax25->paclen = ax25_ctl.arg;	 418		ax25->paclen = ax25_ctl.arg;
 419		break;	 419		break;
 420	 420
 421	default:	 421	default:
 422		return -EINVAL;	 422		return -EINVAL;
423 } 423 }
424 424
425 return 0; 425 return 0;
@@ -1209,7 +1209,7 @@ static int __must_check ax25_connect(struct socket *sock,
1209 1209
1210 if (sk->sk_type == SOCK_SEQPACKET && 1210 if (sk->sk_type == SOCK_SEQPACKET &&
1211 (ax25t=ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi, 1211 (ax25t=ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
1212 ax25->ax25_dev->dev))) { 1212 ax25->ax25_dev->dev))) {
1213 kfree(digi); 1213 kfree(digi);
1214 err = -EADDRINUSE; /* Already such a connection */ 1214 err = -EADDRINUSE; /* Already such a connection */
1215 ax25_cb_put(ax25t); 1215 ax25_cb_put(ax25t);
@@ -1456,7 +1456,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
1456 err = -EMSGSIZE; 1456 err = -EMSGSIZE;
1457 goto out; 1457 goto out;
1458 } 1458 }
1459 1459
1460 if (usax != NULL) { 1460 if (usax != NULL) {
1461 if (usax->sax25_family != AF_AX25) { 1461 if (usax->sax25_family != AF_AX25) {
1462 err = -EINVAL; 1462 err = -EINVAL;
@@ -1470,8 +1470,8 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
1470 else if (addr_len != sizeof(struct full_sockaddr_ax25)) { 1470 else if (addr_len != sizeof(struct full_sockaddr_ax25)) {
1471 /* support for old structure may go away some time */ 1471 /* support for old structure may go away some time */
1472 if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) || 1472 if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
1473 (addr_len > sizeof(struct full_sockaddr_ax25))) { 1473 (addr_len > sizeof(struct full_sockaddr_ax25))) {
1474 err = -EINVAL; 1474 err = -EINVAL;
1475 goto out; 1475 goto out;
1476 } 1476 }
1477 1477
@@ -1624,7 +1624,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
1624 1624
1625 /* Now we can treat all alike */ 1625 /* Now we can treat all alike */
1626 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1626 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1627 flags & MSG_DONTWAIT, &err); 1627 flags & MSG_DONTWAIT, &err);
1628 if (skb == NULL) 1628 if (skb == NULL)
1629 goto out; 1629 goto out;
1630 1630
@@ -1869,7 +1869,7 @@ static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
1869 return hlist_entry( ((struct ax25_cb *)v)->ax25_node.next, 1869 return hlist_entry( ((struct ax25_cb *)v)->ax25_node.next,
1870 struct ax25_cb, ax25_node); 1870 struct ax25_cb, ax25_node);
1871} 1871}
1872 1872
1873static void ax25_info_stop(struct seq_file *seq, void *v) 1873static void ax25_info_stop(struct seq_file *seq, void *v)
1874{ 1874{
1875 spin_unlock_bh(&ax25_list_lock); 1875 spin_unlock_bh(&ax25_list_lock);
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 97a49c79c605..419e7188d5a7 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -126,10 +126,10 @@ int ax25cmp(const ax25_address *a, const ax25_address *b)
126 ct++; 126 ct++;
127 } 127 }
128 128
129 if ((a->ax25_call[ct] & 0x1E) == (b->ax25_call[ct] & 0x1E)) /* SSID without control bit */ 129 if ((a->ax25_call[ct] & 0x1E) == (b->ax25_call[ct] & 0x1E)) /* SSID without control bit */
130 return 0; 130 return 0;
131 131
132 return 2; /* Partial match */ 132 return 2; /* Partial match */
133} 133}
134 134
135EXPORT_SYMBOL(ax25cmp); 135EXPORT_SYMBOL(ax25cmp);
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 136c3aefa9de..8d62d8681615 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -55,46 +55,46 @@ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short
55 if (type == ETH_P_AX25) 55 if (type == ETH_P_AX25)
56 return 0; 56 return 0;
57 57
58 /* header is an AX.25 UI frame from us to them */ 58 /* header is an AX.25 UI frame from us to them */
59 buff = skb_push(skb, AX25_HEADER_LEN); 59 buff = skb_push(skb, AX25_HEADER_LEN);
60 *buff++ = 0x00; /* KISS DATA */ 60 *buff++ = 0x00; /* KISS DATA */
61 61
62 if (daddr != NULL) 62 if (daddr != NULL)
63 memcpy(buff, daddr, dev->addr_len); /* Address specified */ 63 memcpy(buff, daddr, dev->addr_len); /* Address specified */
64 64
65 buff[6] &= ~AX25_CBIT; 65 buff[6] &= ~AX25_CBIT;
66 buff[6] &= ~AX25_EBIT; 66 buff[6] &= ~AX25_EBIT;
67 buff[6] |= AX25_SSSID_SPARE; 67 buff[6] |= AX25_SSSID_SPARE;
68 buff += AX25_ADDR_LEN; 68 buff += AX25_ADDR_LEN;
69 69
70 if (saddr != NULL) 70 if (saddr != NULL)
71 memcpy(buff, saddr, dev->addr_len); 71 memcpy(buff, saddr, dev->addr_len);
72 else 72 else
73 memcpy(buff, dev->dev_addr, dev->addr_len); 73 memcpy(buff, dev->dev_addr, dev->addr_len);
74 74
75 buff[6] &= ~AX25_CBIT; 75 buff[6] &= ~AX25_CBIT;
76 buff[6] |= AX25_EBIT; 76 buff[6] |= AX25_EBIT;
77 buff[6] |= AX25_SSSID_SPARE; 77 buff[6] |= AX25_SSSID_SPARE;
78 buff += AX25_ADDR_LEN; 78 buff += AX25_ADDR_LEN;
79 79
80 *buff++ = AX25_UI; /* UI */ 80 *buff++ = AX25_UI; /* UI */
81 81
82 /* Append a suitable AX.25 PID */ 82 /* Append a suitable AX.25 PID */
83 switch (type) { 83 switch (type) {
84 case ETH_P_IP: 84 case ETH_P_IP:
85 *buff++ = AX25_P_IP; 85 *buff++ = AX25_P_IP;
86 break; 86 break;
87 case ETH_P_ARP: 87 case ETH_P_ARP:
88 *buff++ = AX25_P_ARP; 88 *buff++ = AX25_P_ARP;
89 break; 89 break;
90 default: 90 default:
91 printk(KERN_ERR "AX.25: ax25_hard_header - wrong protocol type 0x%2.2x\n", type); 91 printk(KERN_ERR "AX.25: ax25_hard_header - wrong protocol type 0x%2.2x\n", type);
92 *buff++ = 0; 92 *buff++ = 0;
93 break; 93 break;
94 } 94 }
95 95
96 if (daddr != NULL) 96 if (daddr != NULL)
97 return AX25_HEADER_LEN; 97 return AX25_HEADER_LEN;
98 98
99 return -AX25_HEADER_LEN; /* Unfinished header */ 99 return -AX25_HEADER_LEN; /* Unfinished header */
100} 100}
@@ -114,8 +114,8 @@ int ax25_rebuild_header(struct sk_buff *skb)
114 dst = (ax25_address *)(bp + 1); 114 dst = (ax25_address *)(bp + 1);
115 src = (ax25_address *)(bp + 8); 115 src = (ax25_address *)(bp + 8);
116 116
117 if (arp_find(bp + 1, skb)) 117 if (arp_find(bp + 1, skb))
118 return 1; 118 return 1;
119 119
120 route = ax25_get_route(dst, NULL); 120 route = ax25_get_route(dst, NULL);
121 if (route) { 121 if (route) {
@@ -127,8 +127,8 @@ int ax25_rebuild_header(struct sk_buff *skb)
127 if (dev == NULL) 127 if (dev == NULL)
128 dev = skb->dev; 128 dev = skb->dev;
129 129
130 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) { 130 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
131 goto put; 131 goto put;
132 } 132 }
133 133
134 if (bp[16] == AX25_P_IP) { 134 if (bp[16] == AX25_P_IP) {
@@ -175,8 +175,8 @@ int ax25_rebuild_header(struct sk_buff *skb)
175 ourskb->nh.raw = ourskb->data; 175 ourskb->nh.raw = ourskb->data;
176 176
177 ax25=ax25_send_frame( 177 ax25=ax25_send_frame(
178 ourskb, 178 ourskb,
179 ax25_dev->values[AX25_VALUES_PACLEN], 179 ax25_dev->values[AX25_VALUES_PACLEN],
180 &src_c, 180 &src_c,
181 &dst_c, digipeat, dev); 181 &dst_c, digipeat, dev);
182 if (ax25) { 182 if (ax25) {
@@ -186,13 +186,13 @@ int ax25_rebuild_header(struct sk_buff *skb)
186 } 186 }
187 } 187 }
188 188
189 bp[7] &= ~AX25_CBIT; 189 bp[7] &= ~AX25_CBIT;
190 bp[7] &= ~AX25_EBIT; 190 bp[7] &= ~AX25_EBIT;
191 bp[7] |= AX25_SSSID_SPARE; 191 bp[7] |= AX25_SSSID_SPARE;
192 192
193 bp[14] &= ~AX25_CBIT; 193 bp[14] &= ~AX25_CBIT;
194 bp[14] |= AX25_EBIT; 194 bp[14] |= AX25_EBIT;
195 bp[14] |= AX25_SSSID_SPARE; 195 bp[14] |= AX25_SSSID_SPARE;
196 196
197 skb_pull(skb, AX25_KISS_HEADER_LEN); 197 skb_pull(skb, AX25_KISS_HEADER_LEN);
198 198
@@ -211,7 +211,7 @@ put:
211 if (route) 211 if (route)
212 ax25_put_route(route); 212 ax25_put_route(route);
213 213
214 return 1; 214 return 1;
215} 215}
216 216
217#else /* INET */ 217#else /* INET */
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 0a0381622b1c..7078861a7385 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -87,7 +87,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
87 ax25_rt = ax25_route_list; 87 ax25_rt = ax25_route_list;
88 while (ax25_rt != NULL) { 88 while (ax25_rt != NULL) {
89 if (ax25cmp(&ax25_rt->callsign, &route->dest_addr) == 0 && 89 if (ax25cmp(&ax25_rt->callsign, &route->dest_addr) == 0 &&
90 ax25_rt->dev == ax25_dev->dev) { 90 ax25_rt->dev == ax25_dev->dev) {
91 kfree(ax25_rt->digipeat); 91 kfree(ax25_rt->digipeat);
92 ax25_rt->digipeat = NULL; 92 ax25_rt->digipeat = NULL;
93 if (route->digi_count != 0) { 93 if (route->digi_count != 0) {
@@ -252,8 +252,8 @@ static void *ax25_rt_seq_start(struct seq_file *seq, loff_t *pos)
252{ 252{
253 struct ax25_route *ax25_rt; 253 struct ax25_route *ax25_rt;
254 int i = 1; 254 int i = 1;
255 255
256 read_lock(&ax25_route_lock); 256 read_lock(&ax25_route_lock);
257 if (*pos == 0) 257 if (*pos == 0)
258 return SEQ_START_TOKEN; 258 return SEQ_START_TOKEN;
259 259
@@ -269,7 +269,7 @@ static void *ax25_rt_seq_start(struct seq_file *seq, loff_t *pos)
269static void *ax25_rt_seq_next(struct seq_file *seq, void *v, loff_t *pos) 269static void *ax25_rt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
270{ 270{
271 ++*pos; 271 ++*pos;
272 return (v == SEQ_START_TOKEN) ? ax25_route_list : 272 return (v == SEQ_START_TOKEN) ? ax25_route_list :
273 ((struct ax25_route *) v)->next; 273 ((struct ax25_route *) v)->next;
274} 274}
275 275
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index a29c480a4dc1..e3528b1a7802 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -34,7 +34,7 @@
34void ax25_std_heartbeat_expiry(ax25_cb *ax25) 34void ax25_std_heartbeat_expiry(ax25_cb *ax25)
35{ 35{
36 struct sock *sk=ax25->sk; 36 struct sock *sk=ax25->sk;
37 37
38 if (sk) 38 if (sk)
39 bh_lock_sock(sk); 39 bh_lock_sock(sk);
40 40
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index c41dbe5fadee..85c58c49b4d0 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -56,7 +56,7 @@ void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)
56 */ 56 */
57 if (ax25->va != nr) { 57 if (ax25->va != nr) {
58 while (skb_peek(&ax25->ack_queue) != NULL && ax25->va != nr) { 58 while (skb_peek(&ax25->ack_queue) != NULL && ax25->va != nr) {
59 skb = skb_dequeue(&ax25->ack_queue); 59 skb = skb_dequeue(&ax25->ack_queue);
60 kfree_skb(skb); 60 kfree_skb(skb);
61 ax25->va = (ax25->va + 1) % ax25->modulus; 61 ax25->va = (ax25->va + 1) % ax25->modulus;
62 } 62 }
@@ -65,7 +65,7 @@ void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)
65 65
66void ax25_requeue_frames(ax25_cb *ax25) 66void ax25_requeue_frames(ax25_cb *ax25)
67{ 67{
68 struct sk_buff *skb, *skb_prev = NULL; 68 struct sk_buff *skb, *skb_prev = NULL;
69 69
70 /* 70 /*
71 * Requeue all the un-ack-ed frames on the output queue to be picked 71 * Requeue all the un-ack-ed frames on the output queue to be picked
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 5e9a81e8b214..7f4c294b36f1 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -164,7 +164,7 @@ static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
164 ++*pos; 164 ++*pos;
165 165
166 return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next, 166 return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
167 ax25_uid_assoc, uid_node); 167 ax25_uid_assoc, uid_node);
168} 168}
169 169
170static void ax25_uid_seq_stop(struct seq_file *seq, void *v) 170static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 67df99e2e5c8..c7228cfc6218 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -1,4 +1,4 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 4
@@ -12,13 +12,13 @@
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED. 22 SOFTWARE IS DISCLAIMED.
23*/ 23*/
24 24
@@ -119,7 +119,7 @@ static int bt_sock_create(struct socket *sock, int proto)
119 119
120 read_unlock(&bt_proto_lock); 120 read_unlock(&bt_proto_lock);
121 121
122 return err; 122 return err;
123} 123}
124 124
125void bt_sock_link(struct bt_sock_list *l, struct sock *sk) 125void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
@@ -265,7 +265,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
265 if (sk->sk_shutdown == SHUTDOWN_MASK) 265 if (sk->sk_shutdown == SHUTDOWN_MASK)
266 mask |= POLLHUP; 266 mask |= POLLHUP;
267 267
268 if (!skb_queue_empty(&sk->sk_receive_queue) || 268 if (!skb_queue_empty(&sk->sk_receive_queue) ||
269 (sk->sk_shutdown & RCV_SHUTDOWN)) 269 (sk->sk_shutdown & RCV_SHUTDOWN))
270 mask |= POLLIN | POLLRDNORM; 270 mask |= POLLIN | POLLRDNORM;
271 271
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 0b6cd0e2528d..a2992280c3d1 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -1,7 +1,7 @@
1/* 1/*
2 BNEP protocol definition for Linux Bluetooth stack (BlueZ). 2 BNEP protocol definition for Linux Bluetooth stack (BlueZ).
3 Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> 3 Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License, version 2, as 6 it under the terms of the GNU General Public License, version 2, as
7 published by the Free Software Foundation. 7 published by the Free Software Foundation.
@@ -60,7 +60,7 @@
60// Extension types 60// Extension types
61#define BNEP_EXT_CONTROL 0x00 61#define BNEP_EXT_CONTROL 0x00
62 62
63// Response messages 63// Response messages
64#define BNEP_SUCCESS 0x00 64#define BNEP_SUCCESS 0x00
65 65
66#define BNEP_CONN_INVALID_DST 0x01 66#define BNEP_CONN_INVALID_DST 0x01
@@ -81,7 +81,7 @@
81#define BNEP_CONNECT_TO 15 81#define BNEP_CONNECT_TO 15
82#define BNEP_FILTER_TO 15 82#define BNEP_FILTER_TO 15
83 83
84// Headers 84// Headers
85#define BNEP_TYPE_MASK 0x7f 85#define BNEP_TYPE_MASK 0x7f
86#define BNEP_EXT_HEADER 0x80 86#define BNEP_EXT_HEADER 0x80
87 87
@@ -132,7 +132,7 @@ struct bnep_conndel_req {
132struct bnep_conninfo { 132struct bnep_conninfo {
133 __u32 flags; 133 __u32 flags;
134 __u16 role; 134 __u16 role;
135 __u16 state; 135 __u16 state;
136 __u8 dst[ETH_ALEN]; 136 __u8 dst[ETH_ALEN];
137 char device[16]; 137 char device[16];
138}; 138};
@@ -155,10 +155,10 @@ int bnep_get_conninfo(struct bnep_conninfo *ci);
155// BNEP sessions 155// BNEP sessions
156struct bnep_session { 156struct bnep_session {
157 struct list_head list; 157 struct list_head list;
158 158
159 unsigned int role; 159 unsigned int role;
160 unsigned long state; 160 unsigned long state;
161 unsigned long flags; 161 unsigned long flags;
162 atomic_t killed; 162 atomic_t killed;
163 163
164 struct ethhdr eh; 164 struct ethhdr eh;
@@ -166,7 +166,7 @@ struct bnep_session {
166 166
167 struct bnep_proto_filter proto_filter[BNEP_MAX_PROTO_FILTERS]; 167 struct bnep_proto_filter proto_filter[BNEP_MAX_PROTO_FILTERS];
168 u64 mc_filter; 168 u64 mc_filter;
169 169
170 struct socket *sock; 170 struct socket *sock;
171 struct net_device *dev; 171 struct net_device *dev;
172 struct net_device_stats stats; 172 struct net_device_stats stats;
@@ -178,7 +178,7 @@ int bnep_sock_cleanup(void);
178 178
179static inline int bnep_mc_hash(__u8 *addr) 179static inline int bnep_mc_hash(__u8 *addr)
180{ 180{
181 return (crc32_be(~0, addr, ETH_ALEN) >> 26); 181 return (crc32_be(~0, addr, ETH_ALEN) >> 26);
182} 182}
183 183
184#endif 184#endif
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 7ba6470dc507..b85d1492c357 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -1,4 +1,4 @@
1/* 1/*
2 BNEP implementation for Linux Bluetooth stack (BlueZ). 2 BNEP implementation for Linux Bluetooth stack (BlueZ).
3 Copyright (C) 2001-2002 Inventel Systemes 3 Copyright (C) 2001-2002 Inventel Systemes
4 Written 2001-2002 by 4 Written 2001-2002 by
@@ -15,19 +15,19 @@
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28/* 28/*
29 * $Id: core.c,v 1.20 2002/08/04 21:23:58 maxk Exp $ 29 * $Id: core.c,v 1.20 2002/08/04 21:23:58 maxk Exp $
30 */ 30 */
31 31
32#include <linux/module.h> 32#include <linux/module.h>
33 33
@@ -74,7 +74,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
74 BT_DBG(""); 74 BT_DBG("");
75 75
76 list_for_each(p, &bnep_session_list) { 76 list_for_each(p, &bnep_session_list) {
77 s = list_entry(p, struct bnep_session, list); 77 s = list_entry(p, struct bnep_session, list);
78 if (!compare_ether_addr(dst, s->eh.h_source)) 78 if (!compare_ether_addr(dst, s->eh.h_source))
79 return s; 79 return s;
80 } 80 }
@@ -87,7 +87,7 @@ static void __bnep_link_session(struct bnep_session *s)
87 by the socket layer which has to hold the refference to this module. 87 by the socket layer which has to hold the refference to this module.
88 */ 88 */
89 __module_get(THIS_MODULE); 89 __module_get(THIS_MODULE);
90 list_add(&s->list, &bnep_session_list); 90 list_add(&s->list, &bnep_session_list);
91} 91}
92 92
93static void __bnep_unlink_session(struct bnep_session *s) 93static void __bnep_unlink_session(struct bnep_session *s)
@@ -203,7 +203,7 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
203 203
204 memcpy(a1, data, ETH_ALEN); data += ETH_ALEN; 204 memcpy(a1, data, ETH_ALEN); data += ETH_ALEN;
205 a2 = data; data += ETH_ALEN; 205 a2 = data; data += ETH_ALEN;
206 206
207 BT_DBG("mc filter %s -> %s", 207 BT_DBG("mc filter %s -> %s",
208 batostr((void *) a1), batostr((void *) a2)); 208 batostr((void *) a1), batostr((void *) a2));
209 209
@@ -277,7 +277,7 @@ static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb)
277 } 277 }
278 278
279 BT_DBG("type 0x%x len %d", h->type, h->len); 279 BT_DBG("type 0x%x len %d", h->type, h->len);
280 280
281 switch (h->type & BNEP_TYPE_MASK) { 281 switch (h->type & BNEP_TYPE_MASK) {
282 case BNEP_EXT_CONTROL: 282 case BNEP_EXT_CONTROL:
283 bnep_rx_control(s, skb->data, skb->len); 283 bnep_rx_control(s, skb->data, skb->len);
@@ -293,7 +293,7 @@ static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb)
293 break; 293 break;
294 } 294 }
295 } while (!err && (h->type & BNEP_EXT_HEADER)); 295 } while (!err && (h->type & BNEP_EXT_HEADER));
296 296
297 return err; 297 return err;
298} 298}
299 299
@@ -319,7 +319,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
319 319
320 if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES) 320 if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES)
321 goto badframe; 321 goto badframe;
322 322
323 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { 323 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
324 bnep_rx_control(s, skb->data, skb->len); 324 bnep_rx_control(s, skb->data, skb->len);
325 kfree_skb(skb); 325 kfree_skb(skb);
@@ -345,7 +345,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
345 goto badframe; 345 goto badframe;
346 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); 346 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
347 } 347 }
348 348
349 /* We have to alloc new skb and copy data here :(. Because original skb 349 /* We have to alloc new skb and copy data here :(. Because original skb
350 * may not be modified and because of the alignment requirements. */ 350 * may not be modified and because of the alignment requirements. */
351 nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); 351 nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
@@ -361,7 +361,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
361 case BNEP_COMPRESSED: 361 case BNEP_COMPRESSED:
362 memcpy(__skb_put(nskb, ETH_HLEN), &s->eh, ETH_HLEN); 362 memcpy(__skb_put(nskb, ETH_HLEN), &s->eh, ETH_HLEN);
363 break; 363 break;
364 364
365 case BNEP_COMPRESSED_SRC_ONLY: 365 case BNEP_COMPRESSED_SRC_ONLY:
366 memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN); 366 memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN);
367 memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN); 367 memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN);
@@ -381,7 +381,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
381 381
382 memcpy(__skb_put(nskb, skb->len), skb->data, skb->len); 382 memcpy(__skb_put(nskb, skb->len), skb->data, skb->len);
383 kfree_skb(skb); 383 kfree_skb(skb);
384 384
385 s->stats.rx_packets++; 385 s->stats.rx_packets++;
386 nskb->dev = dev; 386 nskb->dev = dev;
387 nskb->ip_summed = CHECKSUM_NONE; 387 nskb->ip_summed = CHECKSUM_NONE;
@@ -435,7 +435,7 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
435 iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN }; 435 iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN };
436 len += ETH_ALEN; 436 len += ETH_ALEN;
437 break; 437 break;
438 438
439 case BNEP_COMPRESSED_DST_ONLY: 439 case BNEP_COMPRESSED_DST_ONLY:
440 iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN }; 440 iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN };
441 len += ETH_ALEN; 441 len += ETH_ALEN;
@@ -445,7 +445,7 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
445send: 445send:
446 iv[il++] = (struct kvec) { skb->data, skb->len }; 446 iv[il++] = (struct kvec) { skb->data, skb->len };
447 len += skb->len; 447 len += skb->len;
448 448
449 /* FIXME: linearize skb */ 449 /* FIXME: linearize skb */
450 { 450 {
451 len = kernel_sendmsg(sock, &s->msg, iv, il, len); 451 len = kernel_sendmsg(sock, &s->msg, iv, il, len);
@@ -471,7 +471,7 @@ static int bnep_session(void *arg)
471 471
472 BT_DBG(""); 472 BT_DBG("");
473 473
474 daemonize("kbnepd %s", dev->name); 474 daemonize("kbnepd %s", dev->name);
475 set_user_nice(current, -15); 475 set_user_nice(current, -15);
476 current->flags |= PF_NOFREEZE; 476 current->flags |= PF_NOFREEZE;
477 477
@@ -488,13 +488,13 @@ static int bnep_session(void *arg)
488 488
489 if (sk->sk_state != BT_CONNECTED) 489 if (sk->sk_state != BT_CONNECTED)
490 break; 490 break;
491 491
492 // TX 492 // TX
493 while ((skb = skb_dequeue(&sk->sk_write_queue))) 493 while ((skb = skb_dequeue(&sk->sk_write_queue)))
494 if (bnep_tx_frame(s, skb)) 494 if (bnep_tx_frame(s, skb))
495 break; 495 break;
496 netif_wake_queue(dev); 496 netif_wake_queue(dev);
497 497
498 schedule(); 498 schedule();
499 } 499 }
500 set_current_state(TASK_RUNNING); 500 set_current_state(TASK_RUNNING);
@@ -573,7 +573,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
573 s->sock = sock; 573 s->sock = sock;
574 s->role = req->role; 574 s->role = req->role;
575 s->state = BT_CONNECTED; 575 s->state = BT_CONNECTED;
576 576
577 s->msg.msg_flags = MSG_NOSIGNAL; 577 s->msg.msg_flags = MSG_NOSIGNAL;
578 578
579#ifdef CONFIG_BT_BNEP_MC_FILTER 579#ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -594,7 +594,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
594 } 594 }
595 595
596 __bnep_link_session(s); 596 __bnep_link_session(s);
597 597
598 err = kernel_thread(bnep_session, s, CLONE_KERNEL); 598 err = kernel_thread(bnep_session, s, CLONE_KERNEL);
599 if (err < 0) { 599 if (err < 0) {
600 /* Session thread start failed, gotta cleanup. */ 600 /* Session thread start failed, gotta cleanup. */
@@ -627,7 +627,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
627 /* Wakeup user-space which is polling for socket errors. 627 /* Wakeup user-space which is polling for socket errors.
628 * This is temporary hack untill we have shutdown in L2CAP */ 628 * This is temporary hack untill we have shutdown in L2CAP */
629 s->sock->sk->sk_err = EUNATCH; 629 s->sock->sk->sk_err = EUNATCH;
630 630
631 /* Kill session thread */ 631 /* Kill session thread */
632 atomic_inc(&s->killed); 632 atomic_inc(&s->killed);
633 wake_up_interruptible(s->sock->sk->sk_sleep); 633 wake_up_interruptible(s->sock->sk->sk_sleep);
@@ -661,7 +661,7 @@ int bnep_get_connlist(struct bnep_connlist_req *req)
661 s = list_entry(p, struct bnep_session, list); 661 s = list_entry(p, struct bnep_session, list);
662 662
663 __bnep_copy_ci(&ci, s); 663 __bnep_copy_ci(&ci, s);
664 664
665 if (copy_to_user(req->ci, &ci, sizeof(ci))) { 665 if (copy_to_user(req->ci, &ci, sizeof(ci))) {
666 err = -EFAULT; 666 err = -EFAULT;
667 break; 667 break;
@@ -696,7 +696,7 @@ int bnep_get_conninfo(struct bnep_conninfo *ci)
696} 696}
697 697
698static int __init bnep_init(void) 698static int __init bnep_init(void)
699{ 699{
700 char flt[50] = ""; 700 char flt[50] = "";
701 701
702 l2cap_load(); 702 l2cap_load();
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 67a002a9751a..9092816f58de 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -1,4 +1,4 @@
1/* 1/*
2 BNEP implementation for Linux Bluetooth stack (BlueZ). 2 BNEP implementation for Linux Bluetooth stack (BlueZ).
3 Copyright (C) 2001-2002 Inventel Systemes 3 Copyright (C) 2001-2002 Inventel Systemes
4 Written 2001-2002 by 4 Written 2001-2002 by
@@ -15,19 +15,19 @@
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28/* 28/*
29 * $Id: netdev.c,v 1.8 2002/08/04 21:23:58 maxk Exp $ 29 * $Id: netdev.c,v 1.8 2002/08/04 21:23:58 maxk Exp $
30 */ 30 */
31 31
32#include <linux/module.h> 32#include <linux/module.h>
33 33
@@ -94,7 +94,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
94 r->type = BNEP_CONTROL; 94 r->type = BNEP_CONTROL;
95 r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; 95 r->ctrl = BNEP_FILTER_MULTI_ADDR_SET;
96 96
97 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 97 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
98 u8 start[ETH_ALEN] = { 0x01 }; 98 u8 start[ETH_ALEN] = { 0x01 };
99 99
100 /* Request all addresses */ 100 /* Request all addresses */
@@ -102,14 +102,14 @@ static void bnep_net_set_mc_list(struct net_device *dev)
102 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); 102 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
103 r->len = htons(ETH_ALEN * 2); 103 r->len = htons(ETH_ALEN * 2);
104 } else { 104 } else {
105 struct dev_mc_list *dmi = dev->mc_list; 105 struct dev_mc_list *dmi = dev->mc_list;
106 int i, len = skb->len; 106 int i, len = skb->len;
107 107
108 if (dev->flags & IFF_BROADCAST) { 108 if (dev->flags & IFF_BROADCAST) {
109 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); 109 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
110 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); 110 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
111 } 111 }
112 112
113 /* FIXME: We should group addresses here. */ 113 /* FIXME: We should group addresses here. */
114 114
115 for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) { 115 for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) {
@@ -159,13 +159,13 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
159{ 159{
160 struct ethhdr *eh = (void *) skb->data; 160 struct ethhdr *eh = (void *) skb->data;
161 u16 proto = ntohs(eh->h_proto); 161 u16 proto = ntohs(eh->h_proto);
162 162
163 if (proto >= 1536) 163 if (proto >= 1536)
164 return proto; 164 return proto;
165 165
166 if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) 166 if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
167 return ETH_P_802_3; 167 return ETH_P_802_3;
168 168
169 return ETH_P_802_2; 169 return ETH_P_802_2;
170} 170}
171 171
@@ -174,7 +174,7 @@ static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session
174 u16 proto = bnep_net_eth_proto(skb); 174 u16 proto = bnep_net_eth_proto(skb);
175 struct bnep_proto_filter *f = s->proto_filter; 175 struct bnep_proto_filter *f = s->proto_filter;
176 int i; 176 int i;
177 177
178 for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { 178 for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) {
179 if (proto >= f[i].start && proto <= f[i].end) 179 if (proto >= f[i].start && proto <= f[i].end)
180 return 0; 180 return 0;
@@ -198,14 +198,14 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
198 return 0; 198 return 0;
199 } 199 }
200#endif 200#endif
201 201
202#ifdef CONFIG_BT_BNEP_PROTO_FILTER 202#ifdef CONFIG_BT_BNEP_PROTO_FILTER
203 if (bnep_net_proto_filter(skb, s)) { 203 if (bnep_net_proto_filter(skb, s)) {
204 kfree_skb(skb); 204 kfree_skb(skb);
205 return 0; 205 return 0;
206 } 206 }
207#endif 207#endif
208 208
209 /* 209 /*
210 * We cannot send L2CAP packets from here as we are potentially in a bh. 210 * We cannot send L2CAP packets from here as we are potentially in a bh.
211 * So we have to queue them and wake up session thread which is sleeping 211 * So we have to queue them and wake up session thread which is sleeping
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 5563db1bf526..6d7311cee1b4 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -1,4 +1,4 @@
1/* 1/*
2 BNEP implementation for Linux Bluetooth stack (BlueZ). 2 BNEP implementation for Linux Bluetooth stack (BlueZ).
3 Copyright (C) 2001-2002 Inventel Systemes 3 Copyright (C) 2001-2002 Inventel Systemes
4 Written 2001-2002 by 4 Written 2001-2002 by
@@ -14,19 +14,19 @@
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27/* 27/*
28 * $Id: sock.c,v 1.4 2002/08/04 21:23:58 maxk Exp $ 28 * $Id: sock.c,v 1.4 2002/08/04 21:23:58 maxk Exp $
29 */ 29 */
30 30
31#include <linux/module.h> 31#include <linux/module.h>
32 32
@@ -89,7 +89,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
89 89
90 if (copy_from_user(&ca, argp, sizeof(ca))) 90 if (copy_from_user(&ca, argp, sizeof(ca)))
91 return -EFAULT; 91 return -EFAULT;
92 92
93 nsock = sockfd_lookup(ca.sock, &err); 93 nsock = sockfd_lookup(ca.sock, &err);
94 if (!nsock) 94 if (!nsock)
95 return err; 95 return err;
@@ -101,20 +101,20 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
101 101
102 err = bnep_add_connection(&ca, nsock); 102 err = bnep_add_connection(&ca, nsock);
103 if (!err) { 103 if (!err) {
104 if (copy_to_user(argp, &ca, sizeof(ca))) 104 if (copy_to_user(argp, &ca, sizeof(ca)))
105 err = -EFAULT; 105 err = -EFAULT;
106 } else 106 } else
107 fput(nsock->file); 107 fput(nsock->file);
108 108
109 return err; 109 return err;
110 110
111 case BNEPCONNDEL: 111 case BNEPCONNDEL:
112 if (!capable(CAP_NET_ADMIN)) 112 if (!capable(CAP_NET_ADMIN))
113 return -EACCES; 113 return -EACCES;
114 114
115 if (copy_from_user(&cd, argp, sizeof(cd))) 115 if (copy_from_user(&cd, argp, sizeof(cd)))
116 return -EFAULT; 116 return -EFAULT;
117 117
118 return bnep_del_connection(&cd); 118 return bnep_del_connection(&cd);
119 119
120 case BNEPGETCONNLIST: 120 case BNEPGETCONNLIST:
@@ -123,7 +123,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
123 123
124 if (cl.cnum <= 0) 124 if (cl.cnum <= 0)
125 return -EINVAL; 125 return -EINVAL;
126 126
127 err = bnep_get_connlist(&cl); 127 err = bnep_get_connlist(&cl);
128 if (!err && copy_to_user(argp, &cl, sizeof(cl))) 128 if (!err && copy_to_user(argp, &cl, sizeof(cl)))
129 return -EFAULT; 129 return -EFAULT;
@@ -163,7 +163,7 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
163 163
164 if (cl.cnum <= 0) 164 if (cl.cnum <= 0)
165 return -EINVAL; 165 return -EINVAL;
166 166
167 err = bnep_get_connlist(&cl); 167 err = bnep_get_connlist(&cl);
168 168
169 if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) 169 if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index ab166b48ce8d..3e9d5bb3fefb 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -1,4 +1,4 @@
1/* 1/*
2 CMTP implementation for Linux Bluetooth stack (BlueZ). 2 CMTP implementation for Linux Bluetooth stack (BlueZ).
3 Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> 3 Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org>
4 4
@@ -10,13 +10,13 @@
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
@@ -563,7 +563,7 @@ int cmtp_attach_device(struct cmtp_session *session)
563 563
564 ret = wait_event_interruptible_timeout(session->wait, 564 ret = wait_event_interruptible_timeout(session->wait,
565 session->ncontroller, CMTP_INTEROP_TIMEOUT); 565 session->ncontroller, CMTP_INTEROP_TIMEOUT);
566 566
567 BT_INFO("Found %d CAPI controller(s) on device %s", session->ncontroller, session->name); 567 BT_INFO("Found %d CAPI controller(s) on device %s", session->ncontroller, session->name);
568 568
569 if (!ret) 569 if (!ret)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index 40e3dfec0cc8..e4663aa14d26 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -1,4 +1,4 @@
1/* 1/*
2 CMTP implementation for Linux Bluetooth stack (BlueZ). 2 CMTP implementation for Linux Bluetooth stack (BlueZ).
3 Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> 3 Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org>
4 4
@@ -10,13 +10,13 @@
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index b81a01c64aea..3933608a9296 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -1,4 +1,4 @@
1/* 1/*
2 CMTP implementation for Linux Bluetooth stack (BlueZ). 2 CMTP implementation for Linux Bluetooth stack (BlueZ).
3 Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> 3 Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org>
4 4
@@ -10,13 +10,13 @@
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
@@ -336,7 +336,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
336 baswap(&dst, &bt_sk(sock->sk)->dst); 336 baswap(&dst, &bt_sk(sock->sk)->dst);
337 337
338 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); 338 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
339 if (!session) 339 if (!session)
340 return -ENOMEM; 340 return -ENOMEM;
341 341
342 down_write(&cmtp_session_sem); 342 down_write(&cmtp_session_sem);
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 53295d33dc5c..0c5ccd95517f 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -1,4 +1,4 @@
/*
   CMTP implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org>

@@ -10,13 +10,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -154,7 +154,7 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne

	if (cl.cnum <= 0)
		return -EINVAL;

	err = cmtp_get_connlist(&cl);

	if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6cd5711fa28a..67ee0bd80f5f 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,4 +1,4 @@
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

@@ -12,13 +12,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -146,7 +146,7 @@ static void hci_conn_timeout(unsigned long arg)
	case BT_CONNECT:
		hci_acl_connect_cancel(conn);
		break;
	case BT_CONNECTED:
		hci_acl_disconn(conn, 0x13);
		break;
	default:
@@ -272,7 +272,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */
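
The "simple routing" comment above is the whole policy: with no source address, any local adapter other than the destination will do; otherwise the adapter must match the source exactly. A minimal standalone sketch of that selection rule follows (names and types are illustrative only; the kernel loop walks hci_dev_list and uses bacmp()/BDADDR_ANY):

/* Sketch of the routing rule described in the comment above.
 * The struct and helpers are illustrative, not the kernel API. */
#include <stdio.h>
#include <string.h>

struct sketch_bdaddr { unsigned char b[6]; };

static const struct sketch_bdaddr any_addr; /* all zeroes, like BDADDR_ANY */

static int same(const struct sketch_bdaddr *a, const struct sketch_bdaddr *b)
{
	return memcmp(a->b, b->b, sizeof(a->b)) == 0;
}

/* Nonzero if this adapter may be used for traffic from src to dst. */
static int adapter_usable(const struct sketch_bdaddr *adapter,
			  const struct sketch_bdaddr *src,
			  const struct sketch_bdaddr *dst)
{
	if (same(src, &any_addr))             /* no source address given   */
		return !same(adapter, dst);   /* any adapter but dst works */
	return same(adapter, src);            /* otherwise exact src match */
}

int main(void)
{
	struct sketch_bdaddr hci0 = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
	struct sketch_bdaddr dst  = { { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb } };

	printf("%d\n", adapter_usable(&hci0, &any_addr, &dst)); /* prints 1 */
	printf("%d\n", adapter_usable(&hci0, &dst, &dst));      /* prints 0 */
	return 0;
}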
@@ -328,7 +328,7 @@ struct hci_conn * hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)

		hci_conn_hold(sco);

		if (acl->state == BT_CONNECTED &&
				(sco->state == BT_OPEN || sco->state == BT_CLOSED))
			hci_add_sco(sco, acl->handle);

@@ -370,7 +370,7 @@ int hci_conn_encrypt(struct hci_conn *conn)
	if (hci_conn_auth(conn)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = __cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
	}
	return 0;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 338ae977a31b..4917919d86a6 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,4 +1,4 @@
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

@@ -12,13 +12,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -116,7 +116,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
@@ -278,7 +278,7 @@ static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
@@ -405,7 +405,7 @@ int hci_inquiry(void __user *arg)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
@@ -439,7 +439,7 @@ int hci_inquiry(void __user *arg)
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);
@@ -491,7 +491,7 @@ int hci_dev_open(__u16 dev)
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
@@ -619,7 +619,7 @@ int hci_dev_reset(__u16 dev)
	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
@@ -841,7 +841,7 @@ int hci_register_dev(struct hci_dev *hdev)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);
@@ -1109,7 +1109,7 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
@@ -1164,7 +1164,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
@@ -1306,7 +1306,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

@@ -1318,7 +1318,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

@@ -1353,7 +1353,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index bb94e6da223c..00d845e3d307 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,4 +1,4 @@
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

@@ -12,13 +12,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -100,7 +100,7 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *
	BT_DBG("%s ocf 0x%x", hdev->name, ocf);

	switch (ocf) {
	case OCF_ROLE_DISCOVERY:
		rd = (void *) skb->data;

		if (rd->status)
@@ -141,7 +141,7 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *
		break;

	default:
		BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x",
			hdev->name, ocf);
		break;
	}
@@ -237,10 +237,10 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
		if (!status) {
			clear_bit(HCI_PSCAN, &hdev->flags);
			clear_bit(HCI_ISCAN, &hdev->flags);
			if (param & SCAN_INQUIRY)
				set_bit(HCI_ISCAN, &hdev->flags);

			if (param & SCAN_PAGE)
				set_bit(HCI_PSCAN, &hdev->flags);
		}
		hci_req_complete(hdev, status);
@@ -343,7 +343,7 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s

		memcpy(hdev->features, lf->features, sizeof(hdev->features));

		/* Adjust default settings according to features
		 * supported by device. */
		if (hdev->features[0] & LMP_3SLOT)
			hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
@@ -491,7 +491,7 @@ static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
		break;

	default:
		BT_DBG("%s Command status: ogf LINK_CTL ocf %x status %d",
			hdev->name, ocf, status);
		break;
	}
@@ -793,7 +793,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
		if (!conn->out) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = (conn->type == ACL_LINK) ?
				__cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
				__cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);

diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index dbf98c49dbaa..dc3ecb19a5cd 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1,4 +1,4 @@
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

@@ -12,13 +12,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -170,7 +170,7 @@ static int hci_sock_release(struct socket *sock)
	return 0;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;
@@ -348,8 +348,8 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(tv), &tv);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
@@ -386,7 +386,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
@@ -520,7 +520,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
@@ -535,7 +535,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt;

	if (get_user(len, optlen))
		return -EFAULT;
@@ -544,7 +544,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
@@ -554,7 +554,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 66782010f82c..4b99c5e4478d 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1,4 +1,4 @@
/*
   HIDP implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org>

@@ -10,13 +10,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -293,7 +293,7 @@ static inline void hidp_process_handshake(struct hidp_session *session, unsigned

	case HIDP_HSHK_ERR_FATAL:
		/* Device requests a reboot, as this is the only way this error
		 * can be recovered. */
		__hidp_send_ctrl_message(session,
			HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0);
		break;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index c8dfacd40a06..a326601c8f41 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -1,4 +1,4 @@
/*
   HIDP implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org>

@@ -10,13 +10,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 93cf9e586178..33bda40aceb8 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -1,4 +1,4 @@
/*
   HIDP implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org>

@@ -10,13 +10,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -206,7 +206,7 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
		    put_user(ca.idle_to, &uca->idle_to) ||
		    copy_to_user(&uca->name[0], &ca.name[0], 128))
			return -EFAULT;

		arg = (unsigned long) uca;

		/* Fall through. We don't actually write back any _changes_
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index f8c25d500155..e83ee82440d3 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1,4 +1,4 @@
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

@@ -12,13 +12,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -227,7 +227,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
		bt_accept_enqueue(parent, sk);
}

/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
@@ -238,7 +238,7 @@ static void l2cap_chan_del(struct sock *sk, int err)

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
@@ -590,7 +590,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
@@ -890,7 +890,7 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
@@ -1267,7 +1267,7 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned

static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
{
	int type, hint, olen;
	unsigned long val;
	void *ptr = data;

@@ -1414,7 +1414,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

@@ -1607,7 +1607,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
		goto done;
	}

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index e5fd0cb70ae9..ad2af5814e40 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -1,4 +1,4 @@
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

@@ -12,13 +12,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 278c8676906a..94f457360560 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1,4 +1,4 @@
/*
   RFCOMM implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
   Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
@@ -11,13 +11,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -134,7 +134,7 @@ static inline void rfcomm_session_put(struct rfcomm_session *s)
/* ---- RFCOMM FCS computation ---- */

/* reversed, 8-bit, poly=0x07 */
static unsigned char rfcomm_crc_table[256] = {
	0x00, 0x91, 0xe3, 0x72, 0x07, 0x96, 0xe4, 0x75,
	0x0e, 0x9f, 0xed, 0x7c, 0x09, 0x98, 0xea, 0x7b,
	0x1c, 0x8d, 0xff, 0x6e, 0x1b, 0x8a, 0xf8, 0x69,
@@ -179,13 +179,13 @@ static unsigned char rfcomm_crc_table[256] = {
/* CRC on 2 bytes */
#define __crc(data) (rfcomm_crc_table[rfcomm_crc_table[0xff ^ data[0]] ^ data[1]])

/* FCS on 2 bytes */
static inline u8 __fcs(u8 *data)
{
	return (0xff - __crc(data));
}

/* FCS on 3 bytes */
static inline u8 __fcs2(u8 *data)
{
	return (0xff - rfcomm_crc_table[__crc(data) ^ data[2]]);
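
The table above is the reflected CRC-8 used by RFCOMM (generator polynomial 0x07, bit-reversed), and __fcs()/__fcs2() simply return 0xff minus the running CRC. As a minimal sketch, assuming an ordinary userspace C compiler rather than kernel code, this is how such a table can be generated; its first entries match rfcomm_crc_table:

/* Generate a reflected CRC-8 table for poly 0x07 (reversed form 0xe0).
 * The first entries should match rfcomm_crc_table: 0x00 0x91 0xe3 0x72. */
#include <stdio.h>

static unsigned char table[256];

static void build_table(void)
{
	for (int i = 0; i < 256; i++) {
		unsigned char crc = (unsigned char) i;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xe0 : crc >> 1;
		table[i] = crc;
	}
}

int main(void)
{
	build_table();
	printf("%02x %02x %02x %02x\n", table[0], table[1], table[2], table[3]);
	return 0;
}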
@@ -288,7 +288,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
	atomic_set(&d->refcnt, 1);

	rfcomm_dlc_clear_state(d);

	BT_DBG("%p", d);

	return d;
@@ -345,7 +345,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
	int err = 0;
	u8 dlci;

	BT_DBG("dlc %p state %ld %s %s channel %d",
			d, d->state, batostr(src), batostr(dst), channel);

	if (channel < 1 || channel > 30)
@@ -489,21 +489,21 @@ void fastcall __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
	rfcomm_schedule(RFCOMM_SCHED_TX);
}

/*
   Set/get modem status functions use _local_ status i.e. what we report
   to the other side.
   Remote status is provided by dlc->modem_status() callback.
 */
int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig)
{
	BT_DBG("dlc %p state %ld v24_sig 0x%x",
			d, d->state, v24_sig);

	if (test_bit(RFCOMM_RX_THROTTLED, &d->flags))
		v24_sig |= RFCOMM_V24_FC;
	else
		v24_sig &= ~RFCOMM_V24_FC;

	d->v24_sig = v24_sig;

	if (!test_and_set_bit(RFCOMM_MSC_PENDING, &d->flags))
@@ -514,7 +514,7 @@ int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig)

int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig)
{
	BT_DBG("dlc %p state %ld v24_sig 0x%x",
			d, d->state, d->v24_sig);

	*v24_sig = d->v24_sig;
@@ -576,7 +576,7 @@ static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
	struct bt_sock *sk;
	list_for_each_safe(p, n, &session_list) {
		s = list_entry(p, struct rfcomm_session, list);
		sk = bt_sk(s->sock->sk);

		if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) &&
				!bacmp(&sk->dst, dst))
@@ -825,7 +825,7 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d

int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
			u8 bit_rate, u8 data_bits, u8 stop_bits,
			u8 parity, u8 flow_ctrl_settings,
			u8 xon_char, u8 xoff_char, u16 param_mask)
{
	struct rfcomm_hdr *hdr;
@@ -834,8 +834,8 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
	u8 buf[16], *ptr = buf;

	BT_DBG("%p cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x"
			" flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x",
			s, cr, dlci, bit_rate, data_bits, stop_bits, parity,
			flow_ctrl_settings, xon_char, xoff_char, param_mask);

	hdr = (void *) ptr; ptr += sizeof(*hdr);
@@ -1120,9 +1120,9 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)

			d->state = BT_CLOSED;
			__rfcomm_dlc_close(d, err);
		} else
			rfcomm_send_dm(s, dlci);

	} else {
		rfcomm_send_ua(s, 0);

@@ -1230,7 +1230,7 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
{
	struct rfcomm_session *s = d->session;

	BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d",
			d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits);

	if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) ||
@@ -1454,7 +1454,7 @@ static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb
		if (d->modem_status)
			d->modem_status(d, msc->v24_sig);
		rfcomm_dlc_unlock(d);

		rfcomm_send_msc(s, 0, dlci, msc->v24_sig);

		d->mscex |= RFCOMM_MSCEX_RX;
@@ -1641,18 +1641,18 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
	struct sk_buff *skb;
	int err;

	BT_DBG("dlc %p state %ld cfc %d rx_credits %d tx_credits %d",
			d, d->state, d->cfc, d->rx_credits, d->tx_credits);

	/* Send pending MSC */
	if (test_and_clear_bit(RFCOMM_MSC_PENDING, &d->flags))
		rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);

	if (d->cfc) {
		/* CFC enabled.
		 * Give them some credits */
		if (!test_bit(RFCOMM_RX_THROTTLED, &d->flags) &&
				d->rx_credits <= (d->cfc >> 2)) {
			rfcomm_send_credits(d->session, d->addr, d->cfc - d->rx_credits);
			d->rx_credits = d->cfc;
		}
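
The block above is the credit-based flow control refill: when the peer's remaining credits have fallen to a quarter of the negotiated window (d->cfc) and we are not throttled, enough credits are handed back to restore the full window. A tiny standalone sketch of that refill decision, with illustrative names rather than the kernel API:

/* Illustrative model of the RFCOMM credit refill decision above. */
#include <stdio.h>

struct dlc_model {
	int cfc;         /* negotiated credit window            */
	int rx_credits;  /* credits the remote peer still holds */
	int throttled;   /* local receive throttle flag         */
};

static int credits_to_grant(const struct dlc_model *d)
{
	if (!d->throttled && d->rx_credits <= (d->cfc >> 2))
		return d->cfc - d->rx_credits;  /* top the peer back up */
	return 0;
}

int main(void)
{
	struct dlc_model d = { .cfc = 40, .rx_credits = 9, .throttled = 0 };
	printf("grant %d credits\n", credits_to_grant(&d)); /* grant 31 credits */
	return 0;
}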
@@ -1876,7 +1876,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)

	/* Create socket */
	err = rfcomm_l2sock_create(&sock);
	if (err < 0) {
		BT_ERR("Create socket failed %d", err);
		return err;
	}
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index cb7e855f0828..30586ab9e878 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1,4 +1,4 @@
/*
   RFCOMM implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
   Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
@@ -11,13 +11,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -130,7 +130,7 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
	struct hlist_node *node;

	sk_for_each(sk, node, &rfcomm_sk_list.head) {
		if (rfcomm_pi(sk)->channel == channel &&
				!bacmp(&bt_sk(sk)->src, src))
			break;
	}
@@ -572,7 +572,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
	while (len) {
		size_t size = min_t(size_t, len, d->mtu);
		int err;

		skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
				msg->msg_flags & MSG_DONTWAIT, &err);
		if (!skb)
@@ -843,7 +843,7 @@ static int rfcomm_sock_release(struct socket *sock)
	return err;
}

/* ---- RFCOMM core layer callbacks ----
 *
 * called under rfcomm_lock()
 */
@@ -864,7 +864,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto done;
	}

diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index eb2b52484c70..8cd82dce5008 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -1,4 +1,4 @@
/*
   RFCOMM implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
   Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
@@ -11,13 +11,13 @@
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

@@ -110,7 +110,7 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)

	kfree(dev);

	/* It's safe to call module_put() here because socket still
	   holds reference to this module. */
	module_put(THIS_MODULE);
}
@@ -185,7 +185,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
	int err = 0;

	BT_DBG("id %d channel %d", req->dev_id, req->channel);

	dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
@@ -234,7 +234,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
	bacpy(&dev->dst, &req->dst);
	dev->channel = req->channel;

	dev->flags = req->flags &
		((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC));

	init_waitqueue_head(&dev->wait);
@@ -249,7 +249,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
	dev->dlc = dlc;
	rfcomm_dlc_unlock(dlc);

	/* It's safe to call __module_get() here because socket already
	   holds reference to this module. */
	__module_get(THIS_MODULE);

@@ -487,7 +487,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
{
	struct rfcomm_dev *dev = dlc->owner;
	struct tty_struct *tty;

	if (!dev || !(tty = dev->tty)) {
		kfree_skb(skb);
		return;
@@ -506,7 +506,7 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
	struct rfcomm_dev *dev = dlc->owner;
	if (!dev)
		return;

	BT_DBG("dlc %p dev %p err %d", dlc, dev, err);

	dev->err = err;
@@ -525,7 +525,7 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
				rfcomm_dev_put(dev);
				rfcomm_dlc_lock(dlc);
			}
		} else
			tty_hangup(dev->tty);
	}
}
@@ -543,7 +543,7 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
		tty_hangup(dev->tty);
	}

	dev->modem_status =
		((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
		((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) |
		((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) |
@@ -561,7 +561,7 @@ static void rfcomm_tty_wakeup(unsigned long arg)
	BT_DBG("dev %p tty %p", dev, tty);

	if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && tty->ldisc.write_wakeup)
		(tty->ldisc.write_wakeup)(tty);

	wake_up_interruptible(&tty->write_wait);
#ifdef SERIAL_HAVE_POLL_WAIT
@@ -576,7 +576,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
	struct rfcomm_dlc *dlc;
	int err, id;

	id = tty->index;

	BT_DBG("tty %p id %d", tty, id);

@@ -670,7 +670,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
		size = min_t(uint, count, dlc->mtu);

		skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC);

		if (!skb)
			break;

@@ -773,7 +773,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
		return;

	/* Handle turning off CRTSCTS */
	if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS))
		BT_DBG("Turning off CRTSCTS unsupported");

	/* Parity on/off and when on, odd/even */
@@ -830,7 +830,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
	}

	/* Handle number of data bits [5-8] */
	if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE))
		changes |= RFCOMM_RPN_PM_DATA;

	switch (new->c_cflag & CSIZE) {
@@ -868,7 +868,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
	case 9600:
		baud = RFCOMM_RPN_BR_9600;
		break;
	case 19200:
		baud = RFCOMM_RPN_BR_19200;
		break;
	case 38400:
@@ -887,7 +887,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
		/* 9600 is standard accordinag to the RFCOMM specification */
		baud = RFCOMM_RPN_BR_9600;
		break;

	}

	if (changes)
@@ -978,11 +978,11 @@ static int rfcomm_tty_read_proc(char *buf, char **start, off_t offset, int len,

static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp)
{
	struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;

	BT_DBG("tty %p dev %p", tty, dev);

	return dev->modem_status;
}

static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsigned int set, unsigned int clear)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 5d13d4f31753..ae4391440950 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1,4 +1,4 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated

@@ -12,13 +12,13 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/

@@ -149,7 +149,7 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
struct sco_conn *conn;
struct sock *sk;

if (!(conn = hcon->sco_data))
return 0;

BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
@@ -404,7 +404,7 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);

if (parent)
sk->sk_type = parent->sk_type;
}

@@ -522,7 +522,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
if ((err = sco_connect(sk)))
goto done;

err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));

done:
@@ -627,7 +627,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
return 0;
}

static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
@@ -677,7 +677,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;

BT_DBG("sk %p", sk);

@@ -761,7 +761,7 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *
bt_accept_enqueue(parent, sk);
}

/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
@@ -771,7 +771,7 @@ static void sco_chan_del(struct sock *sk, int err)

BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
@@ -855,7 +855,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
conn = sco_conn_add(hcon, status);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bt_err(status));

return 0;
@@ -887,7 +887,7 @@ static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
}

drop:
kfree_skb(skb);
return 0;
}

diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index f8dbcee80eba..905a39c33a16 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -40,7 +40,7 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb->mac.raw = skb->data;
skb_pull(skb, ETH_HLEN);

if (dest[0] & 1)
br_flood_deliver(br, skb, 0);
else if ((dst = __br_fdb_get(br, dest)) != NULL)
br_deliver(dst->dst, skb);
@@ -178,12 +178,12 @@ void br_dev_setup(struct net_device *dev)
dev->change_mtu = br_change_mtu;
dev->destructor = free_netdev;
SET_MODULE_OWNER(dev);
SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
dev->stop = br_dev_stop;
dev->tx_queue_len = 0;
dev->set_mac_address = br_set_mac_address;
dev->priv_flags = IFF_EBRIDGE;

dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 8ca448db7a0d..def2e403f932 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -52,7 +52,7 @@ static __inline__ unsigned long hold_time(const struct net_bridge *br)
static __inline__ int has_expired(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
return !fdb->is_static
&& time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
}

@@ -71,7 +71,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge *br = p->br;
int i;

spin_lock_bh(&br->hash_lock);

/* Search all chains since old address/hash is unknown */
@@ -85,7 +85,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
/* maybe another port has same hw addr? */
struct net_bridge_port *op;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
!compare_ether_addr(op->dev->dev_addr,
f->addr.addr)) {
f->dst = op;
@@ -118,8 +118,8 @@ void br_fdb_cleanup(unsigned long _data)
struct hlist_node *h, *n;

hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
if (!f->is_static &&
time_before_eq(f->ageing_timer + delay, jiffies))
fdb_delete(f);
}
}
@@ -138,11 +138,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h, *g;

hlist_for_each_safe(h, g, &br->hash[i]) {
struct net_bridge_fdb_entry *f
= hlist_entry(h, struct net_bridge_fdb_entry, hlist);
if (f->dst != p)
continue;

if (f->is_static && !do_all)
@@ -155,7 +155,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
if (f->is_local) {
struct net_bridge_port *op;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
!compare_ether_addr(op->dev->dev_addr,
f->addr.addr)) {
f->dst = op;
@@ -190,14 +190,14 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
}

/* Interface used by ATM hook that keeps a ref count */
struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
unsigned char *addr)
{
struct net_bridge_fdb_entry *fdb;

rcu_read_lock();
fdb = __br_fdb_get(br, addr);
if (fdb)
atomic_inc(&fdb->use_count);
rcu_read_unlock();
return fdb;
@@ -218,7 +218,7 @@ void br_fdb_put(struct net_bridge_fdb_entry *ent)
}

/*
* Fill buffer with forwarding table records in
* the API format.
*/
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
@@ -237,7 +237,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
if (num >= maxnum)
goto out;

if (has_expired(br, f))
continue;

if (skip) {
@@ -277,7 +277,7 @@ static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,

static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
struct net_bridge_port *source,
const unsigned char *addr,
int is_local)
{
struct net_bridge_fdb_entry *fdb;
@@ -307,17 +307,17 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,

fdb = fdb_find(head, addr);
if (fdb) {
/* it is okay to have multiple ports with same
* address, just use the first one.
*/
if (fdb->is_local)
return 0;

printk(KERN_WARNING "%s adding interface with same address "
"as a received packet\n",
source->dev->name);
fdb_delete(fdb);
}

if (!fdb_create(head, source, addr, 1))
return -ENOMEM;
@@ -350,7 +350,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
if (unlikely(fdb->is_local)) {
if (net_ratelimit())
printk(KERN_WARNING "%s: received packet with "
" own address as source address\n",
source->dev->name);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 191b861e5e53..3e45c1a1aa96 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -21,7 +21,7 @@
#include "br_private.h"

/* Don't forward packets to originating port or forwarding diasabled */
static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING);
@@ -101,7 +101,7 @@ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)

/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
struct net_bridge_port *p;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 2b7c2c7dad48..f35c1a378d0f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -47,7 +47,7 @@ static int port_cost(struct net_device *dev)
set_fs(KERNEL_DS);
err = dev_ethtool(&ifr);
set_fs(old_fs);

if (!err) {
switch(ecmd.speed) {
case SPEED_100:
@@ -191,7 +191,7 @@ static void del_br(struct net_bridge *br)
del_timer_sync(&br->gc_timer);

br_sysfs_delbr(br->dev);
unregister_netdevice(br->dev);
}

static struct net_device *new_bridge_dev(const char *name)
@@ -201,7 +201,7 @@ static struct net_device *new_bridge_dev(const char *name)

dev = alloc_netdev(sizeof(struct net_bridge), name,
br_dev_setup);

if (!dev)
return NULL;

@@ -258,12 +258,12 @@ static int find_portno(struct net_bridge *br)
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
struct net_device *dev)
{
int index;
struct net_bridge_port *p;

index = find_portno(br);
if (index < 0)
return ERR_PTR(index);
@@ -276,7 +276,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
dev_hold(dev);
p->dev = dev;
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
p->port_no = index;
br_init_port(p);
p->state = BR_STATE_DISABLED;
@@ -298,7 +298,7 @@ int br_add_bridge(const char *name)
int ret;

dev = new_bridge_dev(name);
if (!dev)
return -ENOMEM;

rtnl_lock();
@@ -329,7 +329,7 @@ int br_del_bridge(const char *name)

rtnl_lock();
dev = __dev_get_by_name(name);
if (dev == NULL)
ret = -ENXIO; 	/* Could not find device */

else if (!(dev->priv_flags & IFF_EBRIDGE)) {
@@ -340,9 +340,9 @@ int br_del_bridge(const char *name)
else if (dev->flags & IFF_UP) {
/* Not shutdown yet. */
ret = -EBUSY;
}

else
del_br(netdev_priv(dev));

rtnl_unlock();
@@ -428,7 +428,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err0;

err = br_fdb_insert(br, p, dev->dev_addr);
if (err)
goto err1;

@@ -464,8 +464,8 @@ err0:
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p = dev->br_port;

if (!p || p->br != br)
return -EINVAL;

del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index bfa4d8c333f7..35b94f9a1ac5 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -119,7 +119,7 @@ static inline int is_link_local(const unsigned char *dest)
* Called via br_handle_frame_hook.
* Return 0 if *pskb should be processed furthur
*        1 if *pskb is handled
* note: already called with rcu_read_lock (preempt_disabled)
*/
int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb)
{
@@ -137,7 +137,7 @@ int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb)

if (p->state == BR_STATE_FORWARDING || p->state == BR_STATE_LEARNING) {
if (br_should_route_hook) {
if (br_should_route_hook(pskb))
return 0;
skb = *pskb;
dest = eth_hdr(skb)->h_dest;
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 4c61a7e0a86e..3ab153d3c508 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -28,7 +28,7 @@ static int get_bridge_ifindices(int *indices, int num)
int i = 0;

for (dev = dev_base; dev && i < num; dev = dev->next) {
if (dev->priv_flags & IFF_EBRIDGE)
indices[i++] = dev->ifindex;
}

@@ -53,7 +53,7 @@ static void get_port_ifindices(struct net_bridge *br, int *ifindices, int num)
* (limited to a page for sanity)
* offset -- number of records to skip
*/
static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
unsigned long maxnum, unsigned long offset)
{
int num;
@@ -69,7 +69,7 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
buf = kmalloc(size, GFP_USER);
if (!buf)
return -ENOMEM;

num = br_fdb_fillbuf(br, buf, maxnum, offset);
if (num > 0) {
if (copy_to_user(userbuf, buf, num*sizeof(struct __fdb_entry)))
@@ -91,7 +91,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
dev = dev_get_by_index(ifindex);
if (dev == NULL)
return -EINVAL;

if (isadd)
ret = br_add_if(br, dev);
else
@@ -110,7 +110,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct net_bridge *br = netdev_priv(dev);
unsigned long args[4];

if (copy_from_user(args, rq->ifr_data, sizeof(args)))
return -EFAULT;

@@ -143,7 +143,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
b.tcn_timer_value = br_timer_value(&br->tcn_timer);
b.topology_change_timer_value = br_timer_value(&br->topology_change_timer);
b.gc_timer_value = br_timer_value(&br->gc_timer);
rcu_read_unlock();

if (copy_to_user((void __user *)args[1], &b, sizeof(b)))
return -EFAULT;
@@ -275,7 +275,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -ERANGE;

spin_lock_bh(&br->lock);
if ((p = br_get_port(br, args[1])) == NULL)
ret = -EINVAL;
else
br_stp_set_port_priority(p, args[2]);
@@ -301,7 +301,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}

case BRCTL_GET_FDB_ENTRIES:
return get_fdb_entries(br, (void __user *)args[1],
args[2], args[3]);
}

@@ -368,7 +368,7 @@ int br_ioctl_deviceless_stub(unsigned int cmd, void __user *uarg)
case SIOCGIFBR:
case SIOCSIFBR:
return old_deviceless(uarg);

case SIOCBRADDBR:
case SIOCBRDELBR:
{
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a25fa8cb5284..a0744e653f09 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -68,7 +68,7 @@ static __be16 inline vlan_proto(const struct sk_buff *skb)

#define IS_VLAN_IP(skb) \
(skb->protocol == htons(ETH_P_8021Q) && \
vlan_proto(skb) == htons(ETH_P_IP) && \
brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
@@ -124,7 +124,7 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)

static inline void nf_bridge_save_header(struct sk_buff *skb)
{
int header_size = ETH_HLEN;

if (skb->protocol == htons(ETH_P_8021Q))
header_size += VLAN_HLEN;
@@ -139,7 +139,7 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
int header_size = ETH_HLEN;

if (skb->protocol == htons(ETH_P_8021Q))
header_size += VLAN_HLEN;
@@ -836,10 +836,10 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff **pskb,
* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* ip_refrag() can return NF_STOLEN. */
static struct nf_hook_ops br_nf_ops[] = {
{ .hook = br_nf_pre_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF, },
{ .hook = br_nf_local_in,
.owner = THIS_MODULE,
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 20278494e4da..8cd3e4229070 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -26,7 +26,7 @@ struct notifier_block br_device_notifier = {

/*
* Handle changes in state of network devices enslaved to a bridge.
*
* Note: don't care about up/down if bridge itself is down, because
*     port state is checked when bridge is brought up.
*/
@@ -60,11 +60,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
break;

case NETDEV_FEAT_CHANGE:
if (br->dev->flags & IFF_UP)
br_features_recompute(br);

/* could do recursive feature change notification
* but who would care??
*/
break;

@@ -74,7 +74,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
break;

case NETDEV_UP:
if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP))
br_stp_enable_port(p);
break;

@@ -82,7 +82,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
spin_unlock_bh(&br->lock);
br_del_if(br, dev);
goto done;
}
spin_unlock_bh(&br->lock);

done:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3a534e94c7f3..6a0540e0591e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -149,7 +149,7 @@ extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
unsigned char *addr);
extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long count, unsigned long off);
extern int br_fdb_insert(struct net_bridge *br,
struct net_bridge_port *source,
@@ -203,7 +203,7 @@ extern void br_netfilter_fini(void);
/* br_stp.c */
extern void br_log_state(const struct net_bridge_port *p);
extern struct net_bridge_port *br_get_port(struct net_bridge *br,
u16 port_no);
extern void br_init_port(struct net_bridge_port *p);
extern void br_become_designated_port(struct net_bridge_port *p);

diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 04ca0639a95a..f9ff4d57b0d7 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -24,17 +24,17 @@
#define MESSAGE_AGE_INCR ((HZ < 256) ? 1 : (HZ/256))

static const char *br_port_state_names[] = {
[BR_STATE_DISABLED] = "disabled",
[BR_STATE_LISTENING] = "listening",
[BR_STATE_LEARNING] = "learning",
[BR_STATE_FORWARDING] = "forwarding",
[BR_STATE_BLOCKING] = "blocking",
};

void br_log_state(const struct net_bridge_port *p)
{
pr_info("%s: port %d(%s) entering %s state\n",
p->br->dev->name, p->port_no, p->dev->name,
br_port_state_names[p->state]);

}
@@ -53,7 +53,7 @@ struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no)
}

/* called under bridge lock */
static int br_should_become_root_port(const struct net_bridge_port *p,
u16 root_port)
{
struct net_bridge *br;
@@ -184,7 +184,7 @@ void br_transmit_config(struct net_bridge_port *p)
}

/* called under bridge lock */
static inline void br_record_config_information(struct net_bridge_port *p,
const struct br_config_bpdu *bpdu)
{
p->designated_root = bpdu->root;
@@ -192,12 +192,12 @@ static inline void br_record_config_information(struct net_bridge_port *p,
p->designated_bridge = bpdu->bridge_id;
p->designated_port = bpdu->port_id;

mod_timer(&p->message_age_timer, jiffies
+ (p->br->max_age - bpdu->message_age));
}

/* called under bridge lock */
static inline void br_record_config_timeout_values(struct net_bridge *br,
const struct br_config_bpdu *bpdu)
{
br->max_age = bpdu->max_age;
@@ -415,7 +415,7 @@ void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *b
{
struct net_bridge *br;
int was_root;

br = p->br;
was_root = br_is_root_bridge(br);

@@ -430,7 +430,7 @@ void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *b
del_timer(&br->topology_change_timer);
br_transmit_tcn(br);

mod_timer(&br->tcn_timer,
jiffies + br->bridge_hello_time);
}
}
@@ -441,8 +441,8 @@ void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *b
if (bpdu->topology_change_ack)
br_topology_change_acknowledged(br);
}
} else if (br_is_designated_port(p)) {
br_reply(p);
}
}

diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 068d8afbf0a7..b9fb0dc4ab12 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -29,7 +29,7 @@
#define LLC_RESERVE sizeof(struct llc_pdu_un)

static void br_send_bpdu(struct net_bridge_port *p,
const unsigned char *data, int length)
{
struct sk_buff *skb;

diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d294224592db..58d13f2bd121 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -28,7 +28,7 @@
*/
static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
{
return ((u16)priority << BR_PORT_BITS)
| (port_no & ((1<<BR_PORT_BITS)-1));
}

@@ -50,7 +50,7 @@ void br_stp_enable_bridge(struct net_bridge *br)
spin_lock_bh(&br->lock);
mod_timer(&br->hello_timer, jiffies + br->hello_time);
mod_timer(&br->gc_timer, jiffies + HZ/10);

br_config_bpdu_generation(br);

list_for_each_entry(p, &br->port_list, list) {
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index d0fcde82c6fc..030aa798fea7 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -27,7 +27,7 @@ static int br_is_designated_for_some_port(const struct net_bridge *br)

list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
!memcmp(&p->designated_bridge, &br->bridge_id, 8))
return 1;
}

@@ -37,7 +37,7 @@ static int br_is_designated_for_some_port(const struct net_bridge *br)
static void br_hello_timer_expired(unsigned long arg)
{
struct net_bridge *br = (struct net_bridge *)arg;

pr_debug("%s: hello timer expired\n", br->dev->name);
spin_lock(&br->lock);
if (br->dev->flags & IFF_UP) {
@@ -58,11 +58,11 @@ static void br_message_age_timer_expired(unsigned long arg)
if (p->state == BR_STATE_DISABLED)
return;


pr_info("%s: neighbor %.2x%.2x.%.2x:%.2x:%.2x:%.2x:%.2x:%.2x lost on port %d(%s)\n",
br->dev->name,
id->prio[0], id->prio[1],
id->addr[0], id->addr[1], id->addr[2],
id->addr[3], id->addr[4], id->addr[5],
p->port_no, p->dev->name);

@@ -114,7 +114,7 @@ static void br_tcn_timer_expired(unsigned long arg)
spin_lock(&br->lock);
if (br->dev->flags & IFF_UP) {
br_transmit_tcn(br);

mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
}
spin_unlock(&br->lock);
@@ -135,7 +135,7 @@ static void br_hold_timer_expired(unsigned long arg)
{
struct net_bridge_port *p = (struct net_bridge_port *) arg;

pr_debug("%s: %d(%s) hold timer expired\n",
p->br->dev->name, p->port_no, p->dev->name);

spin_lock(&p->br->lock);
@@ -166,10 +166,10 @@ void br_stp_port_timer_init(struct net_bridge_port *p)

setup_timer(&p->forward_delay_timer, br_forward_delay_timer_expired,
(unsigned long) p);

setup_timer(&p->hold_timer, br_hold_timer_expired,
(unsigned long) p);
}

/* Report ticks left (in USER_HZ) used for API */
unsigned long br_timer_value(const struct timer_list *timer)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index ce10464716a7..01a22ad0cc75 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -353,19 +353,19 @@ static ssize_t brforward_read(struct kobject *kobj, char *buf,
if (off % sizeof(struct __fdb_entry) != 0)
return -EINVAL;

n = br_fdb_fillbuf(br, buf,
count / sizeof(struct __fdb_entry),
off / sizeof(struct __fdb_entry));

if (n > 0)
n *= sizeof(struct __fdb_entry);

return n;
}

static struct bin_attribute bridge_forward = {
.attr = { .name = SYSFS_BRIDGE_FDB,
.mode = S_IRUGO,
.owner = THIS_MODULE, },
.read = brforward_read,
};
@@ -401,7 +401,7 @@ int br_sysfs_addbr(struct net_device *dev)
goto out2;
}


kobject_set_name(&br->ifobj, SYSFS_BRIDGE_PORT_SUBDIR);
br->ifobj.ktype = NULL;
br->ifobj.kset = NULL;
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index 9abbc09ccdc3..41a78072cd0e 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -5,7 +5,7 @@
* Chris Vitale csv@bluetail.com
*
* May 2003
*
*/

#include <linux/netfilter_bridge/ebtables.h>
@@ -20,7 +20,7 @@ static int ebt_filter_802_3(const struct sk_buff *skb, const struct net_device *
__be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;

if (info->bitmask & EBT_802_3_SAP) {
if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP))
return EBT_NOMATCH;
if (FWINV(info->sap != hdr->llc.ui.dsap, EBT_802_3_SAP))
return EBT_NOMATCH;
@@ -29,7 +29,7 @@ static int ebt_filter_802_3(const struct sk_buff *skb, const struct net_device *
if (info->bitmask & EBT_802_3_TYPE) {
if (!(hdr->llc.ui.dsap == CHECK_TYPE && hdr->llc.ui.ssap == CHECK_TYPE))
return EBT_NOMATCH;
if (FWINV(info->type != type, EBT_802_3_TYPE))
return EBT_NOMATCH;
}

diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index ce97c4285f9a..392d877040d3 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -18,7 +18,7 @@ static int ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
const char *mac, __be32 ip)
{
/* You may be puzzled as to how this code works.
* Some tricks were used, refer to
* include/linux/netfilter_bridge/ebt_among.h
* as there you can find a solution of this mystery.
*/
@@ -207,8 +207,8 @@ static int ebt_among_check(const char *tablename, unsigned int hookmask,
}

static struct ebt_match filter_among = {
.name = EBT_AMONG_MATCH,
.match = ebt_filter_among,
.check = ebt_among_check,
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 0aa7b9910a86..ffe468a632e7 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -51,7 +51,7 @@ static int ebt_target_reply(struct sk_buff **pskb, unsigned int hooknr,
return EBT_DROP;

arp_send(ARPOP_REPLY, ETH_P_ARP, *siptr, (struct net_device *)in,
*diptr, shp, info->mac, shp);

return info->target;
}
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 6afa4d017d4a..69f7f0ab9c76 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -61,15 +61,15 @@ static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in,
if (info->bitmask & EBT_IP_DPORT) {
u32 dst = ntohs(pptr->dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1],
EBT_IP_DPORT))
return EBT_NOMATCH;
}
if (info->bitmask & EBT_IP_SPORT) {
u32 src = ntohs(pptr->src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1],
EBT_IP_SPORT))
return EBT_NOMATCH;
}
}
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 985df82e427b..f9a5ae9d5b6d 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -169,10 +169,10 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,

if (info->bitmask & EBT_LOG_NFLOG)
nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
"%s", info->prefix);
else
ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
info->prefix);
}

static struct ebt_watcher log =
@@ -199,7 +199,7 @@ static int __init ebt_log_init(void)
if (nf_log_register(PF_BRIDGE, &ebt_log_logger) < 0) {
printk(KERN_WARNING "ebt_log: not logging via system console "
"since somebody else already registered for PF_INET\n");
/* we cannot make module load fail here, since otherwise
* ebtables userspace would abort */
}

diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index 025869ee0b68..6b0d2169af74 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -26,7 +26,7 @@ static int ebt_filter_mark(const struct sk_buff *skb,
static int ebt_mark_check(const char *tablename, unsigned int hookmask,
const struct ebt_entry *e, void *data, unsigned int datalen)
{
struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data;

if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_m_info)))
return -EINVAL;
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index a0bed82145ed..31b77367319c 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -62,7 +62,7 @@ static int ebt_filter_config(struct ebt_stp_info *info,
verdict = 0;
for (i = 0; i < 6; i++)
verdict |= (stpc->root[2+i] ^ c->root_addr[i]) &
c->root_addrmsk[i];
if (FWINV(verdict != 0, EBT_STP_ROOTADDR))
return EBT_NOMATCH;
}
@@ -82,7 +82,7 @@ static int ebt_filter_config(struct ebt_stp_info *info,
verdict = 0;
for (i = 0; i < 6; i++)
verdict |= (stpc->sender[2+i] ^ c->sender_addr[i]) &
c->sender_addrmsk[i];
if (FWINV(verdict != 0, EBT_STP_SENDERADDR))
return EBT_NOMATCH;
}
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index c1af68b5a29c..2e4cb24e191a 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -10,8 +10,8 @@
* Based on ipt_ULOG.c, which is
* (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
*
* This module accepts two parameters:
*
* nlbufsiz:
*   The parameter specifies how big the buffer for each netlink multicast
* group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
@@ -43,17 +43,17 @@
#include "../br_private.h"

#define PRINTR(format, args...) do { if (net_ratelimit()) \
printk(format , ## args); } while (0)

static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0600);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
"(defaults to 4096)");

static unsigned int flushtimeout = 10;
module_param(flushtimeout, uint, 0600);
MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths ofa second) "
"(defaults to 10)");

typedef struct {
unsigned int qlen;		/* number of nlmsgs' in the skb */
@@ -157,7 +157,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
}

nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
size - NLMSG_ALIGN(sizeof(*nlh)));
ub->qlen++;

pm = NLMSG_DATA(nlh);
@@ -302,7 +302,7 @@ static int __init ebt_ulog_init(void)
}

ebtulognl = netlink_kernel_create(NETLINK_NFLOG, EBT_ULOG_MAXNLGROUPS,
NULL, THIS_MODULE);
if (!ebtulognl)
ret = -ENOMEM;
else if ((ret = ebt_register_watcher(&ulog)))
@@ -344,4 +344,4 @@ module_exit(ebt_ulog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("ebtables userspace logging module for bridged Ethernet"
" frames");
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 7ee377622964..a43c697d3d73 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -128,9 +128,9 @@ ebt_check_vlan(const char *tablename,

/* Reserved VLAN ID (VID) values
* -----------------------------
* 0 - The null VLAN ID.
* 1 - The default Port VID (PVID)
* 0x0FFF - Reserved for implementation use.
* if_vlan.h: VLAN_GROUP_ARRAY_LEN 4096. */
if (GET_BITMASK(EBT_VLAN_ID)) {
if (!!info->id) { /* if id!=0 => check vid range */
@@ -141,7 +141,7 @@ ebt_check_vlan(const char *tablename,
return -EINVAL;
}
/* Note: This is valid VLAN-tagged frame point.
* Any value of user_priority are acceptable,
* but should be ignored according to 802.1Q Std.
* So we just drop the prio flag. */
info->bitmask &= ~EBT_VLAN_PRIO;
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 127135ead2d5..81d84145c417 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -51,10 +51,10 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
}

static struct ebt_table frame_filter =
{
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.check = check,
.me = THIS_MODULE,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 6c84ccb8c9d7..5818d70b8e8d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -33,10 +33,10 @@
#include "../br_private.h"

#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
"report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
": out of memory: "format, ## args)
/* #define MEMPRINT(format, args...) */


@@ -482,7 +482,7 @@ ebt_check_entry_size_and_hooks(struct ebt_entry *e,
as it said it has */
if (*n != *cnt) {
BUGPRINT("nentries does not equal the nr of entries "
"in the chain\n");
return -EINVAL;
}
if (((struct ebt_entries *)e)->policy != EBT_DROP &&
@@ -809,7 +809,7 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
i = 0; /* holds the expected nr. of entries for the chain */
j = 0; /* holds the up to now counted entries for the chain */
k = 0; /* holds the total nr. of entries, should equal
newinfo->nentries afterwards */
udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
ebt_check_entry_size_and_hooks, newinfo,
@@ -820,7 +820,7 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)

if (i != j) {
BUGPRINT("nentries does not equal the nr of entries in the "
"(last) chain\n");
return -EINVAL;
}
if (k != newinfo->nentries) {
@@ -835,7 +835,7 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
if an error occurs */
newinfo->chainstack =
vmalloc((highest_possible_processor_id()+1)
* sizeof(*(newinfo->chainstack)));
if (!newinfo->chainstack)
return -ENOMEM;
for_each_possible_cpu(i) {
@@ -948,7 +948,7 @@ static int do_replace(void __user *user, unsigned int len)
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;

countersize = COUNTER_OFFSET(tmp.nentries) *
(highest_possible_processor_id()+1);
newinfo = vmalloc(sizeof(*newinfo) + countersize);
if (!newinfo)
@@ -1350,7 +1350,7 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u

hlp = ubase + (((char *)e + e->target_offset) - base);
t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
if (ret != 0)
return ret;
diff --git a/net/compat.c b/net/compat.c
index 52d32f1bc728..9c970749a3b0 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -1,4 +1,4 @@
/*
 * 32bit Socket syscall emulation. Based on arch/sparc64/kernel/sys_sparc32.c.
 *
 * Copyright (C) 2000 VA Linux Co
@@ -8,7 +8,7 @@
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Hewlett-Packard Co.
 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000,2001 Andi Kleen, SuSE Labs
 */

#include <linux/kernel.h>
@@ -225,14 +225,14 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
		return 0; /* XXX: return error? check spec. */
	}

	if (level == SOL_SOCKET && type == SO_TIMESTAMP) {
		struct timeval *tv = (struct timeval *)data;
		ctv.tv_sec = tv->tv_sec;
		ctv.tv_usec = tv->tv_usec;
		data = &ctv;
		len = sizeof(struct compat_timeval);
	}

	cmlen = CMSG_COMPAT_LEN(len);
	if(kmsg->msg_controllen < cmlen) {
		kmsg->msg_flags |= MSG_CTRUNC;
@@ -419,7 +419,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
			char __user *optval, int optlen)
{
	struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
	struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
	compat_uptr_t ptr;
	u16 len;

@@ -610,14 +610,14 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
	int ret;
	u32 a[6];
	u32 a0, a1;

	if (call < SYS_SOCKET || call > SYS_RECVMSG)
		return -EINVAL;
	if (copy_from_user(a, args, nas[call]))
		return -EFAULT;
	a0 = a[0];
	a1 = a[1];

	switch(call) {
	case SYS_SOCKET:
		ret = sys_socket(a0, a1, a[2]);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 797fdd4352ce..186212b5b7da 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -430,7 +430,7 @@ EXPORT_SYMBOL(__skb_checksum_complete);
 * @skb: skbuff
 * @hlen: hardware length
 * @iov: io vector
 *
 * Caller _must_ check that skb will fit to this iovec.
 *
 * Returns: 0 - success.
diff --git a/net/core/dev.c b/net/core/dev.c
index 1e94a1b9a0f4..85d58d799329 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -255,7 +255,7 @@ static int netdev_nit;
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep therefore it can not
 * guarantee all CPU's that are in middle of receiving packets
 * will see the new packet type (until the next received packet).
 */
@@ -282,7 +282,7 @@ void dev_add_pack(struct packet_type *pt)
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPU's have gone
@@ -327,7 +327,7 @@ out:
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

@@ -607,7 +607,7 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype);
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns NULL if a device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */
@@ -802,7 +802,7 @@ void netdev_state_change(struct net_device *dev)

void dev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
@@ -860,7 +860,7 @@ int dev_open(struct net_device *dev)
		clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 * If it went open OK then:
	 */

@@ -964,7 +964,7 @@ int dev_close(struct net_device *dev)
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

@@ -979,7 +979,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
		for (dev = dev_base; dev; dev = dev->next) {
			nb->notifier_call(nb, NETDEV_REGISTER, dev);

			if (dev->flags & IFF_UP)
				nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
@@ -1157,7 +1157,7 @@ void netif_device_attach(struct net_device *dev)
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
@@ -1197,7 +1197,7 @@ int skb_checksum_help(struct sk_buff *skb)

out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

@@ -1258,7 +1258,7 @@ EXPORT_SYMBOL(skb_gso_segment);
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
@@ -1372,7 +1372,7 @@ gso:
		if (unlikely(netif_queue_stopped(dev) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
@@ -1449,25 +1449,25 @@ int dev_queue_xmit(struct sk_buff *skb)
	    (!(dev->features & NETIF_F_GEN_CSUM) &&
	     (!(dev->features & NETIF_F_IP_CSUM) ||
	      skb->protocol != htons(ETH_P_IP))))
		if (skb_checksum_help(skb))
			goto out_kfree_skb;

gso:
	spin_lock_prefetch(&dev->queue_lock);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */
@@ -1715,8 +1715,8 @@ static __inline__ int handle_bridge(struct sk_buff **pskb,
	if (*pt_prev) {
		*ret = deliver_skb(*pskb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, pskb);
}
#else
@@ -1728,16 +1728,16 @@ static __inline__ int handle_bridge(struct sk_buff **pskb,
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesnt stop any functionality; if you dont have
 * the ingress scheduler, you just cant add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;

	if (dev->qdisc_ingress) {
		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
		if (MAX_RED_LOOP < ttl++) {
@@ -1801,7 +1801,7 @@ int netif_receive_skb(struct sk_buff *skb)

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
@@ -1833,7 +1833,7 @@ ncls:
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
@@ -2061,7 +2061,7 @@ static int dev_ifconf(char __user *arg)
				total += done;
			}
		}
	}

	/*
	 * All done. Write the updated control block back to the caller.
@@ -2154,7 +2154,7 @@ static struct netif_rx_stats *softnet_get_online(loff_t *pos)
	struct netif_rx_stats *rc = NULL;

	while (*pos < NR_CPUS)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
@@ -2282,7 +2282,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
	}

	slave->master = master;

	synchronize_net();

	if (old)
@@ -2319,13 +2319,13 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
		dev_mc_upload(dev);
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
		audit_log(current->audit_context, GFP_ATOMIC,
			AUDIT_ANOM_PROMISCUOUS,
			"dev=%s prom=%d old_prom=%d auid=%u",
			dev->name, (dev->flags & IFF_PROMISC),
			(old_flags & IFF_PROMISC),
			audit_get_loginuid(current->audit_context));
	}
}

@@ -2816,7 +2816,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
			rtnl_unlock();
			if (IW_IS_GET(cmd) &&
			    copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
@@ -2906,7 +2906,7 @@ int register_netdevice(struct net_device *dev)
			goto out;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto out;
@@ -2923,9 +2923,9 @@ int register_netdevice(struct net_device *dev)
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto out;
		}
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((dev->features & NETIF_F_SG) &&
@@ -3024,7 +3024,7 @@ int register_netdev(struct net_device *dev)
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
@@ -3041,7 +3041,7 @@ EXPORT_SYMBOL(register_netdev);
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
@@ -3205,8 +3205,8 @@ EXPORT_SYMBOL(alloc_netdev);
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
@@ -3227,9 +3227,9 @@ void free_netdev(struct net_device *dev)
	kfree((char *)dev - dev->padded);
#endif
}

/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
@@ -3291,12 +3291,12 @@ void unregister_netdevice(struct net_device *dev)
	/* Shutdown queueing discipline. */
	dev_shutdown(dev);


	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);

	/*
	 * Flush the multicast chain
	 */
@@ -3483,7 +3483,7 @@ static int __init net_dev_init(void)
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
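[Editor's note, not part of this commit: the dev_add_pack()/dev_remove_pack() kernel-doc touched in the hunks above describes how protocol handlers hook into the receive path. The module below is an illustrative sketch only, assuming the 2.6.20-era four-argument packet_type receive signature; the handler and module names are hypothetical.]

/* Illustrative sketch (not part of this diff): a minimal ETH_P_ALL tap
 * registered with dev_add_pack() and removed with dev_remove_pack(). */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int sketch_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* Inspect the frame here; a tap must free its copy when done. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type sketch_pt = {
	.type = __constant_htons(ETH_P_ALL),
	.func = sketch_rcv,
};

static int __init sketch_tap_init(void)
{
	dev_add_pack(&sketch_pt);
	return 0;
}

static void __exit sketch_tap_exit(void)
{
	/* Per the dev_remove_pack() comment above, the handler may still be
	 * running on other CPUs until synchronize_net() has completed. */
	dev_remove_pack(&sketch_pt);
}

module_init(sketch_tap_init);
module_exit(sketch_tap_exit);
MODULE_LICENSE("GPL");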
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index b22648d04d36..bfcbdf73a293 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -1,12 +1,12 @@
/*
 * Linux NET3: Multicast List maintenance.
 *
 * Authors:
 *	Tim Kordas <tjk@nostromo.eeap.cwru.edu>
 *	Richard Underwood <richard@wuzz.demon.co.uk>
 *
 * Stir fried together from the IP multicast and CAP patches above
 *	Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *	Alan Cox : Update the device on a real delete
@@ -50,11 +50,11 @@


/*
 * Device multicast list maintenance.
 *
 * This is used both by IP and by the user level maintenance functions.
 * Unlike BSD we maintain a usage count on a given multicast address so
 * that a casual user application can add/delete multicasts used by
 * protocols without doing damage to the protocols when it deletes the
 * entries. It also helps IP as it tracks overlapping maps.
 *
@@ -67,7 +67,7 @@
/*
 * Update the multicast list into the physical NIC controller.
 */

static void __dev_mc_upload(struct net_device *dev)
{
	/* Don't do anything till we up the interface
@@ -100,7 +100,7 @@ void dev_mc_upload(struct net_device *dev)
/*
 * Delete a device level multicast
 */

int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
{
	int err = 0;
@@ -137,7 +137,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
			 * loaded filter is now wrong. Fix it
			 */
			__dev_mc_upload(dev);

			netif_tx_unlock_bh(dev);
			return 0;
		}
@@ -151,7 +151,7 @@ done:
/*
 * Add a device level multicast
 */

int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
{
	int err = 0;
@@ -187,7 +187,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
	dev->mc_count++;

	__dev_mc_upload(dev);

	netif_tx_unlock_bh(dev);
	return 0;

@@ -204,7 +204,7 @@ done:
void dev_mc_discard(struct net_device *dev)
{
	netif_tx_lock_bh(dev);

	while (dev->mc_list != NULL) {
		struct dev_mc_list *tmp = dev->mc_list;
		dev->mc_list = tmp->next;
@@ -225,7 +225,7 @@ static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev; dev = dev->next) {
		if (off++ == *pos)
			return dev;
	}
	return NULL;
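[Editor's note, not part of this commit: dev_mc_add() and dev_mc_delete(), whose signatures appear in the hunk headers above, are the reference-counted entry points protocols use to manage a device's hardware multicast filter. A caller might look roughly like the sketch below; the address value and function names are made up for illustration.]

/* Illustrative sketch (not part of this diff): a protocol asking a device
 * to listen to one additional link-layer multicast address. */
static unsigned char sketch_mc_addr[ETH_ALEN] = {
	0x01, 0x00, 0x5e, 0x00, 0x00, 0x01	/* example address only */
};

static void sketch_join(struct net_device *dev)
{
	/* glbl=0: reference-counted entry, dropped when the last user leaves. */
	dev_mc_add(dev, sketch_mc_addr, ETH_ALEN, 0);
}

static void sketch_leave(struct net_device *dev)
{
	dev_mc_delete(dev, sketch_mc_addr, ETH_ALEN, 0);
}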
diff --git a/net/core/dst.c b/net/core/dst.c
index f9eace78d354..61dd9d3951f1 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -29,7 +29,7 @@
 * 4) All operations modify state, so a spinlock is used.
 */
static struct dst_entry *dst_garbage_list;
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
static DEFINE_SPINLOCK(dst_lock);
@@ -141,7 +141,7 @@ void * dst_alloc(struct dst_ops * ops)
	dst->path = dst;
	dst->input = dst_discard_in;
	dst->output = dst_discard_out;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
@@ -202,7 +202,7 @@ again:
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 87dc556fd9d6..6168edd137dd 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,7 +17,7 @@
#include <linux/netdevice.h>
#include <asm/uaccess.h>

/*
 * Some useful ethtool_ops methods that're device independent.
 * If we find that all drivers want to do the same thing here,
 * we can turn these into dev_() function calls.
@@ -87,12 +87,12 @@ int ethtool_op_get_perm_addr(struct net_device *dev, struct ethtool_perm_addr *a
	unsigned char len = dev->addr_len;
	if ( addr->size < len )
		return -ETOOSMALL;

	addr->size = len;
	memcpy(data, dev->perm_addr, len);
	return 0;
}


u32 ethtool_op_get_ufo(struct net_device *dev)
{
@@ -550,7 +550,7 @@ static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
	if (copy_from_user(&edata, useraddr, sizeof(edata)))
		return -EFAULT;

	if (edata.data &&
	    !(dev->features & NETIF_F_ALL_CSUM))
		return -EINVAL;

@@ -951,7 +951,7 @@ int dev_ethtool(struct ifreq *ifr)
	default:
		rc = -EOPNOTSUPP;
	}

	if(dev->ethtool_ops->complete)
		dev->ethtool_ops->complete(dev);

diff --git a/net/core/filter.c b/net/core/filter.c
index 0df843b667f4..8123a31d919d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -53,7 +53,7 @@ static void *__load_pointer(struct sk_buff *skb, int k)
}

static inline void *load_pointer(struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
@@ -91,7 +91,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
	 */
	for (pc = 0; pc < flen; pc++) {
		fentry = &filter[pc];

		switch (fentry->code) {
		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
@@ -399,7 +399,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

@@ -411,7 +411,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

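[Editor's note, not part of this commit: sk_attach_filter() above is the kernel side of the SO_ATTACH_FILTER socket option (the compat path in net/compat.c funnels into the same code). The short userspace sketch below is for illustration only; it attaches a trivial accept-everything classic BPF program, and the function name is hypothetical.]

/* Illustrative userspace sketch (not part of this diff): attach a
 * one-instruction "accept the whole packet" classic BPF filter. */
#include <sys/socket.h>
#include <linux/filter.h>

static int attach_accept_all(int fd)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept up to 0xffff bytes */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}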
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 3cad026764f0..17daf4c9f793 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -144,7 +144,7 @@ static void est_timer(unsigned long arg)
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabed during this period.
 *
 * Returns 0 on success or a negative error code.
 */
int gen_new_estimator(struct gnet_stats_basic *bstats,
@@ -231,7 +231,7 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int
@@ -242,7 +242,7 @@ gen_replace_estimator(struct gnet_stats_basic *bstats,
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}


EXPORT_SYMBOL(gen_kill_estimator);
EXPORT_SYMBOL(gen_new_estimator);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 8f21490355fa..259473d0559d 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -57,7 +57,7 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
		int xstats_type, spinlock_t *lock, struct gnet_dump *d)
{
	memset(d, 0, sizeof(*d));

	spin_lock_bh(lock);
	d->lock = lock;
	if (type)
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 04b249c40b5b..4fb1cb9b79b9 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -40,7 +40,7 @@
int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode)
{
	int size, err, ct;

	if (m->msg_namelen) {
		if (mode == VERIFY_READ) {
			err = move_addr_to_kernel(m->msg_name, m->msg_namelen,
@@ -79,7 +79,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode)
 *
 * Note: this modifies the original iovec.
 */

int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
{
	while (len > 0) {
@@ -103,7 +103,7 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
 *
 * Note: this modifies the original iovec.
 */

int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
{
	while (len > 0) {
@@ -209,7 +209,7 @@ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						   partial_cnt))
					goto out_fault;
			}
		}
@@ -224,7 +224,7 @@ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 549a2ce951b0..8b45c9d3b249 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -124,7 +124,7 @@ void linkwatch_run_queue(void)

		dev_put(dev);
	}
}


static void linkwatch_event(struct work_struct *dummy)
@@ -133,7 +133,7 @@ static void linkwatch_event(struct work_struct *dummy)
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket
	 */
	linkwatch_nextevent = jiffies + HZ;
	clear_bit(LW_RUNNING, &linkwatch_flags);

diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index efb673ad1854..c08d69650566 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -343,7 +343,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
@@ -685,9 +685,9 @@ next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
@@ -742,7 +742,7 @@ static void neigh_timer_handler(unsigned long arg)
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
@@ -761,7 +761,7 @@ static void neigh_timer_handler(unsigned long arg)
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
@@ -847,7 +847,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
@@ -915,13 +915,13 @@ static void neigh_update_hhs(struct neighbour *neigh)
	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
	if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
	lladdr instead of overriding it
	if it is different.
	It also allows to retain current state
	if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
	NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
	a router.
@@ -944,7 +944,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
	old = neigh->nud_state;
	err = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

@@ -968,7 +968,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
@@ -1012,8 +1012,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
				((new & NUD_REACHABLE) ?
				 neigh->parms->reachable_time :
				 0)));
		}
@@ -1075,7 +1075,7 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
@@ -1127,7 +1127,7 @@ int neigh_compat_output(struct sk_buff *skb)

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

@@ -1347,10 +1347,10 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
@@ -1565,7 +1565,7 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
			err = -ENOENT;
			goto out_dev_put;
		}

		neigh = __neigh_lookup_errno(tbl, dst, dev);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
@@ -1742,12 +1742,12 @@ errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
@@ -1813,7 +1813,7 @@ int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
@@ -2321,7 +2321,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
@@ -2629,7 +2629,7 @@ static struct neigh_sysctl_table {
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
@@ -2661,7 +2661,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
@@ -2696,7 +2696,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
		goto free;
	}

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

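[Editor's note, not part of this commit: the NEIGH_UPDATE_F_* comment block reflowed above documents the flag semantics accepted by neigh_update(), whose signature also appears in the hunk headers. The fragment below is an illustration only of how a receive path might confirm an entry under those semantics; the function name and calling context are assumptions.]

/* Illustrative sketch (not part of this diff): confirm a neighbour entry
 * from a received reply, per the flag semantics documented above. */
static void sketch_confirm_neigh(struct neighbour *n, const u8 *lladdr)
{
	/* Accept the new lladdr, but with WEAK_OVERRIDE a "connected" entry
	 * whose address changed is marked suspect rather than silently
	 * overridden. */
	neigh_update(n, lladdr, NUD_REACHABLE,
		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE);
}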
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 44db095a8f7e..6189dc03108d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -2,7 +2,7 @@
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
@@ -23,7 +23,7 @@ static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
@@ -265,7 +265,7 @@ static ssize_t netstat_show(const struct device *d,

	read_lock(&dev_base_lock);
	if (dev_isalive(dev) && dev->get_stats &&
	    (stats = (*dev->get_stats)(dev)))
		ret = sprintf(buf, fmt_ulong,
			      *(unsigned long *)(((u8 *) stats) + offset));

@@ -349,7 +349,7 @@ static ssize_t wireless_show(struct device *d, char *buf,
	struct net_device *dev = to_net_dev(d);
	const struct iw_statistics *iw = NULL;
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		if(dev->wireless_handlers &&
@@ -430,7 +430,7 @@ static int netdev_uevent(struct device *d, char **envp,
#endif

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 823215d8e90f..da1019451ccb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -237,13 +237,13 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 &&
@@ -676,7 +676,7 @@ int netpoll_setup(struct netpoll *np)
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
@@ -772,9 +772,9 @@ void netpoll_cleanup(struct netpoll *np)
			np->dev->npinfo = NULL;
			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);
				flush_scheduled_work();

				kfree(npinfo);
			}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 04d4b93c68eb..c2818e07a4bd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -15,7 +15,7 @@
 *
 *
 * A tool for loading the network with preconfigurated packets.
 * The tool is implemented as a linux module. Parameters are output
 * device, delay (to hard_xmit), number of packets, and whether
 * to use multiple SKBs or just the same one.
 * pktgen uses the installed interface's output routine.
@@ -44,14 +44,14 @@
 * * Add IOCTL interface to easily get counters & configuration.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
 * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0
 * as a "fastpath" with a configurable number of clones after alloc's.
 * clone_skb=0 means all packets are allocated this also means ranges time
 * stamps etc can be used. clone_skb=100 means 1 malloc is followed by 100
 * clones.
 *
 * Also moved to /proc/net/pktgen/
 * --ro
 *
 * Sept 10: Fixed threading/locking. Lots of bone-headed and more clever
@@ -60,28 +60,28 @@
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.txt for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start. This process checks
 * for running devices in the if_list and sends packets until count is 0 it
 * also the thread checks the thread->control which is used for inter-process
 * communication. controlling process "posts" operations to the threads this
 * way. The if_lock should be possible to remove when add/rem_device is merged
 * into this too.
 *
 * By design there should only be *one* "controlling" process. In practice
 * multiple write accesses gives unpredictable result. Understood by "write"
 * to /proc gives result code thats should be read be the "writer".
 * For practical use this should be no problem.
 *
 * Note when adding devices to a specific CPU there good idea to also assign
 * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU.
 * --ro
 *
 * Fix refcount off by one if first packet fails, potential null deref,
 * memleak 030710- KJP
 *
 * First "ranges" functionality for ipv6 030726 --ro
@@ -89,22 +89,22 @@
 * Included flow support. 030802 ANK.
 *
 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
 *
 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
 *
 * New xmit() return, do_div and misc clean up by Stephen Hemminger
 * <shemminger@osdl.org> 040923
 *
 * Randy Dunlap fixed u64 printk compiler waring
 *
 * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org>
 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
 *
 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 *
 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com>
 * 050103
 *
 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
@@ -456,7 +456,7 @@ static inline __u64 pg_div64(__u64 n, __u64 base)
/*
 * How do we know if the architecture we are running on
 * supports division with 64 bit base?
 *
 */
#if defined(__sparc_v9__) || defined(__powerpc64__) || defined(__alpha__) || defined(__x86_64__) || defined(__ia64__)
462 462
@@ -529,7 +529,7 @@ static struct notifier_block pktgen_notifier_block = {
529}; 529};
530 530
531/* 531/*
532 * /proc handling functions 532 * /proc handling functions
533 * 533 *
534 */ 534 */
535 535
@@ -1979,7 +1979,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1979 1979
1980 if (pkt_dev->flags & F_IPV6) { 1980 if (pkt_dev->flags & F_IPV6) {
1981 /* 1981 /*
1982 * Skip this automatic address setting until locks or functions 1982 * Skip this automatic address setting until locks or functions
1983 * gets exported 1983 * gets exported
1984 */ 1984 */
1985 1985
@@ -2477,10 +2477,10 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2477} 2477}
2478 2478
2479/* 2479/*
2480 * scan_ip6, fmt_ip taken from dietlibc-0.21 2480 * scan_ip6, fmt_ip taken from dietlibc-0.21
2481 * Author Felix von Leitner <felix-dietlibc@fefe.de> 2481 * Author Felix von Leitner <felix-dietlibc@fefe.de>
2482 * 2482 *
2483 * Slightly modified for kernel. 2483 * Slightly modified for kernel.
2484 * Should be candidate for net/ipv4/utils.c 2484 * Should be candidate for net/ipv4/utils.c
2485 * --ro 2485 * --ro
2486 */ 2486 */
@@ -3256,7 +3256,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3256out:; 3256out:;
3257} 3257}
3258 3258
3259/* 3259/*
3260 * Main loop of the thread goes here 3260 * Main loop of the thread goes here
3261 */ 3261 */
3262 3262
@@ -3365,8 +3365,8 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
3365 return pkt_dev; 3365 return pkt_dev;
3366} 3366}
3367 3367
3368/* 3368/*
3369 * Adds a dev at front of if_list. 3369 * Adds a dev at front of if_list.
3370 */ 3370 */
3371 3371
3372static int add_dev_to_thread(struct pktgen_thread *t, 3372static int add_dev_to_thread(struct pktgen_thread *t,
@@ -3510,8 +3510,8 @@ static int __init pktgen_create_thread(int cpu)
3510 return 0; 3510 return 0;
3511} 3511}
3512 3512
3513/* 3513/*
3514 * Removes a device from the thread if_list. 3514 * Removes a device from the thread if_list.
3515 */ 3515 */
3516static void _rem_dev_from_if_list(struct pktgen_thread *t, 3516static void _rem_dev_from_if_list(struct pktgen_thread *t,
3517 struct pktgen_dev *pkt_dev) 3517 struct pktgen_dev *pkt_dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9bf9ae05f157..ee5a787271f6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -874,7 +874,7 @@ void __init rtnetlink_init(void)
874 panic("rtnetlink_init: cannot allocate rta_buf\n"); 874 panic("rtnetlink_init: cannot allocate rta_buf\n");
875 875
876 rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv, 876 rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv,
877 THIS_MODULE); 877 THIS_MODULE);
878 if (rtnl == NULL) 878 if (rtnl == NULL)
879 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 879 panic("rtnetlink_init: cannot initialize rtnetlink\n");
880 netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV); 880 netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
diff --git a/net/core/scm.c b/net/core/scm.c
index 271cf060ef8c..292ad8d5ad76 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -36,7 +36,7 @@
36 36
37 37
38/* 38/*
39 * Only allow a user to send credentials, that they could set with 39 * Only allow a user to send credentials, that they could set with
40 * setu(g)id. 40 * setu(g)id.
41 */ 41 */
42 42
@@ -79,11 +79,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
79 79
80 if (fpl->count + num > SCM_MAX_FD) 80 if (fpl->count + num > SCM_MAX_FD)
81 return -EINVAL; 81 return -EINVAL;
82 82
83 /* 83 /*
84 * Verify the descriptors and increment the usage count. 84 * Verify the descriptors and increment the usage count.
85 */ 85 */
86 86
87 for (i=0; i< num; i++) 87 for (i=0; i< num; i++)
88 { 88 {
89 int fd = fdp[i]; 89 int fd = fdp[i];
@@ -123,7 +123,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
123 /* The first check was omitted in <= 2.2.5. The reasoning was 123 /* The first check was omitted in <= 2.2.5. The reasoning was
124 that parser checks cmsg_len in any case, so that 124 that parser checks cmsg_len in any case, so that
125 additional check would be work duplication. 125 additional check would be work duplication.
126 But if cmsg_level is not SOL_SOCKET, we do not check 126 But if cmsg_level is not SOL_SOCKET, we do not check
127 for too short ancillary data object at all! Oops. 127 for too short ancillary data object at all! Oops.
128 OK, let's add it... 128 OK, let's add it...
129 */ 129 */
@@ -159,7 +159,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
159 p->fp = NULL; 159 p->fp = NULL;
160 } 160 }
161 return 0; 161 return 0;
162 162
163error: 163error:
164 scm_destroy(p); 164 scm_destroy(p);
165 return err; 165 return err;
@@ -189,7 +189,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
189 189
190 err = -EFAULT; 190 err = -EFAULT;
191 if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) 191 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
192 goto out; 192 goto out;
193 if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr))) 193 if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
194 goto out; 194 goto out;
195 cmlen = CMSG_SPACE(len); 195 cmlen = CMSG_SPACE(len);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f3404ae9f190..3d5646869022 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -88,7 +88,7 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
88void skb_over_panic(struct sk_buff *skb, int sz, void *here) 88void skb_over_panic(struct sk_buff *skb, int sz, void *here)
89{ 89{
90 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " 90 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
91 "data:%p tail:%p end:%p dev:%s\n", 91 "data:%p tail:%p end:%p dev:%s\n",
92 here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, 92 here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
93 skb->dev ? skb->dev->name : "<NULL>"); 93 skb->dev ? skb->dev->name : "<NULL>");
94 BUG(); 94 BUG();
@@ -106,7 +106,7 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
106void skb_under_panic(struct sk_buff *skb, int sz, void *here) 106void skb_under_panic(struct sk_buff *skb, int sz, void *here)
107{ 107{
108 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " 108 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
109 "data:%p tail:%p end:%p dev:%s\n", 109 "data:%p tail:%p end:%p dev:%s\n",
110 here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, 110 here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
111 skb->dev ? skb->dev->name : "<NULL>"); 111 skb->dev ? skb->dev->name : "<NULL>");
112 BUG(); 112 BUG();
@@ -271,7 +271,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
271 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; 271 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
272 struct sk_buff *skb; 272 struct sk_buff *skb;
273 273
274 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node); 274 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
275 if (likely(skb)) { 275 if (likely(skb)) {
276 skb_reserve(skb, NET_SKB_PAD); 276 skb_reserve(skb, NET_SKB_PAD);
277 skb->dev = dev; 277 skb->dev = dev;
@@ -819,12 +819,12 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
819 * 819 *
820 * May return error in out of memory cases. The skb is freed on error. 820 * May return error in out of memory cases. The skb is freed on error.
821 */ 821 */
822 822
823int skb_pad(struct sk_buff *skb, int pad) 823int skb_pad(struct sk_buff *skb, int pad)
824{ 824{
825 int err; 825 int err;
826 int ntail; 826 int ntail;
827 827
828 /* If the skbuff is non linear tailroom is always zero.. */ 828 /* If the skbuff is non linear tailroom is always zero.. */
829 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 829 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
830 memset(skb->data+skb->len, 0, pad); 830 memset(skb->data+skb->len, 0, pad);
@@ -851,8 +851,8 @@ int skb_pad(struct sk_buff *skb, int pad)
851free_skb: 851free_skb:
852 kfree_skb(skb); 852 kfree_skb(skb);
853 return err; 853 return err;
854} 854}
855 855
856/* Trims skb to length len. It can change skb pointers. 856/* Trims skb to length len. It can change skb pointers.
857 */ 857 */
858 858
diff --git a/net/core/sock.c b/net/core/sock.c
index 0ed5b4f0bc40..1e35d9973f57 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -34,7 +34,7 @@
34 * Alan Cox : TCP ack handling is buggy, the DESTROY timer 34 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
35 * was buggy. Put a remove_sock() in the handler 35 * was buggy. Put a remove_sock() in the handler
36 * for memory when we hit 0. Also altered the timer 36 * for memory when we hit 0. Also altered the timer
37 * code. The ACK stuff can wait and needs major 37 * code. The ACK stuff can wait and needs major
38 * TCP layer surgery. 38 * TCP layer surgery.
39 * Alan Cox : Fixed TCP ack bug, removed remove sock 39 * Alan Cox : Fixed TCP ack bug, removed remove sock
40 * and fixed timer/inet_bh race. 40 * and fixed timer/inet_bh race.
@@ -217,8 +217,8 @@ static void sock_warn_obsolete_bsdism(const char *name)
217{ 217{
218 static int warned; 218 static int warned;
219 static char warncomm[TASK_COMM_LEN]; 219 static char warncomm[TASK_COMM_LEN];
220 if (strcmp(warncomm, current->comm) && warned < 5) { 220 if (strcmp(warncomm, current->comm) && warned < 5) {
221 strcpy(warncomm, current->comm); 221 strcpy(warncomm, current->comm);
222 printk(KERN_WARNING "process `%s' is using obsolete " 222 printk(KERN_WARNING "process `%s' is using obsolete "
223 "%s SO_BSDCOMPAT\n", warncomm, name); 223 "%s SO_BSDCOMPAT\n", warncomm, name);
224 warned++; 224 warned++;
@@ -226,8 +226,8 @@ static void sock_warn_obsolete_bsdism(const char *name)
226} 226}
227 227
228static void sock_disable_timestamp(struct sock *sk) 228static void sock_disable_timestamp(struct sock *sk)
229{ 229{
230 if (sock_flag(sk, SOCK_TIMESTAMP)) { 230 if (sock_flag(sk, SOCK_TIMESTAMP)) {
231 sock_reset_flag(sk, SOCK_TIMESTAMP); 231 sock_reset_flag(sk, SOCK_TIMESTAMP);
232 net_disable_timestamp(); 232 net_disable_timestamp();
233 } 233 }
@@ -347,7 +347,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
347 int valbool; 347 int valbool;
348 struct linger ling; 348 struct linger ling;
349 int ret = 0; 349 int ret = 0;
350 350
351 /* 351 /*
352 * Options without arguments 352 * Options without arguments
353 */ 353 */
@@ -360,20 +360,20 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
360 return 0; 360 return 0;
361 } 361 }
362#endif 362#endif
363 363
364 if(optlen<sizeof(int)) 364 if(optlen<sizeof(int))
365 return(-EINVAL); 365 return(-EINVAL);
366 366
367 if (get_user(val, (int __user *)optval)) 367 if (get_user(val, (int __user *)optval))
368 return -EFAULT; 368 return -EFAULT;
369 369
370 valbool = val?1:0; 370 valbool = val?1:0;
371 371
372 lock_sock(sk); 372 lock_sock(sk);
373 373
374 switch(optname) 374 switch(optname)
375 { 375 {
376 case SO_DEBUG: 376 case SO_DEBUG:
377 if(val && !capable(CAP_NET_ADMIN)) 377 if(val && !capable(CAP_NET_ADMIN))
378 { 378 {
379 ret = -EACCES; 379 ret = -EACCES;
@@ -389,7 +389,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
389 case SO_TYPE: 389 case SO_TYPE:
390 case SO_ERROR: 390 case SO_ERROR:
391 ret = -ENOPROTOOPT; 391 ret = -ENOPROTOOPT;
392 break; 392 break;
393 case SO_DONTROUTE: 393 case SO_DONTROUTE:
394 if (valbool) 394 if (valbool)
395 sock_set_flag(sk, SOCK_LOCALROUTE); 395 sock_set_flag(sk, SOCK_LOCALROUTE);
@@ -404,7 +404,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
404 about it this is right. Otherwise apps have to 404 about it this is right. Otherwise apps have to
405 play 'guess the biggest size' games. RCVBUF/SNDBUF 405 play 'guess the biggest size' games. RCVBUF/SNDBUF
406 are treated in BSD as hints */ 406 are treated in BSD as hints */
407 407
408 if (val > sysctl_wmem_max) 408 if (val > sysctl_wmem_max)
409 val = sysctl_wmem_max; 409 val = sysctl_wmem_max;
410set_sndbuf: 410set_sndbuf:
@@ -433,7 +433,7 @@ set_sndbuf:
433 about it this is right. Otherwise apps have to 433 about it this is right. Otherwise apps have to
434 play 'guess the biggest size' games. RCVBUF/SNDBUF 434 play 'guess the biggest size' games. RCVBUF/SNDBUF
435 are treated in BSD as hints */ 435 are treated in BSD as hints */
436 436
437 if (val > sysctl_rmem_max) 437 if (val > sysctl_rmem_max)
438 val = sysctl_rmem_max; 438 val = sysctl_rmem_max;
439set_rcvbuf: 439set_rcvbuf:
@@ -474,16 +474,16 @@ set_rcvbuf:
474 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 474 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
475 break; 475 break;
476 476
477 case SO_OOBINLINE: 477 case SO_OOBINLINE:
478 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 478 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
479 break; 479 break;
480 480
481 case SO_NO_CHECK: 481 case SO_NO_CHECK:
482 sk->sk_no_check = valbool; 482 sk->sk_no_check = valbool;
483 break; 483 break;
484 484
485 case SO_PRIORITY: 485 case SO_PRIORITY:
486 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 486 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
487 sk->sk_priority = val; 487 sk->sk_priority = val;
488 else 488 else
489 ret = -EPERM; 489 ret = -EPERM;
@@ -547,9 +547,9 @@ set_rcvbuf:
547#ifdef CONFIG_NETDEVICES 547#ifdef CONFIG_NETDEVICES
548 case SO_BINDTODEVICE: 548 case SO_BINDTODEVICE:
549 { 549 {
550 char devname[IFNAMSIZ]; 550 char devname[IFNAMSIZ];
551 551
552 /* Sorry... */ 552 /* Sorry... */
553 if (!capable(CAP_NET_RAW)) { 553 if (!capable(CAP_NET_RAW)) {
554 ret = -EPERM; 554 ret = -EPERM;
555 break; 555 break;
@@ -557,9 +557,9 @@ set_rcvbuf:
557 557
558 /* Bind this socket to a particular device like "eth0", 558 /* Bind this socket to a particular device like "eth0",
559 * as specified in the passed interface name. If the 559 * as specified in the passed interface name. If the
560 * name is "" or the option length is zero the socket 560 * name is "" or the option length is zero the socket
561 * is not bound. 561 * is not bound.
562 */ 562 */
563 563
564 if (!valbool) { 564 if (!valbool) {
565 sk->sk_bound_dev_if = 0; 565 sk->sk_bound_dev_if = 0;
@@ -608,7 +608,7 @@ set_rcvbuf:
608 case SO_DETACH_FILTER: 608 case SO_DETACH_FILTER:
609 rcu_read_lock_bh(); 609 rcu_read_lock_bh();
610 filter = rcu_dereference(sk->sk_filter); 610 filter = rcu_dereference(sk->sk_filter);
611 if (filter) { 611 if (filter) {
612 rcu_assign_pointer(sk->sk_filter, NULL); 612 rcu_assign_pointer(sk->sk_filter, NULL);
613 sk_filter_release(sk, filter); 613 sk_filter_release(sk, filter);
614 rcu_read_unlock_bh(); 614 rcu_read_unlock_bh();
@@ -628,9 +628,9 @@ set_rcvbuf:
628 /* We implement the SO_SNDLOWAT etc to 628 /* We implement the SO_SNDLOWAT etc to
629 not be settable (1003.1g 5.3) */ 629 not be settable (1003.1g 5.3) */
630 default: 630 default:
631 ret = -ENOPROTOOPT; 631 ret = -ENOPROTOOPT;
632 break; 632 break;
633 } 633 }
634 release_sock(sk); 634 release_sock(sk);
635 return ret; 635 return ret;
636} 636}
@@ -640,32 +640,32 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
640 char __user *optval, int __user *optlen) 640 char __user *optval, int __user *optlen)
641{ 641{
642 struct sock *sk = sock->sk; 642 struct sock *sk = sock->sk;
643 643
644 union 644 union
645 { 645 {
646 int val; 646 int val;
647 struct linger ling; 647 struct linger ling;
648 struct timeval tm; 648 struct timeval tm;
649 } v; 649 } v;
650 650
651 unsigned int lv = sizeof(int); 651 unsigned int lv = sizeof(int);
652 int len; 652 int len;
653 653
654 if(get_user(len,optlen)) 654 if(get_user(len,optlen))
655 return -EFAULT; 655 return -EFAULT;
656 if(len < 0) 656 if(len < 0)
657 return -EINVAL; 657 return -EINVAL;
658 658
659 switch(optname) 659 switch(optname)
660 { 660 {
661 case SO_DEBUG: 661 case SO_DEBUG:
662 v.val = sock_flag(sk, SOCK_DBG); 662 v.val = sock_flag(sk, SOCK_DBG);
663 break; 663 break;
664 664
665 case SO_DONTROUTE: 665 case SO_DONTROUTE:
666 v.val = sock_flag(sk, SOCK_LOCALROUTE); 666 v.val = sock_flag(sk, SOCK_LOCALROUTE);
667 break; 667 break;
668 668
669 case SO_BROADCAST: 669 case SO_BROADCAST:
670 v.val = !!sock_flag(sk, SOCK_BROADCAST); 670 v.val = !!sock_flag(sk, SOCK_BROADCAST);
671 break; 671 break;
@@ -673,7 +673,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
673 case SO_SNDBUF: 673 case SO_SNDBUF:
674 v.val = sk->sk_sndbuf; 674 v.val = sk->sk_sndbuf;
675 break; 675 break;
676 676
677 case SO_RCVBUF: 677 case SO_RCVBUF:
678 v.val = sk->sk_rcvbuf; 678 v.val = sk->sk_rcvbuf;
679 break; 679 break;
@@ -687,7 +687,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
687 break; 687 break;
688 688
689 case SO_TYPE: 689 case SO_TYPE:
690 v.val = sk->sk_type; 690 v.val = sk->sk_type;
691 break; 691 break;
692 692
693 case SO_ERROR: 693 case SO_ERROR:
@@ -699,7 +699,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
699 case SO_OOBINLINE: 699 case SO_OOBINLINE:
700 v.val = !!sock_flag(sk, SOCK_URGINLINE); 700 v.val = !!sock_flag(sk, SOCK_URGINLINE);
701 break; 701 break;
702 702
703 case SO_NO_CHECK: 703 case SO_NO_CHECK:
704 v.val = sk->sk_no_check; 704 v.val = sk->sk_no_check;
705 break; 705 break;
@@ -707,13 +707,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
707 case SO_PRIORITY: 707 case SO_PRIORITY:
708 v.val = sk->sk_priority; 708 v.val = sk->sk_priority;
709 break; 709 break;
710 710
711 case SO_LINGER: 711 case SO_LINGER:
712 lv = sizeof(v.ling); 712 lv = sizeof(v.ling);
713 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); 713 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
714 v.ling.l_linger = sk->sk_lingertime / HZ; 714 v.ling.l_linger = sk->sk_lingertime / HZ;
715 break; 715 break;
716 716
717 case SO_BSDCOMPAT: 717 case SO_BSDCOMPAT:
718 sock_warn_obsolete_bsdism("getsockopt"); 718 sock_warn_obsolete_bsdism("getsockopt");
719 break; 719 break;
@@ -750,7 +750,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
750 750
751 case SO_SNDLOWAT: 751 case SO_SNDLOWAT:
752 v.val=1; 752 v.val=1;
753 break; 753 break;
754 754
755 case SO_PASSCRED: 755 case SO_PASSCRED:
756 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; 756 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
@@ -798,9 +798,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
798 if (copy_to_user(optval, &v, len)) 798 if (copy_to_user(optval, &v, len))
799 return -EFAULT; 799 return -EFAULT;
800lenout: 800lenout:
801 if (put_user(len, optlen)) 801 if (put_user(len, optlen))
802 return -EFAULT; 802 return -EFAULT;
803 return 0; 803 return 0;
804} 804}
805 805
806/* 806/*
@@ -846,7 +846,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
846 sk->sk_prot = sk->sk_prot_creator = prot; 846 sk->sk_prot = sk->sk_prot_creator = prot;
847 sock_lock_init(sk); 847 sock_lock_init(sk);
848 } 848 }
849 849
850 if (security_sk_alloc(sk, family, priority)) 850 if (security_sk_alloc(sk, family, priority))
851 goto out_free; 851 goto out_free;
852 852
@@ -988,8 +988,8 @@ void __init sk_init(void)
988 */ 988 */
989 989
990 990
991/* 991/*
992 * Write buffer destructor automatically called from kfree_skb. 992 * Write buffer destructor automatically called from kfree_skb.
993 */ 993 */
994void sock_wfree(struct sk_buff *skb) 994void sock_wfree(struct sk_buff *skb)
995{ 995{
@@ -1002,8 +1002,8 @@ void sock_wfree(struct sk_buff *skb)
1002 sock_put(sk); 1002 sock_put(sk);
1003} 1003}
1004 1004
1005/* 1005/*
1006 * Read buffer destructor automatically called from kfree_skb. 1006 * Read buffer destructor automatically called from kfree_skb.
1007 */ 1007 */
1008void sock_rfree(struct sk_buff *skb) 1008void sock_rfree(struct sk_buff *skb)
1009{ 1009{
@@ -1051,7 +1051,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1051 1051
1052/* 1052/*
1053 * Allocate a skb from the socket's receive buffer. 1053 * Allocate a skb from the socket's receive buffer.
1054 */ 1054 */
1055struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, 1055struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1056 gfp_t priority) 1056 gfp_t priority)
1057{ 1057{
@@ -1065,16 +1065,16 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1065 return NULL; 1065 return NULL;
1066} 1066}
1067 1067
1068/* 1068/*
1069 * Allocate a memory block from the socket's option memory buffer. 1069 * Allocate a memory block from the socket's option memory buffer.
1070 */ 1070 */
1071void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 1071void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1072{ 1072{
1073 if ((unsigned)size <= sysctl_optmem_max && 1073 if ((unsigned)size <= sysctl_optmem_max &&
1074 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { 1074 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1075 void *mem; 1075 void *mem;
1076 /* First do the add, to avoid the race if kmalloc 1076 /* First do the add, to avoid the race if kmalloc
1077 * might sleep. 1077 * might sleep.
1078 */ 1078 */
1079 atomic_add(size, &sk->sk_omem_alloc); 1079 atomic_add(size, &sk->sk_omem_alloc);
1080 mem = kmalloc(size, priority); 1080 mem = kmalloc(size, priority);
@@ -1210,7 +1210,7 @@ failure:
1210 return NULL; 1210 return NULL;
1211} 1211}
1212 1212
1213struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 1213struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1214 int noblock, int *errcode) 1214 int noblock, int *errcode)
1215{ 1215{
1216 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); 1216 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
@@ -1298,7 +1298,7 @@ int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1298 return -EOPNOTSUPP; 1298 return -EOPNOTSUPP;
1299} 1299}
1300 1300
1301int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 1301int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1302 int len, int flags) 1302 int len, int flags)
1303{ 1303{
1304 return -EOPNOTSUPP; 1304 return -EOPNOTSUPP;
@@ -1314,7 +1314,7 @@ int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1314 return -EOPNOTSUPP; 1314 return -EOPNOTSUPP;
1315} 1315}
1316 1316
1317int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 1317int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1318 int *len, int peer) 1318 int *len, int peer)
1319{ 1319{
1320 return -EOPNOTSUPP; 1320 return -EOPNOTSUPP;
@@ -1400,7 +1400,7 @@ static void sock_def_error_report(struct sock *sk)
1400 read_lock(&sk->sk_callback_lock); 1400 read_lock(&sk->sk_callback_lock);
1401 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 1401 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1402 wake_up_interruptible(sk->sk_sleep); 1402 wake_up_interruptible(sk->sk_sleep);
1403 sk_wake_async(sk,0,POLL_ERR); 1403 sk_wake_async(sk,0,POLL_ERR);
1404 read_unlock(&sk->sk_callback_lock); 1404 read_unlock(&sk->sk_callback_lock);
1405} 1405}
1406 1406
@@ -1473,7 +1473,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1473 sk->sk_send_head = NULL; 1473 sk->sk_send_head = NULL;
1474 1474
1475 init_timer(&sk->sk_timer); 1475 init_timer(&sk->sk_timer);
1476 1476
1477 sk->sk_allocation = GFP_KERNEL; 1477 sk->sk_allocation = GFP_KERNEL;
1478 sk->sk_rcvbuf = sysctl_rmem_default; 1478 sk->sk_rcvbuf = sysctl_rmem_default;
1479 sk->sk_sndbuf = sysctl_wmem_default; 1479 sk->sk_sndbuf = sysctl_wmem_default;
@@ -1553,26 +1553,26 @@ void fastcall release_sock(struct sock *sk)
1553EXPORT_SYMBOL(release_sock); 1553EXPORT_SYMBOL(release_sock);
1554 1554
1555int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) 1555int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
1556{ 1556{
1557 if (!sock_flag(sk, SOCK_TIMESTAMP)) 1557 if (!sock_flag(sk, SOCK_TIMESTAMP))
1558 sock_enable_timestamp(sk); 1558 sock_enable_timestamp(sk);
1559 if (sk->sk_stamp.tv_sec == -1) 1559 if (sk->sk_stamp.tv_sec == -1)
1560 return -ENOENT; 1560 return -ENOENT;
1561 if (sk->sk_stamp.tv_sec == 0) 1561 if (sk->sk_stamp.tv_sec == 0)
1562 do_gettimeofday(&sk->sk_stamp); 1562 do_gettimeofday(&sk->sk_stamp);
1563 return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ? 1563 return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
1564 -EFAULT : 0; 1564 -EFAULT : 0;
1565} 1565}
1566EXPORT_SYMBOL(sock_get_timestamp); 1566EXPORT_SYMBOL(sock_get_timestamp);
1567 1567
1568void sock_enable_timestamp(struct sock *sk) 1568void sock_enable_timestamp(struct sock *sk)
1569{ 1569{
1570 if (!sock_flag(sk, SOCK_TIMESTAMP)) { 1570 if (!sock_flag(sk, SOCK_TIMESTAMP)) {
1571 sock_set_flag(sk, SOCK_TIMESTAMP); 1571 sock_set_flag(sk, SOCK_TIMESTAMP);
1572 net_enable_timestamp(); 1572 net_enable_timestamp();
1573 } 1573 }
1574} 1574}
1575EXPORT_SYMBOL(sock_enable_timestamp); 1575EXPORT_SYMBOL(sock_enable_timestamp);
1576 1576
1577/* 1577/*
1578 * Get a socket option on an socket. 1578 * Get a socket option on an socket.
diff --git a/net/core/stream.c b/net/core/stream.c
index d1d7decf70b0..755bacbcb321 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -69,7 +69,7 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
69 sk->sk_write_pending++; 69 sk->sk_write_pending++;
70 done = sk_wait_event(sk, timeo_p, 70 done = sk_wait_event(sk, timeo_p,
71 !sk->sk_err && 71 !sk->sk_err &&
72 !((1 << sk->sk_state) & 72 !((1 << sk->sk_state) &
73 ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); 73 ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
74 finish_wait(sk->sk_sleep, &wait); 74 finish_wait(sk->sk_sleep, &wait);
75 sk->sk_write_pending--; 75 sk->sk_write_pending--;
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
139 139
140 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 140 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
141 sk->sk_write_pending++; 141 sk->sk_write_pending++;
142 sk_wait_event(sk, &current_timeo, !sk->sk_err && 142 sk_wait_event(sk, &current_timeo, !sk->sk_err &&
143 !(sk->sk_shutdown & SEND_SHUTDOWN) && 143 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
144 sk_stream_memory_free(sk) && 144 sk_stream_memory_free(sk) &&
145 vm_wait); 145 vm_wait);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 248a6b666aff..0ad1cd57bc39 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -58,7 +58,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
58 if (copy > len) 58 if (copy > len)
59 copy = len; 59 copy = len;
60 cookie = dma_memcpy_to_iovec(chan, to, pinned_list, 60 cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
61 skb->data + offset, copy); 61 skb->data + offset, copy);
62 if (cookie < 0) 62 if (cookie < 0)
63 goto fault; 63 goto fault;
64 len -= copy; 64 len -= copy;
@@ -108,8 +108,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
108 if (copy > len) 108 if (copy > len)
109 copy = len; 109 copy = len;
110 cookie = dma_skb_copy_datagram_iovec(chan, list, 110 cookie = dma_skb_copy_datagram_iovec(chan, list,
111 offset - start, to, copy, 111 offset - start, to, copy,
112 pinned_list); 112 pinned_list);
113 if (cookie < 0) 113 if (cookie < 0)
114 goto fault; 114 goto fault;
115 len -= copy; 115 len -= copy;
@@ -128,5 +128,5 @@ end:
128 } 128 }
129 129
130fault: 130fault:
131 return -EFAULT; 131 return -EFAULT;
132} 132}
diff --git a/net/core/utils.c b/net/core/utils.c
index 61556065f07e..07236c17fab9 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -33,9 +33,9 @@
33int net_msg_cost = 5*HZ; 33int net_msg_cost = 5*HZ;
34int net_msg_burst = 10; 34int net_msg_burst = 10;
35 35
36/* 36/*
37 * All net warning printk()s should be guarded by this function. 37 * All net warning printk()s should be guarded by this function.
38 */ 38 */
39int net_ratelimit(void) 39int net_ratelimit(void)
40{ 40{
41 return __printk_ratelimit(net_msg_cost, net_msg_burst); 41 return __printk_ratelimit(net_msg_cost, net_msg_burst);
diff --git a/net/core/wireless.c b/net/core/wireless.c
index f69ab7b4408e..64017d47b25b 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -369,7 +369,7 @@ static const struct iw_ioctl_description standard_event[] = {
369 .header_type = IW_HEADER_TYPE_ADDR, 369 .header_type = IW_HEADER_TYPE_ADDR,
370 }, 370 },
371 [IWEVEXPIRED - IWEVFIRST] = { 371 [IWEVEXPIRED - IWEVFIRST] = {
372 .header_type = IW_HEADER_TYPE_ADDR, 372 .header_type = IW_HEADER_TYPE_ADDR,
373 }, 373 },
374 [IWEVGENIE - IWEVFIRST] = { 374 [IWEVGENIE - IWEVFIRST] = {
375 .header_type = IW_HEADER_TYPE_POINT, 375 .header_type = IW_HEADER_TYPE_POINT,
@@ -377,7 +377,7 @@ static const struct iw_ioctl_description standard_event[] = {
377 .max_tokens = IW_GENERIC_IE_MAX, 377 .max_tokens = IW_GENERIC_IE_MAX,
378 }, 378 },
379 [IWEVMICHAELMICFAILURE - IWEVFIRST] = { 379 [IWEVMICHAELMICFAILURE - IWEVFIRST] = {
380 .header_type = IW_HEADER_TYPE_POINT, 380 .header_type = IW_HEADER_TYPE_POINT,
381 .token_size = 1, 381 .token_size = 1,
382 .max_tokens = sizeof(struct iw_michaelmicfailure), 382 .max_tokens = sizeof(struct iw_michaelmicfailure),
383 }, 383 },
@@ -630,11 +630,11 @@ static __inline__ void wireless_seq_printf_stats(struct seq_file *seq,
630 dev->name, stats->status, stats->qual.qual, 630 dev->name, stats->status, stats->qual.qual,
631 stats->qual.updated & IW_QUAL_QUAL_UPDATED 631 stats->qual.updated & IW_QUAL_QUAL_UPDATED
632 ? '.' : ' ', 632 ? '.' : ' ',
633 ((__s32) stats->qual.level) - 633 ((__s32) stats->qual.level) -
634 ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), 634 ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0),
635 stats->qual.updated & IW_QUAL_LEVEL_UPDATED 635 stats->qual.updated & IW_QUAL_LEVEL_UPDATED
636 ? '.' : ' ', 636 ? '.' : ' ',
637 ((__s32) stats->qual.noise) - 637 ((__s32) stats->qual.noise) -
638 ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), 638 ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0),
639 stats->qual.updated & IW_QUAL_NOISE_UPDATED 639 stats->qual.updated & IW_QUAL_NOISE_UPDATED
640 ? '.' : ' ', 640 ? '.' : ' ',
@@ -862,7 +862,7 @@ static int ioctl_standard_call(struct net_device * dev,
862 iwr->u.data.length * 862 iwr->u.data.length *
863 descr->token_size); 863 descr->token_size);
864 if (err) 864 if (err)
865 ret = -EFAULT; 865 ret = -EFAULT;
866#ifdef WE_IOCTL_DEBUG 866#ifdef WE_IOCTL_DEBUG
867 printk(KERN_DEBUG "%s (WE) : Wrote %d bytes\n", 867 printk(KERN_DEBUG "%s (WE) : Wrote %d bytes\n",
868 dev->name, 868 dev->name,
@@ -1040,7 +1040,7 @@ static inline int ioctl_private_call(struct net_device * dev,
1040 err = copy_to_user(iwr->u.data.pointer, extra, 1040 err = copy_to_user(iwr->u.data.pointer, extra,
1041 extra_size); 1041 extra_size);
1042 if (err) 1042 if (err)
1043 ret = -EFAULT; 1043 ret = -EFAULT;
1044#ifdef WE_IOCTL_DEBUG 1044#ifdef WE_IOCTL_DEBUG
1045 printk(KERN_DEBUG "%s (WE) : Wrote %d elem\n", 1045 printk(KERN_DEBUG "%s (WE) : Wrote %d elem\n",
1046 dev->name, iwr->u.data.length); 1046 dev->name, iwr->u.data.length);
@@ -1080,7 +1080,7 @@ int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd)
1080 /* A bunch of special cases, then the generic case... 1080 /* A bunch of special cases, then the generic case...
1081 * Note that 'cmd' is already filtered in dev_ioctl() with 1081 * Note that 'cmd' is already filtered in dev_ioctl() with
1082 * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ 1082 * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */
1083 switch(cmd) 1083 switch(cmd)
1084 { 1084 {
1085 case SIOCGIWSTATS: 1085 case SIOCGIWSTATS:
1086 /* Get Wireless Stats */ 1086 /* Get Wireless Stats */
@@ -2015,7 +2015,7 @@ void wireless_send_event(struct net_device * dev,
2015 * The best the driver could do is to log an error message. 2015 * The best the driver could do is to log an error message.
2016 * We will do it ourselves instead... 2016 * We will do it ourselves instead...
2017 */ 2017 */
2018 printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n", 2018 printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
2019 dev->name, cmd); 2019 dev->name, cmd);
2020 return; 2020 return;
2021 } 2021 }
@@ -2029,11 +2029,11 @@ void wireless_send_event(struct net_device * dev,
2029 if(descr->header_type == IW_HEADER_TYPE_POINT) { 2029 if(descr->header_type == IW_HEADER_TYPE_POINT) {
2030 /* Check if number of token fits within bounds */ 2030 /* Check if number of token fits within bounds */
2031 if(wrqu->data.length > descr->max_tokens) { 2031 if(wrqu->data.length > descr->max_tokens) {
2032 printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length); 2032 printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length);
2033 return; 2033 return;
2034 } 2034 }
2035 if(wrqu->data.length < descr->min_tokens) { 2035 if(wrqu->data.length < descr->min_tokens) {
2036 printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length); 2036 printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length);
2037 return; 2037 return;
2038 } 2038 }
2039 /* Calculate extra_len - extra is NULL for restricted events */ 2039 /* Calculate extra_len - extra is NULL for restricted events */
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index fd38b05d6f79..248d20f4c7c4 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -139,7 +139,7 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
139 return 0; 139 return 0;
140 } 140 }
141 141
142 hctx = ccid2_hc_tx_sk(sk); 142 hctx = ccid2_hc_tx_sk(sk);
143 143
144 ccid2_pr_debug("pipe=%d cwnd=%d\n", hctx->ccid2hctx_pipe, 144 ccid2_pr_debug("pipe=%d cwnd=%d\n", hctx->ccid2hctx_pipe,
145 hctx->ccid2hctx_cwnd); 145 hctx->ccid2hctx_cwnd);
@@ -368,13 +368,13 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
368static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset, 368static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
369 unsigned char **vec, unsigned char *veclen) 369 unsigned char **vec, unsigned char *veclen)
370{ 370{
371 const struct dccp_hdr *dh = dccp_hdr(skb); 371 const struct dccp_hdr *dh = dccp_hdr(skb);
372 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); 372 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
373 unsigned char *opt_ptr; 373 unsigned char *opt_ptr;
374 const unsigned char *opt_end = (unsigned char *)dh + 374 const unsigned char *opt_end = (unsigned char *)dh +
375 (dh->dccph_doff * 4); 375 (dh->dccph_doff * 4);
376 unsigned char opt, len; 376 unsigned char opt, len;
377 unsigned char *value; 377 unsigned char *value;
378 378
379 BUG_ON(offset < 0); 379 BUG_ON(offset < 0);
380 options += offset; 380 options += offset;
@@ -383,29 +383,29 @@ static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
383 return -1; 383 return -1;
384 384
385 while (opt_ptr != opt_end) { 385 while (opt_ptr != opt_end) {
386 opt = *opt_ptr++; 386 opt = *opt_ptr++;
387 len = 0; 387 len = 0;
388 value = NULL; 388 value = NULL;
389 389
390 /* Check if this isn't a single byte option */ 390 /* Check if this isn't a single byte option */
391 if (opt > DCCPO_MAX_RESERVED) { 391 if (opt > DCCPO_MAX_RESERVED) {
392 if (opt_ptr == opt_end) 392 if (opt_ptr == opt_end)
393 goto out_invalid_option; 393 goto out_invalid_option;
394 394
395 len = *opt_ptr++; 395 len = *opt_ptr++;
396 if (len < 3) 396 if (len < 3)
397 goto out_invalid_option; 397 goto out_invalid_option;
398 /* 398 /*
399 * Remove the type and len fields, leaving 399 * Remove the type and len fields, leaving
400 * just the value size 400 * just the value size
401 */ 401 */
402 len -= 2; 402 len -= 2;
403 value = opt_ptr; 403 value = opt_ptr;
404 opt_ptr += len; 404 opt_ptr += len;
405 405
406 if (opt_ptr > opt_end) 406 if (opt_ptr > opt_end)
407 goto out_invalid_option; 407 goto out_invalid_option;
408 } 408 }
409 409
410 switch (opt) { 410 switch (opt) {
411 case DCCPO_ACK_VECTOR_0: 411 case DCCPO_ACK_VECTOR_0:
@@ -432,7 +432,7 @@ static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
432} 432}
433 433
434static inline void ccid2_new_ack(struct sock *sk, 434static inline void ccid2_new_ack(struct sock *sk,
435 struct ccid2_seq *seqp, 435 struct ccid2_seq *seqp,
436 unsigned int *maxincr) 436 unsigned int *maxincr)
437{ 437{
438 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 438 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
@@ -759,7 +759,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
759 759
760static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) 760static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
761{ 761{
762 struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid); 762 struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
763 763
764 ccid2_change_cwnd(hctx, 1); 764 ccid2_change_cwnd(hctx, 1);
765 /* Initialize ssthresh to infinity. This means that we will exit the 765 /* Initialize ssthresh to infinity. This means that we will exit the
@@ -793,7 +793,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
793 793
794static void ccid2_hc_tx_exit(struct sock *sk) 794static void ccid2_hc_tx_exit(struct sock *sk)
795{ 795{
796 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); 796 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
797 int i; 797 int i;
798 798
799 ccid2_hc_tx_kill_rto_timer(sk); 799 ccid2_hc_tx_kill_rto_timer(sk);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 5c452a3ec4d1..5361a4d8e13b 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -284,7 +284,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
284 284
285restart_timer: 285restart_timer:
286 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 286 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
287 jiffies + usecs_to_jiffies(t_nfb)); 287 jiffies + usecs_to_jiffies(t_nfb));
288out: 288out:
289 bh_unlock_sock(sk); 289 bh_unlock_sock(sk);
290 sock_put(sk); 290 sock_put(sk);
@@ -319,7 +319,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
319 case TFRC_SSTATE_NO_SENT: 319 case TFRC_SSTATE_NO_SENT:
320 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 320 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
321 (jiffies + 321 (jiffies +
322 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); 322 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
323 hctx->ccid3hctx_last_win_count = 0; 323 hctx->ccid3hctx_last_win_count = 0;
324 hctx->ccid3hctx_t_last_win_count = now; 324 hctx->ccid3hctx_t_last_win_count = now;
325 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); 325 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
@@ -487,7 +487,7 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
487 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); 487 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
488 } else { 488 } else {
489 hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt + 489 hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
490 (u32)r_sample) / 10; 490 (u32)r_sample) / 10;
491 491
492 /* Update sending rate (step 4 of [RFC 3448, 4.3]) */ 492 /* Update sending rate (step 4 of [RFC 3448, 4.3]) */
493 if (hctx->ccid3hctx_p > 0) 493 if (hctx->ccid3hctx_p > 0)
@@ -924,7 +924,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
924} 924}
925 925
926static int ccid3_hc_rx_detect_loss(struct sock *sk, 926static int ccid3_hc_rx_detect_loss(struct sock *sk,
927 struct dccp_rx_hist_entry *packet) 927 struct dccp_rx_hist_entry *packet)
928{ 928{
929 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 929 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
930 struct dccp_rx_hist_entry *rx_hist = 930 struct dccp_rx_hist_entry *rx_hist =
@@ -1074,7 +1074,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1074 dccp_role(sk), sk, dccp_state_name(sk->sk_state)); 1074 dccp_role(sk), sk, dccp_state_name(sk->sk_state));
1075 1075
1076 p_prev = hcrx->ccid3hcrx_p; 1076 p_prev = hcrx->ccid3hcrx_p;
1077 1077
1078 /* Calculate loss event rate */ 1078 /* Calculate loss event rate */
1079 if (!list_empty(&hcrx->ccid3hcrx_li_hist)) { 1079 if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
1080 u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist); 1080 u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
@@ -1156,7 +1156,7 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
1156{ 1156{
1157 const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 1157 const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
1158 const void *val; 1158 const void *val;
1159 1159
1160 /* Listen socks doesn't have a private CCID block */ 1160 /* Listen socks doesn't have a private CCID block */
1161 if (sk->sk_state == DCCP_LISTEN) 1161 if (sk->sk_state == DCCP_LISTEN)
1162 return -EINVAL; 1162 return -EINVAL;
@@ -1183,7 +1183,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
1183{ 1183{
1184 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 1184 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
1185 const void *val; 1185 const void *val;
1186 1186
1187 /* Listen socks doesn't have a private CCID block */ 1187 /* Listen socks doesn't have a private CCID block */
1188 if (sk->sk_state == DCCP_LISTEN) 1188 if (sk->sk_state == DCCP_LISTEN)
1189 return -EINVAL; 1189 return -EINVAL;
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 1f960c19ea1b..60d00f015390 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -137,7 +137,7 @@ extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
137static inline struct dccp_rx_hist_entry * 137static inline struct dccp_rx_hist_entry *
138 dccp_rx_hist_entry_new(struct dccp_rx_hist *hist, 138 dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
139 const struct sock *sk, 139 const struct sock *sk,
140 const u32 ndp, 140 const u32 ndp,
141 const struct sk_buff *skb, 141 const struct sk_buff *skb,
142 const gfp_t prio) 142 const gfp_t prio)
143{ 143{
@@ -169,7 +169,7 @@ static inline struct dccp_rx_hist_entry *
169} 169}
170 170
171extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq, 171extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
172 u8 *ccval); 172 u8 *ccval);
173extern struct dccp_rx_hist_entry * 173extern struct dccp_rx_hist_entry *
174 dccp_rx_hist_find_data_packet(const struct list_head *list); 174 dccp_rx_hist_find_data_packet(const struct list_head *list);
175 175
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 90009fd77e15..e4e64b76c10c 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -26,7 +26,7 @@
26 The following two-column lookup table implements a part of the TCP throughput 26 The following two-column lookup table implements a part of the TCP throughput
27 equation from [RFC 3448, sec. 3.1]: 27 equation from [RFC 3448, sec. 3.1]:
28 28
29 s 29 s
30 X_calc = -------------------------------------------------------------- 30 X_calc = --------------------------------------------------------------
31 R * sqrt(2*b*p/3) + (3 * t_RTO * sqrt(3*b*p/8) * (p + 32*p^3)) 31 R * sqrt(2*b*p/3) + (3 * t_RTO * sqrt(3*b*p/8) * (p + 32*p^3))
32 32
@@ -35,7 +35,7 @@
35 s is the packet size in bytes 35 s is the packet size in bytes
36 R is the round trip time in seconds 36 R is the round trip time in seconds
37 p is the loss event rate, between 0 and 1.0, of the number of loss 37 p is the loss event rate, between 0 and 1.0, of the number of loss
38 events as a fraction of the number of packets transmitted 38 events as a fraction of the number of packets transmitted
39 t_RTO is the TCP retransmission timeout value in seconds 39 t_RTO is the TCP retransmission timeout value in seconds
40 b is the number of packets acknowledged by a single TCP ACK 40 b is the number of packets acknowledged by a single TCP ACK
41 41
@@ -47,9 +47,9 @@
47 47
48 which we can break down into: 48 which we can break down into:
49 49
50 s 50 s
51 X_calc = --------- 51 X_calc = ---------
52 R * f(p) 52 R * f(p)
53 53
54 where f(p) is given for 0 < p <= 1 by: 54 where f(p) is given for 0 < p <= 1 by:
55 55
@@ -62,7 +62,7 @@
62 * the return result f(p) 62 * the return result f(p)
63 The lookup table therefore actually tabulates the following function g(q): 63 The lookup table therefore actually tabulates the following function g(q):
64 64
65 g(q) = 1000000 * f(q/1000000) 65 g(q) = 1000000 * f(q/1000000)
66 66
67 Hence, when p <= 1, q must be less than or equal to 1000000. To achieve finer 67 Hence, when p <= 1, q must be less than or equal to 1000000. To achieve finer
68 granularity for the practically more relevant case of small values of p (up to 68 granularity for the practically more relevant case of small values of p (up to
@@ -628,7 +628,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
628 if (R == 0) { /* possible divide by zero */ 628 if (R == 0) { /* possible divide by zero */
629 DCCP_CRIT("WARNING: RTT is 0, returning maximum X_calc."); 629 DCCP_CRIT("WARNING: RTT is 0, returning maximum X_calc.");
630 return ~0U; 630 return ~0U;
631 } 631 }
632 632
633 if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */ 633 if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */
634 if (p < TFRC_SMALLEST_P) { /* 0.0000 < p < 0.0001 */ 634 if (p < TFRC_SMALLEST_P) { /* 0.0000 < p < 0.0001 */
@@ -638,7 +638,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
638 } else /* 0.0001 <= p <= 0.05 */ 638 } else /* 0.0001 <= p <= 0.05 */
639 index = p/TFRC_SMALLEST_P - 1; 639 index = p/TFRC_SMALLEST_P - 1;
640 640
641 f = tfrc_calc_x_lookup[index][1]; 641 f = tfrc_calc_x_lookup[index][1];
642 642
643 } else { /* 0.05 < p <= 1.00 */ 643 } else { /* 0.05 < p <= 1.00 */
644 index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1; 644 index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1;
@@ -687,8 +687,8 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
687 if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1]) { 687 if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1]) {
688 index = tfrc_binsearch(fvalue, 1); 688 index = tfrc_binsearch(fvalue, 1);
689 return (index + 1) * TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE; 689 return (index + 1) * TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE;
690 } 690 }
691 691
692 /* else ... it must be in the coarse-grained column */ 692 /* else ... it must be in the coarse-grained column */
693 index = tfrc_binsearch(fvalue, 0); 693 index = tfrc_binsearch(fvalue, 0);
694 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; 694 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a0900bf98e6b..e33a9edb4036 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -22,7 +22,7 @@
22 * DCCP - specific warning and debugging macros. 22 * DCCP - specific warning and debugging macros.
23 */ 23 */
24#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ 24#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \
25 __FUNCTION__, ##a) 25 __FUNCTION__, ##a)
26#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ 26#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
27 __FILE__, __LINE__, __FUNCTION__) 27 __FILE__, __LINE__, __FUNCTION__)
28#define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) 28#define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
@@ -34,7 +34,7 @@
34#ifdef MODULE 34#ifdef MODULE
35#define DCCP_PRINTK(enable, fmt, args...) do { if (enable) \ 35#define DCCP_PRINTK(enable, fmt, args...) do { if (enable) \
36 printk(fmt, ##args); \ 36 printk(fmt, ##args); \
37 } while(0) 37 } while(0)
38#else 38#else
39#define DCCP_PRINTK(enable, fmt, args...) printk(fmt, ##args) 39#define DCCP_PRINTK(enable, fmt, args...) printk(fmt, ##args)
40#endif 40#endif
@@ -128,7 +128,7 @@ enum {
128 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ 128 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */
129 DCCP_MIB_ESTABRESETS, /* EstabResets */ 129 DCCP_MIB_ESTABRESETS, /* EstabResets */
130 DCCP_MIB_CURRESTAB, /* CurrEstab */ 130 DCCP_MIB_CURRESTAB, /* CurrEstab */
131 DCCP_MIB_OUTSEGS, /* OutSegs */ 131 DCCP_MIB_OUTSEGS, /* OutSegs */
132 DCCP_MIB_OUTRSTS, 132 DCCP_MIB_OUTRSTS,
133 DCCP_MIB_ABORTONTIMEOUT, 133 DCCP_MIB_ABORTONTIMEOUT,
134 DCCP_MIB_TIMEOUTS, 134 DCCP_MIB_TIMEOUTS,
@@ -372,7 +372,7 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq)
372 (dp->dccps_gss - 372 (dp->dccps_gss -
373 dccp_msk(sk)->dccpms_sequence_window + 1)); 373 dccp_msk(sk)->dccpms_sequence_window + 1));
374} 374}
375 375
376static inline int dccp_ack_pending(const struct sock *sk) 376static inline int dccp_ack_pending(const struct sock *sk)
377{ 377{
378 const struct dccp_sock *dp = dccp_sk(sk); 378 const struct dccp_sock *dp = dccp_sk(sk);
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 95b6927ec653..cd845df5320d 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -480,8 +480,8 @@ void dccp_feat_clean(struct dccp_minisock *dmsk)
480 480
481 list_for_each_entry_safe(opt, next, &dmsk->dccpms_pending, 481 list_for_each_entry_safe(opt, next, &dmsk->dccpms_pending,
482 dccpop_node) { 482 dccpop_node) {
483 BUG_ON(opt->dccpop_val == NULL); 483 BUG_ON(opt->dccpop_val == NULL);
484 kfree(opt->dccpop_val); 484 kfree(opt->dccpop_val);
485 485
486 if (opt->dccpop_sc != NULL) { 486 if (opt->dccpop_sc != NULL) {
487 BUG_ON(opt->dccpop_sc->dccpoc_val == NULL); 487 BUG_ON(opt->dccpop_sc->dccpoc_val == NULL);
@@ -489,8 +489,8 @@ void dccp_feat_clean(struct dccp_minisock *dmsk)
489 kfree(opt->dccpop_sc); 489 kfree(opt->dccpop_sc);
490 } 490 }
491 491
492 kfree(opt); 492 kfree(opt);
493 } 493 }
494 INIT_LIST_HEAD(&dmsk->dccpms_pending); 494 INIT_LIST_HEAD(&dmsk->dccpms_pending);
495 495
496 list_for_each_entry_safe(opt, next, &dmsk->dccpms_conf, dccpop_node) { 496 list_for_each_entry_safe(opt, next, &dmsk->dccpms_conf, dccpop_node) {
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index 2c373ad7edcf..177f7dee4d10 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -30,7 +30,7 @@ static inline int dccp_feat_is_reserved(const u8 feat)
30{ 30{
31 return (feat > DCCPF_DATA_CHECKSUM && 31 return (feat > DCCPF_DATA_CHECKSUM &&
32 feat < DCCPF_MIN_CCID_SPECIFIC) || 32 feat < DCCPF_MIN_CCID_SPECIFIC) ||
33 feat == DCCPF_RESERVED; 33 feat == DCCPF_RESERVED;
34} 34}
35 35
36/* feature negotiation knows only these four option types (RFC 4340, sec. 6) */ 36/* feature negotiation knows only these four option types (RFC 4340, sec. 6) */
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 565bc80557ce..4dee462f00db 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -91,7 +91,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
91 else 91 else
92 return -1; 92 return -1;
93 } 93 }
94 94
95 /* 95 /*
96 * Step 6: Check sequence numbers 96 * Step 6: Check sequence numbers
97 * Let LSWL = S.SWL and LAWL = S.AWL 97 * Let LSWL = S.SWL and LAWL = S.AWL
@@ -136,7 +136,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
136 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq, 136 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq,
137 (unsigned long long) dp->dccps_swh, 137 (unsigned long long) dp->dccps_swh,
138 (DCCP_SKB_CB(skb)->dccpd_ack_seq == 138 (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
139 DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists", 139 DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
140 (unsigned long long) lawl, 140 (unsigned long long) lawl,
141 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq, 141 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq,
142 (unsigned long long) dp->dccps_awh); 142 (unsigned long long) dp->dccps_awh);
@@ -308,11 +308,11 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
308 if (dccp_parse_options(sk, skb)) 308 if (dccp_parse_options(sk, skb))
309 goto out_invalid_packet; 309 goto out_invalid_packet;
310 310
311 if (dccp_msk(sk)->dccpms_send_ack_vector && 311 if (dccp_msk(sk)->dccpms_send_ack_vector &&
312 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 312 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
313 DCCP_SKB_CB(skb)->dccpd_seq, 313 DCCP_SKB_CB(skb)->dccpd_seq,
314 DCCP_ACKVEC_STATE_RECEIVED)) 314 DCCP_ACKVEC_STATE_RECEIVED))
315 goto out_invalid_packet; /* FIXME: change error code */ 315 goto out_invalid_packet; /* FIXME: change error code */
316 316
317 dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq; 317 dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
318 dccp_update_gsr(sk, dp->dccps_isr); 318 dccp_update_gsr(sk, dp->dccps_isr);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index fa2c982d4309..4a83978aa660 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -106,7 +106,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
106 goto failure; 106 goto failure;
107 107
108 err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport, 108 err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
109 sk); 109 sk);
110 if (err != 0) 110 if (err != 0)
111 goto failure; 111 goto failure;
112 112
@@ -157,7 +157,7 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
157 /* We don't check in the destentry if pmtu discovery is forbidden 157 /* We don't check in the destentry if pmtu discovery is forbidden
158 * on this route. We just assume that no packet_to_big packets 158 * on this route. We just assume that no packet_to_big packets
159 * are send back when pmtu discovery is not active. 159 * are send back when pmtu discovery is not active.
160 * There is a small race when the user changes this flag in the 160 * There is a small race when the user changes this flag in the
161 * route, but I think that's acceptable. 161 * route, but I think that's acceptable.
162 */ 162 */
163 if ((dst = __sk_dst_check(sk, 0)) == NULL) 163 if ((dst = __sk_dst_check(sk, 0)) == NULL)
@@ -486,7 +486,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
486 struct sk_buff *skb; 486 struct sk_buff *skb;
487 487
488 /* First, grab a route. */ 488 /* First, grab a route. */
489 489
490 if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) 490 if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
491 goto out; 491 goto out;
492 492
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 79140b3e592e..7f51e8db3967 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -734,7 +734,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
734 looks not very well thought. For now we latch 734 looks not very well thought. For now we latch
735 options, received in the last packet, enqueued 735 options, received in the last packet, enqueued
736 by tcp. Feel free to propose better solution. 736 by tcp. Feel free to propose better solution.
737 --ANK (980728) 737 --ANK (980728)
738 */ 738 */
739 if (np->rxopt.all) 739 if (np->rxopt.all)
740 /* 740 /*
diff --git a/net/dccp/options.c b/net/dccp/options.c
index c03ba61eb6da..ca13f7731994 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -242,7 +242,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
242 DCCP_CRIT("DCCP(%p): option %d(len=%d) not " 242 DCCP_CRIT("DCCP(%p): option %d(len=%d) not "
243 "implemented, ignoring", sk, opt, len); 243 "implemented, ignoring", sk, opt, len);
244 break; 244 break;
245 } 245 }
246 246
247 if (opt != DCCPO_MANDATORY) 247 if (opt != DCCPO_MANDATORY)
248 mandatory = 0; 248 mandatory = 0;
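
The "if (opt != DCCPO_MANDATORY) mandatory = 0;" tail in the hunk above is what scopes a Mandatory option to the single option that follows it (RFC 4340, sec. 5.8.2). A self-contained sketch of that rule; the option layout follows the RFC, but the loop itself and its names are invented for illustration.

#include <stddef.h>
#include <stdint.h>

enum { DCCPO_PADDING = 0, DCCPO_MANDATORY = 1 };

/* Returns 0 on success, -1 on a malformed stream or a misplaced Mandatory. */
static int parse_dccp_options(const uint8_t *opt, size_t len)
{
	int mandatory = 0;
	size_t i = 0;

	while (i < len) {
		uint8_t type = opt[i++];
		const uint8_t *value = NULL;
		size_t vlen = 0;

		if (type > 31) {			/* multi-byte option: type, length, value */
			if (i >= len || opt[i] < 2 || i - 1 + opt[i] > len)
				return -1;
			vlen = opt[i] - 2;
			value = &opt[i + 1];
			i += opt[i] - 1;		/* skip length byte and value */
		}

		if (type == DCCPO_MANDATORY) {
			if (mandatory)
				return -1;		/* Mandatory may not follow Mandatory */
			mandatory = 1;
			continue;
		}

		/* handle (type, value, vlen) here; an unsupported option that was
		 * preceded by Mandatory must abort the connection instead of being
		 * silently ignored */
		(void)value; (void)vlen;

		mandatory = 0;				/* Mandatory covers one option only */
	}

	return mandatory ? -1 : 0;			/* trailing Mandatory is invalid too */
}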
@@ -442,7 +442,7 @@ static int dccp_insert_option_timestamp_echo(struct sock *sk,
442} 442}
443 443
444static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat, 444static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat,
445 u8 *val, u8 len) 445 u8 *val, u8 len)
446{ 446{
447 u8 *to; 447 u8 *to;
448 448
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 3435542e9652..f5c6aca1dfa4 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -87,7 +87,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
87 kfree_skb(skb); 87 kfree_skb(skb);
88 return -EPROTO; 88 return -EPROTO;
89 } 89 }
90 90
91 91
92 /* Build DCCP header and checksum it. */ 92 /* Build DCCP header and checksum it. */
93 dh = dccp_zeroed_hdr(skb, dccp_header_size); 93 dh = dccp_zeroed_hdr(skb, dccp_header_size);
@@ -415,7 +415,7 @@ static inline void dccp_connect_init(struct sock *sk)
415 415
416 sk->sk_err = 0; 416 sk->sk_err = 0;
417 sock_reset_flag(sk, SOCK_DONE); 417 sock_reset_flag(sk, SOCK_DONE);
418 418
419 dccp_sync_mss(sk, dst_mtu(dst)); 419 dccp_sync_mss(sk, dst_mtu(dst));
420 420
421 /* 421 /*
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 48438565d70f..cf28c53a389a 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -478,7 +478,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
478 err = -EINVAL; 478 err = -EINVAL;
479 else 479 else
480 err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L, 480 err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
481 (struct dccp_so_feat __user *) 481 (struct dccp_so_feat __user *)
482 optval); 482 optval);
483 break; 483 break;
484 case DCCP_SOCKOPT_CHANGE_R: 484 case DCCP_SOCKOPT_CHANGE_R:
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index e5348f369c60..41ea0f6594c4 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -60,7 +60,7 @@ static int dccp_write_timeout(struct sock *sk)
60 be far nicer to have all of the black holes fixed rather than fixing 60 be far nicer to have all of the black holes fixed rather than fixing
61 all of the TCP implementations." 61 all of the TCP implementations."
62 62
63 Golden words :-). 63 Golden words :-).
64 */ 64 */
65 65
66 dst_negative_advice(&sk->sk_dst_cache); 66 dst_negative_advice(&sk->sk_dst_cache);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 21f20f21dd32..70061641ee59 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -45,7 +45,7 @@
45 45
46/****************************************************************************** 46/******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
48 48
49 This program is free software; you can redistribute it and/or modify 49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by 50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or 51 the Free Software Foundation; either version 2 of the License, or
@@ -63,7 +63,7 @@ Version Kernel Date Author/Comments
63Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat 63Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com) 64 (emserrat@geocities.com)
65 65
66 First Development of DECnet Socket La- 66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing 67 yer for Linux. Only supports outgoing
68 connections. 68 connections.
69 69
@@ -75,28 +75,28 @@ Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
75Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat 75Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com) 76 (emserrat@geocities.com)
77 _ 77 _
78 Added support for incoming connections 78 Added support for incoming connections
79 so we can start developing server apps 79 so we can start developing server apps
80 on Linux. 80 on Linux.
81 - 81 -
82 Module Support 82 Module Support
83Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat 83Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com) 84 (emserrat@geocities.com)
85 _ 85 _
86 Added support for X11R6.4. Now we can 86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!! 87 use DECnet transport for X on Linux!!!
88 - 88 -
89Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat 89Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com) 90 (emserrat@geocities.com)
91 Removed bugs on flow control 91 Removed bugs on flow control
92 Removed bugs on incoming accessdata 92 Removed bugs on incoming accessdata
93 order 93 order
94 - 94 -
95Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat 95Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes 96 dn_recvmsg fixes
97 97
98 Patrick J. Caulfield 98 Patrick J. Caulfield
99 dn_bind fixes 99 dn_bind fixes
100*******************************************************************************/ 100*******************************************************************************/
101 101
102#include <linux/module.h> 102#include <linux/module.h>
@@ -169,7 +169,7 @@ static struct hlist_head *dn_find_list(struct sock *sk)
169 return &dn_sk_hash[dn_ntohs(scp->addrloc) & DN_SK_HASH_MASK]; 169 return &dn_sk_hash[dn_ntohs(scp->addrloc) & DN_SK_HASH_MASK];
170} 170}
171 171
172/* 172/*
173 * Valid ports are those greater than zero and not already in use. 173 * Valid ports are those greater than zero and not already in use.
174 */ 174 */
175static int check_port(__le16 port) 175static int check_port(__le16 port)
@@ -218,7 +218,7 @@ static int dn_hash_sock(struct sock *sk)
218 BUG_ON(sk_hashed(sk)); 218 BUG_ON(sk_hashed(sk));
219 219
220 write_lock_bh(&dn_hash_lock); 220 write_lock_bh(&dn_hash_lock);
221 221
222 if (!scp->addrloc && !port_alloc(sk)) 222 if (!scp->addrloc && !port_alloc(sk))
223 goto out; 223 goto out;
224 224
@@ -400,7 +400,7 @@ struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
400 400
401 sk = sk_head(&dn_wild_sk); 401 sk = sk_head(&dn_wild_sk);
402 if (sk) { 402 if (sk) {
403 if (sk->sk_state == TCP_LISTEN) 403 if (sk->sk_state == TCP_LISTEN)
404 sock_hold(sk); 404 sock_hold(sk);
405 else 405 else
406 sk = NULL; 406 sk = NULL;
@@ -500,7 +500,7 @@ static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp)
500 scp->ackxmt_oth = 0; /* Last oth data ack'ed */ 500 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
501 scp->ackrcv_dat = 0; /* Highest data ack recv*/ 501 scp->ackrcv_dat = 0; /* Highest data ack recv*/
502 scp->ackrcv_oth = 0; /* Last oth data ack rec*/ 502 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
503 scp->flowrem_sw = DN_SEND; 503 scp->flowrem_sw = DN_SEND;
504 scp->flowloc_sw = DN_SEND; 504 scp->flowloc_sw = DN_SEND;
505 scp->flowrem_dat = 0; 505 scp->flowrem_dat = 0;
506 scp->flowrem_oth = 1; 506 scp->flowrem_oth = 1;
@@ -690,7 +690,7 @@ static int dn_create(struct socket *sock, int protocol)
690 } 690 }
691 691
692 692
693 if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL) 693 if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL)
694 return -ENOBUFS; 694 return -ENOBUFS;
695 695
696 sk->sk_protocol = protocol; 696 sk->sk_protocol = protocol;
@@ -713,7 +713,7 @@ dn_release(struct socket *sock)
713 sock_put(sk); 713 sock_put(sk);
714 } 714 }
715 715
716 return 0; 716 return 0;
717} 717}
718 718
719static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 719static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
@@ -770,7 +770,7 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
770 } 770 }
771 release_sock(sk); 771 release_sock(sk);
772 772
773 return rv; 773 return rv;
774} 774}
775 775
776 776
@@ -791,7 +791,7 @@ static int dn_auto_bind(struct socket *sock)
791 */ 791 */
792 if ((scp->accessdata.acc_accl != 0) && 792 if ((scp->accessdata.acc_accl != 0) &&
793 (scp->accessdata.acc_accl <= 12)) { 793 (scp->accessdata.acc_accl <= 12)) {
794 794
795 scp->addr.sdn_objnamel = dn_htons(scp->accessdata.acc_accl); 795 scp->addr.sdn_objnamel = dn_htons(scp->accessdata.acc_accl);
796 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, dn_ntohs(scp->addr.sdn_objnamel)); 796 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, dn_ntohs(scp->addr.sdn_objnamel));
797 797
@@ -997,20 +997,20 @@ static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int
997 997
998static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc) 998static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
999{ 999{
1000 unsigned char *ptr = skb->data; 1000 unsigned char *ptr = skb->data;
1001 1001
1002 acc->acc_userl = *ptr++; 1002 acc->acc_userl = *ptr++;
1003 memcpy(&acc->acc_user, ptr, acc->acc_userl); 1003 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1004 ptr += acc->acc_userl; 1004 ptr += acc->acc_userl;
1005 1005
1006 acc->acc_passl = *ptr++; 1006 acc->acc_passl = *ptr++;
1007 memcpy(&acc->acc_pass, ptr, acc->acc_passl); 1007 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1008 ptr += acc->acc_passl; 1008 ptr += acc->acc_passl;
1009 1009
1010 acc->acc_accl = *ptr++; 1010 acc->acc_accl = *ptr++;
1011 memcpy(&acc->acc_acc, ptr, acc->acc_accl); 1011 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1012 1012
1013 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3); 1013 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
1014 1014
1015} 1015}
1016 1016
@@ -1071,7 +1071,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1071 1071
1072 lock_sock(sk); 1072 lock_sock(sk);
1073 1073
1074 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) { 1074 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1075 release_sock(sk); 1075 release_sock(sk);
1076 return -EINVAL; 1076 return -EINVAL;
1077 } 1077 }
@@ -1098,13 +1098,13 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1098 dst_release(xchg(&newsk->sk_dst_cache, skb->dst)); 1098 dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
1099 skb->dst = NULL; 1099 skb->dst = NULL;
1100 1100
1101 DN_SK(newsk)->state = DN_CR; 1101 DN_SK(newsk)->state = DN_CR;
1102 DN_SK(newsk)->addrrem = cb->src_port; 1102 DN_SK(newsk)->addrrem = cb->src_port;
1103 DN_SK(newsk)->services_rem = cb->services; 1103 DN_SK(newsk)->services_rem = cb->services;
1104 DN_SK(newsk)->info_rem = cb->info; 1104 DN_SK(newsk)->info_rem = cb->info;
1105 DN_SK(newsk)->segsize_rem = cb->segsize; 1105 DN_SK(newsk)->segsize_rem = cb->segsize;
1106 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode; 1106 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1107 1107
1108 if (DN_SK(newsk)->segsize_rem < 230) 1108 if (DN_SK(newsk)->segsize_rem < 230)
1109 DN_SK(newsk)->segsize_rem = 230; 1109 DN_SK(newsk)->segsize_rem = 230;
1110 1110
@@ -1154,15 +1154,15 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1154 dn_send_conn_ack(newsk); 1154 dn_send_conn_ack(newsk);
1155 1155
1156 /* 1156 /*
1157 * Here we use sk->sk_allocation since although the conn conf is 1157 * Here we use sk->sk_allocation since although the conn conf is
1158 * for the newsk, the context is the old socket. 1158 * for the newsk, the context is the old socket.
1159 */ 1159 */
1160 if (DN_SK(newsk)->accept_mode == ACC_IMMED) 1160 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1161 err = dn_confirm_accept(newsk, &timeo, 1161 err = dn_confirm_accept(newsk, &timeo,
1162 sk->sk_allocation); 1162 sk->sk_allocation);
1163 } 1163 }
1164 release_sock(newsk); 1164 release_sock(newsk);
1165 return err; 1165 return err;
1166} 1166}
1167 1167
1168 1168
@@ -1177,10 +1177,10 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
1177 lock_sock(sk); 1177 lock_sock(sk);
1178 1178
1179 if (peer) { 1179 if (peer) {
1180 if ((sock->state != SS_CONNECTED && 1180 if ((sock->state != SS_CONNECTED &&
1181 sock->state != SS_CONNECTING) && 1181 sock->state != SS_CONNECTING) &&
1182 scp->accept_mode == ACC_IMMED) { 1182 scp->accept_mode == ACC_IMMED) {
1183 release_sock(sk); 1183 release_sock(sk);
1184 return -ENOTCONN; 1184 return -ENOTCONN;
1185 } 1185 }
1186 1186
@@ -1191,7 +1191,7 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
1191 1191
1192 release_sock(sk); 1192 release_sock(sk);
1193 1193
1194 return 0; 1194 return 0;
1195} 1195}
1196 1196
1197 1197
@@ -1285,7 +1285,7 @@ static int dn_listen(struct socket *sock, int backlog)
1285out: 1285out:
1286 release_sock(sk); 1286 release_sock(sk);
1287 1287
1288 return err; 1288 return err;
1289} 1289}
1290 1290
1291 1291
@@ -1333,7 +1333,7 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
1333 return err; 1333 return err;
1334} 1334}
1335 1335
1336static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags) 1336static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags)
1337{ 1337{
1338 struct sock *sk = sock->sk; 1338 struct sock *sk = sock->sk;
1339 struct dn_scp *scp = DN_SK(sk); 1339 struct dn_scp *scp = DN_SK(sk);
@@ -1360,7 +1360,7 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1360 1360
1361 switch(optname) { 1361 switch(optname) {
1362 case DSO_CONDATA: 1362 case DSO_CONDATA:
1363 if (sock->state == SS_CONNECTED) 1363 if (sock->state == SS_CONNECTED)
1364 return -EISCONN; 1364 return -EISCONN;
1365 if ((scp->state != DN_O) && (scp->state != DN_CR)) 1365 if ((scp->state != DN_O) && (scp->state != DN_CR))
1366 return -EINVAL; 1366 return -EINVAL;
@@ -1375,7 +1375,7 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1375 break; 1375 break;
1376 1376
1377 case DSO_DISDATA: 1377 case DSO_DISDATA:
1378 if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED) 1378 if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
1379 return -ENOTCONN; 1379 return -ENOTCONN;
1380 1380
1381 if (optlen != sizeof(struct optdata_dn)) 1381 if (optlen != sizeof(struct optdata_dn))
@@ -1388,7 +1388,7 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1388 break; 1388 break;
1389 1389
1390 case DSO_CONACCESS: 1390 case DSO_CONACCESS:
1391 if (sock->state == SS_CONNECTED) 1391 if (sock->state == SS_CONNECTED)
1392 return -EISCONN; 1392 return -EISCONN;
1393 if (scp->state != DN_O) 1393 if (scp->state != DN_O)
1394 return -EINVAL; 1394 return -EINVAL;
@@ -1521,7 +1521,7 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1521 1521
1522 if(get_user(r_len , optlen)) 1522 if(get_user(r_len , optlen))
1523 return -EFAULT; 1523 return -EFAULT;
1524 1524
1525 switch(optname) { 1525 switch(optname) {
1526 case DSO_CONDATA: 1526 case DSO_CONDATA:
1527 if (r_len > sizeof(struct optdata_dn)) 1527 if (r_len > sizeof(struct optdata_dn))
@@ -1573,11 +1573,11 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1573#ifdef CONFIG_NETFILTER 1573#ifdef CONFIG_NETFILTER
1574 { 1574 {
1575 int val, len; 1575 int val, len;
1576 1576
1577 if(get_user(len, optlen)) 1577 if(get_user(len, optlen))
1578 return -EFAULT; 1578 return -EFAULT;
1579 1579
1580 val = nf_getsockopt(sk, PF_DECnet, optname, 1580 val = nf_getsockopt(sk, PF_DECnet, optname,
1581 optval, &len); 1581 optval, &len);
1582 if (val >= 0) 1582 if (val >= 0)
1583 val = put_user(len, optlen); 1583 val = put_user(len, optlen);
@@ -1588,7 +1588,7 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1588 case DSO_SEQPACKET: 1588 case DSO_SEQPACKET:
1589 case DSO_CONACCEPT: 1589 case DSO_CONACCEPT:
1590 case DSO_CONREJECT: 1590 case DSO_CONREJECT:
1591 return -ENOPROTOOPT; 1591 return -ENOPROTOOPT;
1592 1592
1593 case DSO_MAXWINDOW: 1593 case DSO_MAXWINDOW:
1594 if (r_len > sizeof(unsigned long)) 1594 if (r_len > sizeof(unsigned long))
@@ -1724,7 +1724,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1724 } 1724 }
1725 } 1725 }
1726 } 1726 }
1727 1727
1728 if (scp->state != DN_RUN) 1728 if (scp->state != DN_RUN)
1729 goto out; 1729 goto out;
1730 1730
@@ -1773,7 +1773,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1773 if (skb->len == 0) { 1773 if (skb->len == 0) {
1774 skb_unlink(skb, queue); 1774 skb_unlink(skb, queue);
1775 kfree_skb(skb); 1775 kfree_skb(skb);
1776 /* 1776 /*
1777 * N.B. Don't refer to skb or cb after this point 1777 * N.B. Don't refer to skb or cb after this point
1778 * in loop. 1778 * in loop.
1779 */ 1779 */
@@ -1783,7 +1783,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1783 } 1783 }
1784 } 1784 }
1785 1785
1786 if (eor) { 1786 if (eor) {
1787 if (sk->sk_type == SOCK_SEQPACKET) 1787 if (sk->sk_type == SOCK_SEQPACKET)
1788 break; 1788 break;
1789 if (!(flags & MSG_WAITALL)) 1789 if (!(flags & MSG_WAITALL))
@@ -1884,7 +1884,7 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1884 return mss_now; 1884 return mss_now;
1885} 1885}
1886 1886
1887/* 1887/*
1888 * N.B. We get the timeout wrong here, but then we always did get it 1888 * N.B. We get the timeout wrong here, but then we always did get it
1889 * wrong before and this is another step along the road to correcting 1889 * wrong before and this is another step along the road to correcting
1890 * it. It ought to get updated each time we pass through the routine, 1890 * it. It ought to get updated each time we pass through the routine,
@@ -2044,7 +2044,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
2044 cb->nsp_flags |= 0x20; 2044 cb->nsp_flags |= 0x20;
2045 2045
2046 scp->seg_total += len; 2046 scp->seg_total += len;
2047 2047
2048 if (((sent + len) == size) && (flags & MSG_EOR)) { 2048 if (((sent + len) == size) && (flags & MSG_EOR)) {
2049 cb->nsp_flags |= 0x40; 2049 cb->nsp_flags |= 0x40;
2050 scp->seg_total = 0; 2050 scp->seg_total = 0;
@@ -2202,7 +2202,7 @@ static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2202static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf) 2202static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2203{ 2203{
2204 int i; 2204 int i;
2205 2205
2206 switch (dn_ntohs(dn->sdn_objnamel)) { 2206 switch (dn_ntohs(dn->sdn_objnamel)) {
2207 case 0: 2207 case 0:
2208 sprintf(buf, "%d", dn->sdn_objnum); 2208 sprintf(buf, "%d", dn->sdn_objnum);
@@ -2214,7 +2214,7 @@ static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2214 buf[i] = '.'; 2214 buf[i] = '.';
2215 } 2215 }
2216 buf[i] = 0; 2216 buf[i] = 0;
2217 } 2217 }
2218} 2218}
2219 2219
2220static char *dn_state2asc(unsigned char state) 2220static char *dn_state2asc(unsigned char state)
@@ -2381,7 +2381,7 @@ static int __init decnet_init(void)
2381{ 2381{
2382 int rc; 2382 int rc;
2383 2383
2384 printk(banner); 2384 printk(banner);
2385 2385
2386 rc = proto_register(&dn_proto, 1); 2386 rc = proto_register(&dn_proto, 1);
2387 if (rc != 0) 2387 if (rc != 0)
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 90b3dfd72b49..c5e28114beb8 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -224,27 +224,27 @@ static struct dn_dev_sysctl_table {
224 {0} 224 {0}
225 }, 225 },
226 {{ 226 {{
227 .ctl_name = 0, 227 .ctl_name = 0,
228 .procname = "", 228 .procname = "",
229 .mode = 0555, 229 .mode = 0555,
230 .child = dn_dev_sysctl.dn_dev_vars 230 .child = dn_dev_sysctl.dn_dev_vars
231 }, {0}}, 231 }, {0}},
232 {{ 232 {{
233 .ctl_name = NET_DECNET_CONF, 233 .ctl_name = NET_DECNET_CONF,
234 .procname = "conf", 234 .procname = "conf",
235 .mode = 0555, 235 .mode = 0555,
236 .child = dn_dev_sysctl.dn_dev_dev 236 .child = dn_dev_sysctl.dn_dev_dev
237 }, {0}}, 237 }, {0}},
238 {{ 238 {{
239 .ctl_name = NET_DECNET, 239 .ctl_name = NET_DECNET,
240 .procname = "decnet", 240 .procname = "decnet",
241 .mode = 0555, 241 .mode = 0555,
242 .child = dn_dev_sysctl.dn_dev_conf_dir 242 .child = dn_dev_sysctl.dn_dev_conf_dir
243 }, {0}}, 243 }, {0}},
244 {{ 244 {{
245 .ctl_name = CTL_NET, 245 .ctl_name = CTL_NET,
246 .procname = "net", 246 .procname = "net",
247 .mode = 0555, 247 .mode = 0555,
248 .child = dn_dev_sysctl.dn_dev_proto_dir 248 .child = dn_dev_sysctl.dn_dev_proto_dir
249 }, {0}} 249 }, {0}}
250}; 250};
@@ -299,7 +299,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
299 } 299 }
300} 300}
301 301
302static int dn_forwarding_proc(ctl_table *table, int write, 302static int dn_forwarding_proc(ctl_table *table, int write,
303 struct file *filep, 303 struct file *filep,
304 void __user *buffer, 304 void __user *buffer,
305 size_t *lenp, loff_t *ppos) 305 size_t *lenp, loff_t *ppos)
@@ -456,7 +456,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
456 456
457 ASSERT_RTNL(); 457 ASSERT_RTNL();
458 458
459 /* Check for duplicates */ 459 /* Check for duplicates */
460 for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { 460 for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
461 if (ifa1->ifa_local == ifa->ifa_local) 461 if (ifa1->ifa_local == ifa->ifa_local)
462 return -EEXIST; 462 return -EEXIST;
@@ -708,7 +708,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
708 if (!dn_db) 708 if (!dn_db)
709 return err; 709 return err;
710 } 710 }
711 711
712 if ((ifa = dn_dev_alloc_ifa()) == NULL) 712 if ((ifa = dn_dev_alloc_ifa()) == NULL)
713 return -ENOBUFS; 713 return -ENOBUFS;
714 714
@@ -853,7 +853,7 @@ out:
853 return rv; 853 return rv;
854} 854}
855 855
856/* 856/*
857 * Find a default address to bind to. 857 * Find a default address to bind to.
858 * 858 *
859 * This is one of those areas where the initial VMS concepts don't really 859 * This is one of those areas where the initial VMS concepts don't really
@@ -884,39 +884,39 @@ last_chance:
884 884
885static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) 885static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
886{ 886{
887 struct endnode_hello_message *msg; 887 struct endnode_hello_message *msg;
888 struct sk_buff *skb = NULL; 888 struct sk_buff *skb = NULL;
889 __le16 *pktlen; 889 __le16 *pktlen;
890 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 890 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
891 891
892 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) 892 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
893 return; 893 return;
894 894
895 skb->dev = dev; 895 skb->dev = dev;
896 896
897 msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg)); 897 msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg));
898 898
899 msg->msgflg = 0x0D; 899 msg->msgflg = 0x0D;
900 memcpy(msg->tiver, dn_eco_version, 3); 900 memcpy(msg->tiver, dn_eco_version, 3);
901 dn_dn2eth(msg->id, ifa->ifa_local); 901 dn_dn2eth(msg->id, ifa->ifa_local);
902 msg->iinfo = DN_RT_INFO_ENDN; 902 msg->iinfo = DN_RT_INFO_ENDN;
903 msg->blksize = dn_htons(mtu2blksize(dev)); 903 msg->blksize = dn_htons(mtu2blksize(dev));
904 msg->area = 0x00; 904 msg->area = 0x00;
905 memset(msg->seed, 0, 8); 905 memset(msg->seed, 0, 8);
906 memcpy(msg->neighbor, dn_hiord, ETH_ALEN); 906 memcpy(msg->neighbor, dn_hiord, ETH_ALEN);
907 907
908 if (dn_db->router) { 908 if (dn_db->router) {
909 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; 909 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
910 dn_dn2eth(msg->neighbor, dn->addr); 910 dn_dn2eth(msg->neighbor, dn->addr);
911 } 911 }
912 912
913 msg->timer = dn_htons((unsigned short)dn_db->parms.t3); 913 msg->timer = dn_htons((unsigned short)dn_db->parms.t3);
914 msg->mpd = 0x00; 914 msg->mpd = 0x00;
915 msg->datalen = 0x02; 915 msg->datalen = 0x02;
916 memset(msg->data, 0xAA, 2); 916 memset(msg->data, 0xAA, 2);
917 917
918 pktlen = (__le16 *)skb_push(skb,2); 918 pktlen = (__le16 *)skb_push(skb,2);
919 *pktlen = dn_htons(skb->len - 2); 919 *pktlen = dn_htons(skb->len - 2);
920 920
921 skb->nh.raw = skb->data; 921 skb->nh.raw = skb->data;
922 922
@@ -986,11 +986,11 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
986 dn_dn2eth(ptr, ifa->ifa_local); 986 dn_dn2eth(ptr, ifa->ifa_local);
987 src = ptr; 987 src = ptr;
988 ptr += ETH_ALEN; 988 ptr += ETH_ALEN;
989 *ptr++ = dn_db->parms.forwarding == 1 ? 989 *ptr++ = dn_db->parms.forwarding == 1 ?
990 DN_RT_INFO_L1RT : DN_RT_INFO_L2RT; 990 DN_RT_INFO_L1RT : DN_RT_INFO_L2RT;
991 *((__le16 *)ptr) = dn_htons(mtu2blksize(dev)); 991 *((__le16 *)ptr) = dn_htons(mtu2blksize(dev));
992 ptr += 2; 992 ptr += 2;
993 *ptr++ = dn_db->parms.priority; /* Priority */ 993 *ptr++ = dn_db->parms.priority; /* Priority */
994 *ptr++ = 0; /* Area: Reserved */ 994 *ptr++ = 0; /* Area: Reserved */
995 *((__le16 *)ptr) = dn_htons((unsigned short)dn_db->parms.t3); 995 *((__le16 *)ptr) = dn_htons((unsigned short)dn_db->parms.t3);
996 ptr += 2; 996 ptr += 2;
@@ -1408,18 +1408,18 @@ static char *dn_type2asc(char type)
1408static int dn_dev_seq_show(struct seq_file *seq, void *v) 1408static int dn_dev_seq_show(struct seq_file *seq, void *v)
1409{ 1409{
1410 if (v == SEQ_START_TOKEN) 1410 if (v == SEQ_START_TOKEN)
1411 seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n"); 1411 seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n");
1412 else { 1412 else {
1413 struct net_device *dev = v; 1413 struct net_device *dev = v;
1414 char peer_buf[DN_ASCBUF_LEN]; 1414 char peer_buf[DN_ASCBUF_LEN];
1415 char router_buf[DN_ASCBUF_LEN]; 1415 char router_buf[DN_ASCBUF_LEN];
1416 struct dn_dev *dn_db = dev->dn_ptr; 1416 struct dn_dev *dn_db = dev->dn_ptr;
1417 1417
1418 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" 1418 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
1419 " %04hu %03d %02x %-10s %-7s %-7s\n", 1419 " %04hu %03d %02x %-10s %-7s %-7s\n",
1420 dev->name ? dev->name : "???", 1420 dev->name ? dev->name : "???",
1421 dn_type2asc(dn_db->parms.mode), 1421 dn_type2asc(dn_db->parms.mode),
1422 0, 0, 1422 0, 0,
1423 dn_db->t3, dn_db->parms.t3, 1423 dn_db->t3, dn_db->parms.t3,
1424 mtu2blksize(dev), 1424 mtu2blksize(dev),
1425 dn_db->parms.priority, 1425 dn_db->parms.priority,
@@ -1476,17 +1476,17 @@ MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
1476 1476
1477void __init dn_dev_init(void) 1477void __init dn_dev_init(void)
1478{ 1478{
1479 if (addr[0] > 63 || addr[0] < 0) { 1479 if (addr[0] > 63 || addr[0] < 0) {
1480 printk(KERN_ERR "DECnet: Area must be between 0 and 63"); 1480 printk(KERN_ERR "DECnet: Area must be between 0 and 63");
1481 return; 1481 return;
1482 } 1482 }
1483 1483
1484 if (addr[1] > 1023 || addr[1] < 0) { 1484 if (addr[1] > 1023 || addr[1] < 0) {
1485 printk(KERN_ERR "DECnet: Node must be between 0 and 1023"); 1485 printk(KERN_ERR "DECnet: Node must be between 0 and 1023");
1486 return; 1486 return;
1487 } 1487 }
1488 1488
1489 decnet_address = dn_htons((addr[0] << 10) | addr[1]); 1489 decnet_address = dn_htons((addr[0] << 10) | addr[1]);
1490 1490
1491 dn_dev_devices_on(); 1491 dn_dev_devices_on();
1492 1492
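
dn_dev_init() above packs the addr module parameters into the 16-bit DECnet address as a 6-bit area number above a 10-bit node number, which is why the bounds checks are 63 and 1023. A trivial sketch of the packing and its inverse; the function names here are made up, the kernel only performs the forward direction.

#include <stdint.h>

/* area in 0..63, node in 0..1023 */
static inline uint16_t dn_pack_address(uint16_t area, uint16_t node)
{
	return (uint16_t)((area << 10) | (node & 0x03ff));
}

static inline void dn_unpack_address(uint16_t addr, uint16_t *area, uint16_t *node)
{
	*area = addr >> 10;
	*node = addr & 0x03ff;
}

The packed value is then converted with dn_htons() and stored as decnet_address, as the hunk shows.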
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 1cf010124ec5..3cbfddc98430 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -527,7 +527,7 @@ int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
527 return -EINVAL; 527 return -EINVAL;
528 528
529 tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 1); 529 tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 1);
530 if (tb) 530 if (tb)
531 return tb->insert(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb)); 531 return tb->insert(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb));
532 532
533 return -ENOBUFS; 533 return -ENOBUFS;
@@ -654,80 +654,80 @@ static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event,
654 654
655static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force) 655static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force)
656{ 656{
657 int ret = 0; 657 int ret = 0;
658 int scope = RT_SCOPE_NOWHERE; 658 int scope = RT_SCOPE_NOWHERE;
659 659
660 if (force) 660 if (force)
661 scope = -1; 661 scope = -1;
662 662
663 for_fib_info() { 663 for_fib_info() {
664 /* 664 /*
665 * This makes no sense for DECnet.... we will almost 665 * This makes no sense for DECnet.... we will almost
666 * certainly have more than one local address the same 666 * certainly have more than one local address the same
667 * over all our interfaces. It needs thinking about 667 * over all our interfaces. It needs thinking about
668 * some more. 668 * some more.
669 */ 669 */
670 if (local && fi->fib_prefsrc == local) { 670 if (local && fi->fib_prefsrc == local) {
671 fi->fib_flags |= RTNH_F_DEAD; 671 fi->fib_flags |= RTNH_F_DEAD;
672 ret++; 672 ret++;
673 } else if (dev && fi->fib_nhs) { 673 } else if (dev && fi->fib_nhs) {
674 int dead = 0; 674 int dead = 0;
675 675
676 change_nexthops(fi) { 676 change_nexthops(fi) {
677 if (nh->nh_flags&RTNH_F_DEAD) 677 if (nh->nh_flags&RTNH_F_DEAD)
678 dead++; 678 dead++;
679 else if (nh->nh_dev == dev && 679 else if (nh->nh_dev == dev &&
680 nh->nh_scope != scope) { 680 nh->nh_scope != scope) {
681 spin_lock_bh(&dn_fib_multipath_lock); 681 spin_lock_bh(&dn_fib_multipath_lock);
682 nh->nh_flags |= RTNH_F_DEAD; 682 nh->nh_flags |= RTNH_F_DEAD;
683 fi->fib_power -= nh->nh_power; 683 fi->fib_power -= nh->nh_power;
684 nh->nh_power = 0; 684 nh->nh_power = 0;
685 spin_unlock_bh(&dn_fib_multipath_lock); 685 spin_unlock_bh(&dn_fib_multipath_lock);
686 dead++; 686 dead++;
687 } 687 }
688 } endfor_nexthops(fi) 688 } endfor_nexthops(fi)
689 if (dead == fi->fib_nhs) { 689 if (dead == fi->fib_nhs) {
690 fi->fib_flags |= RTNH_F_DEAD; 690 fi->fib_flags |= RTNH_F_DEAD;
691 ret++; 691 ret++;
692 } 692 }
693 } 693 }
694 } endfor_fib_info(); 694 } endfor_fib_info();
695 return ret; 695 return ret;
696} 696}
697 697
698 698
699static int dn_fib_sync_up(struct net_device *dev) 699static int dn_fib_sync_up(struct net_device *dev)
700{ 700{
701 int ret = 0; 701 int ret = 0;
702 702
703 if (!(dev->flags&IFF_UP)) 703 if (!(dev->flags&IFF_UP))
704 return 0; 704 return 0;
705 705
706 for_fib_info() { 706 for_fib_info() {
707 int alive = 0; 707 int alive = 0;
708 708
709 change_nexthops(fi) { 709 change_nexthops(fi) {
710 if (!(nh->nh_flags&RTNH_F_DEAD)) { 710 if (!(nh->nh_flags&RTNH_F_DEAD)) {
711 alive++; 711 alive++;
712 continue; 712 continue;
713 } 713 }
714 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 714 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
715 continue; 715 continue;
716 if (nh->nh_dev != dev || dev->dn_ptr == NULL) 716 if (nh->nh_dev != dev || dev->dn_ptr == NULL)
717 continue; 717 continue;
718 alive++; 718 alive++;
719 spin_lock_bh(&dn_fib_multipath_lock); 719 spin_lock_bh(&dn_fib_multipath_lock);
720 nh->nh_power = 0; 720 nh->nh_power = 0;
721 nh->nh_flags &= ~RTNH_F_DEAD; 721 nh->nh_flags &= ~RTNH_F_DEAD;
722 spin_unlock_bh(&dn_fib_multipath_lock); 722 spin_unlock_bh(&dn_fib_multipath_lock);
723 } endfor_nexthops(fi); 723 } endfor_nexthops(fi);
724 724
725 if (alive > 0) { 725 if (alive > 0) {
726 fi->fib_flags &= ~RTNH_F_DEAD; 726 fi->fib_flags &= ~RTNH_F_DEAD;
727 ret++; 727 ret++;
728 } 728 }
729 } endfor_fib_info(); 729 } endfor_fib_info();
730 return ret; 730 return ret;
731} 731}
732 732
733static struct notifier_block dn_fib_dnaddr_notifier = { 733static struct notifier_block dn_fib_dnaddr_notifier = {
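
dn_fib_sync_down()/dn_fib_sync_up() above maintain a single invariant: a fib_info carries RTNH_F_DEAD exactly when none of its nexthops is usable. A compact sketch of that rule over simplified structures; the real code additionally rebalances the multipath power fields under dn_fib_multipath_lock.

#include <stdbool.h>

struct nexthop { bool dead; };

struct fib_info {
	int		nhs;	/* number of nexthops */
	struct nexthop	*nh;	/* array of nhs entries */
	bool		dead;	/* stands in for RTNH_F_DEAD */
};

/* Recompute the route's liveness after its nexthops changed state. */
static void fib_refresh_dead(struct fib_info *fi)
{
	int dead = 0;

	for (int i = 0; i < fi->nhs; i++)
		if (fi->nh[i].dead)
			dead++;

	fi->dead = (dead == fi->nhs);
}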
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 7322bb36e825..11d692dfb4f3 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -3,7 +3,7 @@
3 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * DECnet Neighbour Functions (Adjacency Database and 6 * DECnet Neighbour Functions (Adjacency Database and
7 * On-Ethernet Cache) 7 * On-Ethernet Cache)
8 * 8 *
9 * Author: Steve Whitehouse <SteveW@ACM.org> 9 * Author: Steve Whitehouse <SteveW@ACM.org>
@@ -277,19 +277,19 @@ static int dn_short_output(struct sk_buff *skb)
277 struct dn_skb_cb *cb = DN_SKB_CB(skb); 277 struct dn_skb_cb *cb = DN_SKB_CB(skb);
278 278
279 279
280 if (skb_headroom(skb) < headroom) { 280 if (skb_headroom(skb) < headroom) {
281 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); 281 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
282 if (skb2 == NULL) { 282 if (skb2 == NULL) {
283 if (net_ratelimit()) 283 if (net_ratelimit())
284 printk(KERN_CRIT "dn_short_output: no memory\n"); 284 printk(KERN_CRIT "dn_short_output: no memory\n");
285 kfree_skb(skb); 285 kfree_skb(skb);
286 return -ENOBUFS; 286 return -ENOBUFS;
287 } 287 }
288 kfree_skb(skb); 288 kfree_skb(skb);
289 skb = skb2; 289 skb = skb2;
290 if (net_ratelimit()) 290 if (net_ratelimit())
291 printk(KERN_INFO "dn_short_output: Increasing headroom\n"); 291 printk(KERN_INFO "dn_short_output: Increasing headroom\n");
292 } 292 }
293 293
294 data = skb_push(skb, sizeof(struct dn_short_packet) + 2); 294 data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
295 *((__le16 *)data) = dn_htons(skb->len - 2); 295 *((__le16 *)data) = dn_htons(skb->len - 2);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 39a6cf7fb566..0f244e81a377 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -33,7 +33,7 @@
33 33
34/****************************************************************************** 34/******************************************************************************
35 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 35 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
36 36
37 This program is free software; you can redistribute it and/or modify 37 This program is free software; you can redistribute it and/or modify
38 it under the terms of the GNU General Public License as published by 38 it under the terms of the GNU General Public License as published by
39 the Free Software Foundation; either version 2 of the License, or 39 the Free Software Foundation; either version 2 of the License, or
@@ -63,7 +63,7 @@
63#include <asm/system.h> 63#include <asm/system.h>
64#include <linux/fcntl.h> 64#include <linux/fcntl.h>
65#include <linux/mm.h> 65#include <linux/mm.h>
66#include <linux/termios.h> 66#include <linux/termios.h>
67#include <linux/interrupt.h> 67#include <linux/interrupt.h>
68#include <linux/proc_fs.h> 68#include <linux/proc_fs.h>
69#include <linux/stat.h> 69#include <linux/stat.h>
@@ -139,7 +139,7 @@ static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
139 ptr++; 139 ptr++;
140 len += 2; 140 len += 2;
141 if ((ack & 0x4000) == 0) { 141 if ((ack & 0x4000) == 0) {
142 if (oth) 142 if (oth)
143 ack ^= 0x2000; 143 ack ^= 0x2000;
144 dn_ack(sk, skb, ack); 144 dn_ack(sk, skb, ack);
145 } 145 }
@@ -152,7 +152,7 @@ static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
152 skb_pull(skb, 2); 152 skb_pull(skb, 2);
153 len += 2; 153 len += 2;
154 if ((ack & 0x4000) == 0) { 154 if ((ack & 0x4000) == 0) {
155 if (oth) 155 if (oth)
156 ack ^= 0x2000; 156 ack ^= 0x2000;
157 dn_ack(sk, skb, ack); 157 dn_ack(sk, skb, ack);
158 } 158 }
@@ -349,9 +349,9 @@ static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
349 349
350 if ((scp->state == DN_CI) || (scp->state == DN_CD)) { 350 if ((scp->state == DN_CI) || (scp->state == DN_CD)) {
351 scp->persist = 0; 351 scp->persist = 0;
352 scp->addrrem = cb->src_port; 352 scp->addrrem = cb->src_port;
353 sk->sk_state = TCP_ESTABLISHED; 353 sk->sk_state = TCP_ESTABLISHED;
354 scp->state = DN_RUN; 354 scp->state = DN_RUN;
355 scp->services_rem = cb->services; 355 scp->services_rem = cb->services;
356 scp->info_rem = cb->info; 356 scp->info_rem = cb->info;
357 scp->segsize_rem = cb->segsize; 357 scp->segsize_rem = cb->segsize;
@@ -366,13 +366,13 @@ static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
366 memcpy(scp->conndata_in.opt_data, skb->data + 1, dlen); 366 memcpy(scp->conndata_in.opt_data, skb->data + 1, dlen);
367 } 367 }
368 } 368 }
369 dn_nsp_send_link(sk, DN_NOCHANGE, 0); 369 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
370 if (!sock_flag(sk, SOCK_DEAD)) 370 if (!sock_flag(sk, SOCK_DEAD))
371 sk->sk_state_change(sk); 371 sk->sk_state_change(sk);
372 } 372 }
373 373
374out: 374out:
375 kfree_skb(skb); 375 kfree_skb(skb);
376} 376}
377 377
378static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb) 378static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
@@ -435,7 +435,7 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
435 sk->sk_state_change(sk); 435 sk->sk_state_change(sk);
436 } 436 }
437 437
438 /* 438 /*
439 * It appears that its possible for remote machines to send disc 439 * It appears that its possible for remote machines to send disc
440 * init messages with no port identifier if we are in the CI and 440 * init messages with no port identifier if we are in the CI and
441 * possibly also the CD state. Obviously we shouldn't reply with 441 * possibly also the CD state. Obviously we shouldn't reply with
@@ -519,7 +519,7 @@ static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
519 519
520 /* 520 /*
521 * Here we ignore erronous packets which should really 521 * Here we ignore erronous packets which should really
522 * should cause a connection abort. It is not critical 522 * should cause a connection abort. It is not critical
523 * for now though. 523 * for now though.
524 */ 524 */
525 if (lsflags & 0xf8) 525 if (lsflags & 0xf8)
@@ -530,7 +530,7 @@ static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
530 switch(lsflags & 0x04) { /* FCVAL INT */ 530 switch(lsflags & 0x04) { /* FCVAL INT */
531 case 0x00: /* Normal Request */ 531 case 0x00: /* Normal Request */
532 switch(lsflags & 0x03) { /* FCVAL MOD */ 532 switch(lsflags & 0x03) { /* FCVAL MOD */
533 case 0x00: /* Request count */ 533 case 0x00: /* Request count */
534 if (fcval < 0) { 534 if (fcval < 0) {
535 unsigned char p_fcval = -fcval; 535 unsigned char p_fcval = -fcval;
536 if ((scp->flowrem_dat > p_fcval) && 536 if ((scp->flowrem_dat > p_fcval) &&
@@ -541,7 +541,7 @@ static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
541 scp->flowrem_dat += fcval; 541 scp->flowrem_dat += fcval;
542 wake_up = 1; 542 wake_up = 1;
543 } 543 }
544 break; 544 break;
545 case 0x01: /* Stop outgoing data */ 545 case 0x01: /* Stop outgoing data */
546 scp->flowrem_sw = DN_DONTSEND; 546 scp->flowrem_sw = DN_DONTSEND;
547 break; 547 break;
@@ -557,10 +557,10 @@ static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
557 wake_up = 1; 557 wake_up = 1;
558 } 558 }
559 break; 559 break;
560 } 560 }
561 if (wake_up && !sock_flag(sk, SOCK_DEAD)) 561 if (wake_up && !sock_flag(sk, SOCK_DEAD))
562 sk->sk_state_change(sk); 562 sk->sk_state_change(sk);
563 } 563 }
564 564
565 dn_nsp_send_oth_ack(sk); 565 dn_nsp_send_oth_ack(sk);
566 566
@@ -576,38 +576,38 @@ out:
576static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 576static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
577{ 577{
578 int err; 578 int err;
579 579
580 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 580 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
581 number of warnings when compiling with -W --ANK 581 number of warnings when compiling with -W --ANK
582 */ 582 */
583 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 583 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
584 (unsigned)sk->sk_rcvbuf) { 584 (unsigned)sk->sk_rcvbuf) {
585 err = -ENOMEM; 585 err = -ENOMEM;
586 goto out; 586 goto out;
587 } 587 }
588 588
589 err = sk_filter(sk, skb); 589 err = sk_filter(sk, skb);
590 if (err) 590 if (err)
591 goto out; 591 goto out;
592 592
593 skb_set_owner_r(skb, sk); 593 skb_set_owner_r(skb, sk);
594 skb_queue_tail(queue, skb); 594 skb_queue_tail(queue, skb);
595 595
596 /* This code only runs from BH or BH protected context. 596 /* This code only runs from BH or BH protected context.
597 * Therefore the plain read_lock is ok here. -DaveM 597 * Therefore the plain read_lock is ok here. -DaveM
598 */ 598 */
599 read_lock(&sk->sk_callback_lock); 599 read_lock(&sk->sk_callback_lock);
600 if (!sock_flag(sk, SOCK_DEAD)) { 600 if (!sock_flag(sk, SOCK_DEAD)) {
601 struct socket *sock = sk->sk_socket; 601 struct socket *sock = sk->sk_socket;
602 wake_up_interruptible(sk->sk_sleep); 602 wake_up_interruptible(sk->sk_sleep);
603 if (sock && sock->fasync_list && 603 if (sock && sock->fasync_list &&
604 !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) 604 !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
605 __kill_fasync(sock->fasync_list, sig, 605 __kill_fasync(sock->fasync_list, sig,
606 (sig == SIGURG) ? POLL_PRI : POLL_IN); 606 (sig == SIGURG) ? POLL_PRI : POLL_IN);
607 } 607 }
608 read_unlock(&sk->sk_callback_lock); 608 read_unlock(&sk->sk_callback_lock);
609out: 609out:
610 return err; 610 return err;
611} 611}
612 612
613static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb) 613static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
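
dn_queue_skb() above admits a segment only while the memory already charged to the receive queue plus the new buffer's truesize stays below the socket's receive-buffer limit, and only if the socket filter accepts it. A schematic of the admission test with placeholder types, not the real socket structures.

#include <stdbool.h>
#include <stddef.h>

struct rx_queue_state {
	size_t rmem_alloc;	/* bytes already charged to the receive queue */
	size_t rcvbuf;		/* configured receive-buffer limit */
};

/* A new buffer of the given truesize may be queued only if this returns false. */
static bool rx_would_overflow(const struct rx_queue_state *q, size_t truesize)
{
	return q->rmem_alloc + truesize >= q->rcvbuf;
}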
@@ -652,16 +652,16 @@ static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
652 skb_pull(skb, 2); 652 skb_pull(skb, 2);
653 653
654 if (seq_next(scp->numdat_rcv, segnum)) { 654 if (seq_next(scp->numdat_rcv, segnum)) {
655 if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) { 655 if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
656 seq_add(&scp->numdat_rcv, 1); 656 seq_add(&scp->numdat_rcv, 1);
657 queued = 1; 657 queued = 1;
658 } 658 }
659 659
660 if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) { 660 if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) {
661 scp->flowloc_sw = DN_DONTSEND; 661 scp->flowloc_sw = DN_DONTSEND;
662 dn_nsp_send_link(sk, DN_DONTSEND, 0); 662 dn_nsp_send_link(sk, DN_DONTSEND, 0);
663 } 663 }
664 } 664 }
665 665
666 dn_nsp_send_data_ack(sk); 666 dn_nsp_send_data_ack(sk);
667out: 667out:
@@ -732,7 +732,7 @@ static int dn_nsp_rx_packet(struct sk_buff *skb)
732 if (decnet_debug_level & 2) 732 if (decnet_debug_level & 2)
733 printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags); 733 printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags);
734 734
735 if (cb->nsp_flags & 0x83) 735 if (cb->nsp_flags & 0x83)
736 goto free_out; 736 goto free_out;
737 737
738 /* 738 /*
@@ -852,7 +852,7 @@ int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
852 case 0x30: 852 case 0x30:
853 dn_nsp_disc_init(sk, skb); 853 dn_nsp_disc_init(sk, skb);
854 break; 854 break;
855 case 0x40: 855 case 0x40:
856 dn_nsp_disc_conf(sk, skb); 856 dn_nsp_disc_conf(sk, skb);
857 break; 857 break;
858 } 858 }
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index b342e4e8f5f8..23d5ca88dfa3 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -26,7 +26,7 @@
26 26
27/****************************************************************************** 27/******************************************************************************
28 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 28 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
29 29
30 This program is free software; you can redistribute it and/or modify 30 This program is free software; you can redistribute it and/or modify
31 it under the terms of the GNU General Public License as published by 31 it under the terms of the GNU General Public License as published by
32 the Free Software Foundation; either version 2 of the License, or 32 the Free Software Foundation; either version 2 of the License, or
@@ -55,7 +55,7 @@
55#include <asm/system.h> 55#include <asm/system.h>
56#include <linux/fcntl.h> 56#include <linux/fcntl.h>
57#include <linux/mm.h> 57#include <linux/mm.h>
58#include <linux/termios.h> 58#include <linux/termios.h>
59#include <linux/interrupt.h> 59#include <linux/interrupt.h>
60#include <linux/proc_fs.h> 60#include <linux/proc_fs.h>
61#include <linux/stat.h> 61#include <linux/stat.h>
@@ -176,14 +176,14 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
176 * gathering this value might turn out negative, so we make sure 176 * gathering this value might turn out negative, so we make sure
177 * that is it always positive here. 177 * that is it always positive here.
178 */ 178 */
179 if (rtt < 0) 179 if (rtt < 0)
180 rtt = -rtt; 180 rtt = -rtt;
181 /* 181 /*
182 * Add new rtt to smoothed average 182 * Add new rtt to smoothed average
183 */ 183 */
184 delta = ((rtt << 3) - srtt); 184 delta = ((rtt << 3) - srtt);
185 srtt += (delta >> 3); 185 srtt += (delta >> 3);
186 if (srtt >= 1) 186 if (srtt >= 1)
187 scp->nsp_srtt = (unsigned long)srtt; 187 scp->nsp_srtt = (unsigned long)srtt;
188 else 188 else
189 scp->nsp_srtt = 1; 189 scp->nsp_srtt = 1;
@@ -193,7 +193,7 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
193 */ 193 */
194 delta >>= 1; 194 delta >>= 1;
195 rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2); 195 rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2);
196 if (rttvar >= 1) 196 if (rttvar >= 1)
197 scp->nsp_rttvar = (unsigned long)rttvar; 197 scp->nsp_rttvar = (unsigned long)rttvar;
198 else 198 else
199 scp->nsp_rttvar = 1; 199 scp->nsp_rttvar = 1;
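
dn_nsp_rtt() above keeps its estimator in fixed point: nsp_srtt is effectively scaled by 8 and nsp_rttvar by 4, so the shifts implement srtt += (sample - srtt)/8 and rttvar += (|sample - srtt| - rttvar)/4 without any division. A standalone sketch of the same update; the struct and field names are illustrative.

struct rtt_state {
	long srtt;	/* smoothed RTT, kept scaled by 8 */
	long rttvar;	/* mean deviation, kept scaled by 4 */
};

static void rtt_update(struct rtt_state *s, long sample)
{
	long delta;

	if (sample < 0)				/* clock wrap can yield a negative sample */
		sample = -sample;

	delta = (sample << 3) - s->srtt;	/* error term, scaled by 8 */
	s->srtt += delta >> 3;			/* gain of 1/8 on the mean */
	if (s->srtt < 1)
		s->srtt = 1;

	delta >>= 1;				/* error term, scaled by 4 */
	s->rttvar += ((delta > 0 ? delta : -delta) - s->rttvar) >> 2;	/* gain 1/4 */
	if (s->rttvar < 1)
		s->rttvar = 1;
}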
@@ -434,7 +434,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
434 * further. 434 * further.
435 */ 435 */
436 if (xmit_count == 1) { 436 if (xmit_count == 1) {
437 if (dn_equal(segnum, acknum)) 437 if (dn_equal(segnum, acknum))
438 dn_nsp_rtt(sk, (long)(pkttime - reftime)); 438 dn_nsp_rtt(sk, (long)(pkttime - reftime));
439 439
440 if (scp->snd_window < scp->max_window) 440 if (scp->snd_window < scp->max_window)
@@ -486,16 +486,16 @@ void dn_send_conn_ack (struct sock *sk)
486{ 486{
487 struct dn_scp *scp = DN_SK(sk); 487 struct dn_scp *scp = DN_SK(sk);
488 struct sk_buff *skb = NULL; 488 struct sk_buff *skb = NULL;
489 struct nsp_conn_ack_msg *msg; 489 struct nsp_conn_ack_msg *msg;
490 490
491 if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL) 491 if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
492 return; 492 return;
493 493
494 msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3); 494 msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3);
495 msg->msgflg = 0x24; 495 msg->msgflg = 0x24;
496 msg->dstaddr = scp->addrrem; 496 msg->dstaddr = scp->addrrem;
497 497
498 dn_nsp_send(skb); 498 dn_nsp_send(skb);
499} 499}
500 500
501void dn_nsp_delayed_ack(struct sock *sk) 501void dn_nsp_delayed_ack(struct sock *sk)
@@ -523,25 +523,25 @@ void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
523{ 523{
524 struct dn_scp *scp = DN_SK(sk); 524 struct dn_scp *scp = DN_SK(sk);
525 struct sk_buff *skb = NULL; 525 struct sk_buff *skb = NULL;
526 struct nsp_conn_init_msg *msg; 526 struct nsp_conn_init_msg *msg;
527 __u8 len = (__u8)dn_ntohs(scp->conndata_out.opt_optl); 527 __u8 len = (__u8)dn_ntohs(scp->conndata_out.opt_optl);
528 528
529 if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL) 529 if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
530 return; 530 return;
531 531
532 msg = (struct nsp_conn_init_msg *)skb_put(skb, sizeof(*msg)); 532 msg = (struct nsp_conn_init_msg *)skb_put(skb, sizeof(*msg));
533 msg->msgflg = 0x28; 533 msg->msgflg = 0x28;
534 msg->dstaddr = scp->addrrem; 534 msg->dstaddr = scp->addrrem;
535 msg->srcaddr = scp->addrloc; 535 msg->srcaddr = scp->addrloc;
536 msg->services = scp->services_loc; 536 msg->services = scp->services_loc;
537 msg->info = scp->info_loc; 537 msg->info = scp->info_loc;
538 msg->segsize = dn_htons(scp->segsize_loc); 538 msg->segsize = dn_htons(scp->segsize_loc);
539 539
540 *skb_put(skb,1) = len; 540 *skb_put(skb,1) = len;
541 541
542 if (len > 0) 542 if (len > 0)
543 memcpy(skb_put(skb, len), scp->conndata_out.opt_data, len); 543 memcpy(skb_put(skb, len), scp->conndata_out.opt_data, len);
544 544
545 545
546 dn_nsp_send(skb); 546 dn_nsp_send(skb);
547 547
@@ -550,7 +550,7 @@ void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
550} 550}
551 551
552 552
553static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, 553static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
554 unsigned short reason, gfp_t gfp, 554 unsigned short reason, gfp_t gfp,
555 struct dst_entry *dst, 555 struct dst_entry *dst,
556 int ddl, unsigned char *dd, __le16 rem, __le16 loc) 556 int ddl, unsigned char *dd, __le16 rem, __le16 loc)
@@ -593,7 +593,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
593} 593}
594 594
595 595
596void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 596void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
597 unsigned short reason, gfp_t gfp) 597 unsigned short reason, gfp_t gfp)
598{ 598{
599 struct dn_scp *scp = DN_SK(sk); 599 struct dn_scp *scp = DN_SK(sk);
@@ -605,19 +605,19 @@ void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
605 if (reason == 0) 605 if (reason == 0)
606 reason = dn_ntohs(scp->discdata_out.opt_status); 606 reason = dn_ntohs(scp->discdata_out.opt_status);
607 607
608 dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl, 608 dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl,
609 scp->discdata_out.opt_data, scp->addrrem, scp->addrloc); 609 scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
610} 610}
611 611
612 612
613void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg, 613void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
614 unsigned short reason) 614 unsigned short reason)
615{ 615{
616 struct dn_skb_cb *cb = DN_SKB_CB(skb); 616 struct dn_skb_cb *cb = DN_SKB_CB(skb);
617 int ddl = 0; 617 int ddl = 0;
618 gfp_t gfp = GFP_ATOMIC; 618 gfp_t gfp = GFP_ATOMIC;
619 619
620 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 620 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl,
621 NULL, cb->src_port, cb->dst_port); 621 NULL, cb->src_port, cb->dst_port);
622} 622}
623 623
@@ -676,8 +676,8 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
676 676
677 msg->srcaddr = scp->addrloc; 677 msg->srcaddr = scp->addrloc;
678 msg->services = scp->services_loc; /* Requested flow control */ 678 msg->services = scp->services_loc; /* Requested flow control */
679 msg->info = scp->info_loc; /* Version Number */ 679 msg->info = scp->info_loc; /* Version Number */
680 msg->segsize = dn_htons(scp->segsize_loc); /* Max segment size */ 680 msg->segsize = dn_htons(scp->segsize_loc); /* Max segment size */
681 681
682 if (scp->peer.sdn_objnum) 682 if (scp->peer.sdn_objnum)
683 type = 0; 683 type = 0;
@@ -692,7 +692,7 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
692 menuver |= DN_MENUVER_UIC; 692 menuver |= DN_MENUVER_UIC;
693 693
694 *skb_put(skb, 1) = menuver; /* Menu Version */ 694 *skb_put(skb, 1) = menuver; /* Menu Version */
695 695
696 aux = scp->accessdata.acc_userl; 696 aux = scp->accessdata.acc_userl;
697 *skb_put(skb, 1) = aux; 697 *skb_put(skb, 1) = aux;
698 if (aux > 0) 698 if (aux > 0)
@@ -718,6 +718,6 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
718 718
719 cb->rt_flags = DN_RT_F_RQR; 719 cb->rt_flags = DN_RT_F_RQR;
720 720
721 dn_nsp_send(skb); 721 dn_nsp_send(skb);
722} 722}
723 723
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9881933167bd..efccc42ff1c6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -43,7 +43,7 @@
43 43
44/****************************************************************************** 44/******************************************************************************
45 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 45 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
46 46
47 This program is free software; you can redistribute it and/or modify 47 This program is free software; you can redistribute it and/or modify
48 it under the terms of the GNU General Public License as published by 48 it under the terms of the GNU General Public License as published by
49 the Free Software Foundation; either version 2 of the License, or 49 the Free Software Foundation; either version 2 of the License, or
@@ -167,11 +167,11 @@ static void dn_dst_check_expire(unsigned long dummy)
 	while((rt=*rtp) != NULL) {
 		if (atomic_read(&rt->u.dst.__refcnt) ||
 				(now - rt->u.dst.lastuse) < expire) {
-			rtp = &rt->u.rt_next;
+			rtp = &rt->u.dst.dn_next;
 			continue;
 		}
-		*rtp = rt->u.rt_next;
-		rt->u.rt_next = NULL;
+		*rtp = rt->u.dst.dn_next;
+		rt->u.dst.dn_next = NULL;
 		dnrt_free(rt);
 	}
 	spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,11 +198,11 @@ static int dn_dst_gc(void)
 	while((rt=*rtp) != NULL) {
 		if (atomic_read(&rt->u.dst.__refcnt) ||
 				(now - rt->u.dst.lastuse) < expire) {
-			rtp = &rt->u.rt_next;
+			rtp = &rt->u.dst.dn_next;
 			continue;
 		}
-		*rtp = rt->u.rt_next;
-		rt->u.rt_next = NULL;
+		*rtp = rt->u.dst.dn_next;
+		rt->u.dst.dn_next = NULL;
 		dnrt_drop(rt);
 		break;
 	}
@@ -246,7 +246,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
246 } 246 }
247} 247}
248 248
249/* 249/*
250 * When a route has been marked obsolete. (e.g. routing cache flush) 250 * When a route has been marked obsolete. (e.g. routing cache flush)
251 */ 251 */
252static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie) 252static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
@@ -286,8 +286,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
 	while((rth = *rthp) != NULL) {
 		if (compare_keys(&rth->fl, &rt->fl)) {
 			/* Put it first */
-			*rthp = rth->u.rt_next;
-			rcu_assign_pointer(rth->u.rt_next,
+			*rthp = rth->u.dst.dn_next;
+			rcu_assign_pointer(rth->u.dst.dn_next,
 					   dn_rt_hash_table[hash].chain);
 			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
 
@@ -300,12 +300,12 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
300 *rp = rth; 300 *rp = rth;
301 return 0; 301 return 0;
302 } 302 }
303 rthp = &rth->u.rt_next; 303 rthp = &rth->u.dst.dn_next;
304 } 304 }
305 305
306 rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain); 306 rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
307 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); 307 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
308 308
309 dst_hold(&rt->u.dst); 309 dst_hold(&rt->u.dst);
310 rt->u.dst.__use++; 310 rt->u.dst.__use++;
311 rt->u.dst.lastuse = now; 311 rt->u.dst.lastuse = now;
@@ -326,8 +326,8 @@ void dn_run_flush(unsigned long dummy)
326 goto nothing_to_declare; 326 goto nothing_to_declare;
327 327
328 for(; rt; rt=next) { 328 for(; rt; rt=next) {
329 next = rt->u.rt_next; 329 next = rt->u.dst.dn_next;
330 rt->u.rt_next = NULL; 330 rt->u.dst.dn_next = NULL;
331 dst_free((struct dst_entry *)rt); 331 dst_free((struct dst_entry *)rt);
332 } 332 }
333 333
@@ -506,23 +506,23 @@ static int dn_route_rx_long(struct sk_buff *skb)
506 skb_pull(skb, 20); 506 skb_pull(skb, 20);
507 skb->h.raw = skb->data; 507 skb->h.raw = skb->data;
508 508
509 /* Destination info */ 509 /* Destination info */
510 ptr += 2; 510 ptr += 2;
511 cb->dst = dn_eth2dn(ptr); 511 cb->dst = dn_eth2dn(ptr);
512 if (memcmp(ptr, dn_hiord_addr, 4) != 0) 512 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
513 goto drop_it; 513 goto drop_it;
514 ptr += 6; 514 ptr += 6;
515 515
516 516
517 /* Source info */ 517 /* Source info */
518 ptr += 2; 518 ptr += 2;
519 cb->src = dn_eth2dn(ptr); 519 cb->src = dn_eth2dn(ptr);
520 if (memcmp(ptr, dn_hiord_addr, 4) != 0) 520 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
521 goto drop_it; 521 goto drop_it;
522 ptr += 6; 522 ptr += 6;
523 /* Other junk */ 523 /* Other junk */
524 ptr++; 524 ptr++;
525 cb->hops = *ptr++; /* Visit Count */ 525 cb->hops = *ptr++; /* Visit Count */
526 526
527 return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet); 527 return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
528 528
@@ -545,16 +545,16 @@ static int dn_route_rx_short(struct sk_buff *skb)
545 skb->h.raw = skb->data; 545 skb->h.raw = skb->data;
546 546
547 cb->dst = *(__le16 *)ptr; 547 cb->dst = *(__le16 *)ptr;
548 ptr += 2; 548 ptr += 2;
549 cb->src = *(__le16 *)ptr; 549 cb->src = *(__le16 *)ptr;
550 ptr += 2; 550 ptr += 2;
551 cb->hops = *ptr & 0x3f; 551 cb->hops = *ptr & 0x3f;
552 552
553 return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet); 553 return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
554 554
555drop_it: 555drop_it:
556 kfree_skb(skb); 556 kfree_skb(skb);
557 return NET_RX_DROP; 557 return NET_RX_DROP;
558} 558}
559 559
560static int dn_route_discard(struct sk_buff *skb) 560static int dn_route_discard(struct sk_buff *skb)
@@ -626,20 +626,20 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
626 cb->rt_flags = flags; 626 cb->rt_flags = flags;
627 627
628 if (decnet_debug_level & 1) 628 if (decnet_debug_level & 1)
629 printk(KERN_DEBUG 629 printk(KERN_DEBUG
630 "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n", 630 "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
631 (int)flags, (dev) ? dev->name : "???", len, skb->len, 631 (int)flags, (dev) ? dev->name : "???", len, skb->len,
632 padlen); 632 padlen);
633 633
634 if (flags & DN_RT_PKT_CNTL) { 634 if (flags & DN_RT_PKT_CNTL) {
635 if (unlikely(skb_linearize(skb))) 635 if (unlikely(skb_linearize(skb)))
636 goto dump_it; 636 goto dump_it;
637 637
638 switch(flags & DN_RT_CNTL_MSK) { 638 switch(flags & DN_RT_CNTL_MSK) {
639 case DN_RT_PKT_INIT: 639 case DN_RT_PKT_INIT:
640 dn_dev_init_pkt(skb); 640 dn_dev_init_pkt(skb);
641 break; 641 break;
642 case DN_RT_PKT_VERI: 642 case DN_RT_PKT_VERI:
643 dn_dev_veri_pkt(skb); 643 dn_dev_veri_pkt(skb);
644 break; 644 break;
645 } 645 }
@@ -648,31 +648,31 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
648 goto dump_it; 648 goto dump_it;
649 649
650 switch(flags & DN_RT_CNTL_MSK) { 650 switch(flags & DN_RT_CNTL_MSK) {
651 case DN_RT_PKT_HELO: 651 case DN_RT_PKT_HELO:
652 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello); 652 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
653 653
654 case DN_RT_PKT_L1RT: 654 case DN_RT_PKT_L1RT:
655 case DN_RT_PKT_L2RT: 655 case DN_RT_PKT_L2RT:
656 return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard); 656 return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
657 case DN_RT_PKT_ERTH: 657 case DN_RT_PKT_ERTH:
658 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello); 658 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
659 659
660 case DN_RT_PKT_EEDH: 660 case DN_RT_PKT_EEDH:
661 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello); 661 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
662 } 662 }
663 } else { 663 } else {
664 if (dn->parms.state != DN_DEV_S_RU) 664 if (dn->parms.state != DN_DEV_S_RU)
665 goto dump_it; 665 goto dump_it;
666 666
667 skb_pull(skb, 1); /* Pull flags */ 667 skb_pull(skb, 1); /* Pull flags */
668 668
669 switch(flags & DN_RT_PKT_MSK) { 669 switch(flags & DN_RT_PKT_MSK) {
670 case DN_RT_PKT_LONG: 670 case DN_RT_PKT_LONG:
671 return dn_route_rx_long(skb); 671 return dn_route_rx_long(skb);
672 case DN_RT_PKT_SHORT: 672 case DN_RT_PKT_SHORT:
673 return dn_route_rx_short(skb); 673 return dn_route_rx_short(skb);
674 } 674 }
675 } 675 }
676 676
677dump_it: 677dump_it:
678 kfree_skb(skb); 678 kfree_skb(skb);
@@ -815,8 +815,8 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
815 rt->u.dst.neighbour = n; 815 rt->u.dst.neighbour = n;
816 } 816 }
817 817
818 if (rt->u.dst.metrics[RTAX_MTU-1] == 0 || 818 if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
819 rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu) 819 rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
820 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; 820 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
821 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst)); 821 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
822 if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 || 822 if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
@@ -876,7 +876,7 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
876 876
877static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard) 877static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
878{ 878{
879 struct flowi fl = { .nl_u = { .dn_u = 879 struct flowi fl = { .nl_u = { .dn_u =
880 { .daddr = oldflp->fld_dst, 880 { .daddr = oldflp->fld_dst,
881 .saddr = oldflp->fld_src, 881 .saddr = oldflp->fld_src,
882 .scope = RT_SCOPE_UNIVERSE, 882 .scope = RT_SCOPE_UNIVERSE,
@@ -899,7 +899,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
899 "dn_route_output_slow: dst=%04x src=%04x mark=%d" 899 "dn_route_output_slow: dst=%04x src=%04x mark=%d"
900 " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst), 900 " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst),
901 dn_ntohs(oldflp->fld_src), 901 dn_ntohs(oldflp->fld_src),
902 oldflp->mark, loopback_dev.ifindex, oldflp->oif); 902 oldflp->mark, loopback_dev.ifindex, oldflp->oif);
903 903
904 /* If we have an output interface, verify its a DECnet device */ 904 /* If we have an output interface, verify its a DECnet device */
905 if (oldflp->oif) { 905 if (oldflp->oif) {
@@ -982,19 +982,19 @@ source_ok:
982 if (err != -ESRCH) 982 if (err != -ESRCH)
983 goto out; 983 goto out;
984 /* 984 /*
985 * Here the fallback is basically the standard algorithm for 985 * Here the fallback is basically the standard algorithm for
986 * routing in endnodes which is described in the DECnet routing 986 * routing in endnodes which is described in the DECnet routing
987 * docs 987 * docs
988 * 988 *
989 * If we are not trying hard, look in neighbour cache. 989 * If we are not trying hard, look in neighbour cache.
990 * The result is tested to ensure that if a specific output 990 * The result is tested to ensure that if a specific output
991 * device/source address was requested, then we honour that 991 * device/source address was requested, then we honour that
992 * here 992 * here
993 */ 993 */
994 if (!try_hard) { 994 if (!try_hard) {
995 neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst); 995 neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
996 if (neigh) { 996 if (neigh) {
997 if ((oldflp->oif && 997 if ((oldflp->oif &&
998 (neigh->dev->ifindex != oldflp->oif)) || 998 (neigh->dev->ifindex != oldflp->oif)) ||
999 (oldflp->fld_src && 999 (oldflp->fld_src &&
1000 (!dn_dev_islocal(neigh->dev, 1000 (!dn_dev_islocal(neigh->dev,
@@ -1044,7 +1044,7 @@ select_source:
1044 if (fl.fld_src == 0) { 1044 if (fl.fld_src == 0) {
1045 fl.fld_src = dnet_select_source(dev_out, gateway, 1045 fl.fld_src = dnet_select_source(dev_out, gateway,
1046 res.type == RTN_LOCAL ? 1046 res.type == RTN_LOCAL ?
1047 RT_SCOPE_HOST : 1047 RT_SCOPE_HOST :
1048 RT_SCOPE_LINK); 1048 RT_SCOPE_LINK);
1049 if (fl.fld_src == 0 && res.type != RTN_LOCAL) 1049 if (fl.fld_src == 0 && res.type != RTN_LOCAL)
1050 goto e_addr; 1050 goto e_addr;
@@ -1074,14 +1074,14 @@ select_source:
1074 if (res.fi->fib_nhs > 1 && fl.oif == 0) 1074 if (res.fi->fib_nhs > 1 && fl.oif == 0)
1075 dn_fib_select_multipath(&fl, &res); 1075 dn_fib_select_multipath(&fl, &res);
1076 1076
1077 /* 1077 /*
1078 * We could add some logic to deal with default routes here and 1078 * We could add some logic to deal with default routes here and
1079 * get rid of some of the special casing above. 1079 * get rid of some of the special casing above.
1080 */ 1080 */
1081 1081
1082 if (!fl.fld_src) 1082 if (!fl.fld_src)
1083 fl.fld_src = DN_FIB_RES_PREFSRC(res); 1083 fl.fld_src = DN_FIB_RES_PREFSRC(res);
1084 1084
1085 if (dev_out) 1085 if (dev_out)
1086 dev_put(dev_out); 1086 dev_put(dev_out);
1087 dev_out = DN_FIB_RES_DEV(res); 1087 dev_out = DN_FIB_RES_DEV(res);
@@ -1144,8 +1144,8 @@ out:
1144 return err; 1144 return err;
1145 1145
1146e_addr: 1146e_addr:
1147 err = -EADDRNOTAVAIL; 1147 err = -EADDRNOTAVAIL;
1148 goto done; 1148 goto done;
1149e_inval: 1149e_inval:
1150 err = -EINVAL; 1150 err = -EINVAL;
1151 goto done; 1151 goto done;
@@ -1169,7 +1169,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1169 if (!(flags & MSG_TRYHARD)) { 1169 if (!(flags & MSG_TRYHARD)) {
1170 rcu_read_lock_bh(); 1170 rcu_read_lock_bh();
1171 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt; 1171 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
1172 rt = rcu_dereference(rt->u.rt_next)) { 1172 rt = rcu_dereference(rt->u.dst.dn_next)) {
1173 if ((flp->fld_dst == rt->fl.fld_dst) && 1173 if ((flp->fld_dst == rt->fl.fld_dst) &&
1174 (flp->fld_src == rt->fl.fld_src) && 1174 (flp->fld_src == rt->fl.fld_src) &&
1175 (flp->mark == rt->fl.mark) && 1175 (flp->mark == rt->fl.mark) &&
@@ -1223,7 +1223,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1223 int flags = 0; 1223 int flags = 0;
1224 __le16 gateway = 0; 1224 __le16 gateway = 0;
1225 __le16 local_src = 0; 1225 __le16 local_src = 0;
1226 struct flowi fl = { .nl_u = { .dn_u = 1226 struct flowi fl = { .nl_u = { .dn_u =
1227 { .daddr = cb->dst, 1227 { .daddr = cb->dst,
1228 .saddr = cb->src, 1228 .saddr = cb->src,
1229 .scope = RT_SCOPE_UNIVERSE, 1229 .scope = RT_SCOPE_UNIVERSE,
@@ -1311,7 +1311,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1311 if (res.fi->fib_nhs > 1 && fl.oif == 0) 1311 if (res.fi->fib_nhs > 1 && fl.oif == 0)
1312 dn_fib_select_multipath(&fl, &res); 1312 dn_fib_select_multipath(&fl, &res);
1313 1313
1314 /* 1314 /*
1315 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT 1315 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
1316 * flag as a hint to set the intra-ethernet bit when 1316 * flag as a hint to set the intra-ethernet bit when
1317 * forwarding. If we've got NAT in operation, we don't do 1317 * forwarding. If we've got NAT in operation, we don't do
@@ -1443,9 +1443,9 @@ int dn_route_input(struct sk_buff *skb)
1443 1443
1444 rcu_read_lock(); 1444 rcu_read_lock();
1445 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; 1445 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
1446 rt = rcu_dereference(rt->u.rt_next)) { 1446 rt = rcu_dereference(rt->u.dst.dn_next)) {
1447 if ((rt->fl.fld_src == cb->src) && 1447 if ((rt->fl.fld_src == cb->src) &&
1448 (rt->fl.fld_dst == cb->dst) && 1448 (rt->fl.fld_dst == cb->dst) &&
1449 (rt->fl.oif == 0) && 1449 (rt->fl.oif == 0) &&
1450 (rt->fl.mark == skb->mark) && 1450 (rt->fl.mark == skb->mark) &&
1451 (rt->fl.iif == cb->iif)) { 1451 (rt->fl.iif == cb->iif)) {
@@ -1514,8 +1514,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1514 1514
1515nlmsg_failure: 1515nlmsg_failure:
1516rtattr_failure: 1516rtattr_failure:
1517 skb_trim(skb, b - skb->data); 1517 skb_trim(skb, b - skb->data);
1518 return -1; 1518 return -1;
1519} 1519}
1520 1520
1521/* 1521/*
@@ -1627,12 +1627,12 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1627 rcu_read_lock_bh(); 1627 rcu_read_lock_bh();
1628 for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0; 1628 for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
1629 rt; 1629 rt;
1630 rt = rcu_dereference(rt->u.rt_next), idx++) { 1630 rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
1631 if (idx < s_idx) 1631 if (idx < s_idx)
1632 continue; 1632 continue;
1633 skb->dst = dst_clone(&rt->u.dst); 1633 skb->dst = dst_clone(&rt->u.dst);
1634 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 1634 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
1635 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1635 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1636 1, NLM_F_MULTI) <= 0) { 1636 1, NLM_F_MULTI) <= 0) {
1637 dst_release(xchg(&skb->dst, NULL)); 1637 dst_release(xchg(&skb->dst, NULL));
1638 rcu_read_unlock_bh(); 1638 rcu_read_unlock_bh();
@@ -1673,7 +1673,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1673{ 1673{
1674 struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private); 1674 struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);
1675 1675
1676 rt = rt->u.rt_next; 1676 rt = rt->u.dst.dn_next;
1677 while(!rt) { 1677 while(!rt) {
1678 rcu_read_unlock_bh(); 1678 rcu_read_unlock_bh();
1679 if (--s->bucket < 0) 1679 if (--s->bucket < 0)
@@ -1721,7 +1721,7 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1721 rt->u.dst.__use, 1721 rt->u.dst.__use,
1722 (int) dst_metric(&rt->u.dst, RTAX_RTT)); 1722 (int) dst_metric(&rt->u.dst, RTAX_RTT));
1723 return 0; 1723 return 0;
1724} 1724}
1725 1725
1726static struct seq_operations dn_rt_cache_seq_ops = { 1726static struct seq_operations dn_rt_cache_seq_ops = {
1727 .start = dn_rt_cache_seq_start, 1727 .start = dn_rt_cache_seq_start,
@@ -1778,38 +1778,38 @@ void __init dn_route_init(void)
1778 for(order = 0; (1UL << order) < goal; order++) 1778 for(order = 0; (1UL << order) < goal; order++)
1779 /* NOTHING */; 1779 /* NOTHING */;
1780 1780
1781 /* 1781 /*
1782 * Only want 1024 entries max, since the table is very, very unlikely 1782 * Only want 1024 entries max, since the table is very, very unlikely
1783 * to be larger than that. 1783 * to be larger than that.
1784 */ 1784 */
1785 while(order && ((((1UL << order) * PAGE_SIZE) / 1785 while(order && ((((1UL << order) * PAGE_SIZE) /
1786 sizeof(struct dn_rt_hash_bucket)) >= 2048)) 1786 sizeof(struct dn_rt_hash_bucket)) >= 2048))
1787 order--; 1787 order--;
1788 1788
1789 do { 1789 do {
1790 dn_rt_hash_mask = (1UL << order) * PAGE_SIZE / 1790 dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
1791 sizeof(struct dn_rt_hash_bucket); 1791 sizeof(struct dn_rt_hash_bucket);
1792 while(dn_rt_hash_mask & (dn_rt_hash_mask - 1)) 1792 while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
1793 dn_rt_hash_mask--; 1793 dn_rt_hash_mask--;
1794 dn_rt_hash_table = (struct dn_rt_hash_bucket *) 1794 dn_rt_hash_table = (struct dn_rt_hash_bucket *)
1795 __get_free_pages(GFP_ATOMIC, order); 1795 __get_free_pages(GFP_ATOMIC, order);
1796 } while (dn_rt_hash_table == NULL && --order > 0); 1796 } while (dn_rt_hash_table == NULL && --order > 0);
1797 1797
1798 if (!dn_rt_hash_table) 1798 if (!dn_rt_hash_table)
1799 panic("Failed to allocate DECnet route cache hash table\n"); 1799 panic("Failed to allocate DECnet route cache hash table\n");
1800 1800
1801 printk(KERN_INFO 1801 printk(KERN_INFO
1802 "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n", 1802 "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
1803 dn_rt_hash_mask, 1803 dn_rt_hash_mask,
1804 (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024); 1804 (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
1805 1805
1806 dn_rt_hash_mask--; 1806 dn_rt_hash_mask--;
1807 for(i = 0; i <= dn_rt_hash_mask; i++) { 1807 for(i = 0; i <= dn_rt_hash_mask; i++) {
1808 spin_lock_init(&dn_rt_hash_table[i].lock); 1808 spin_lock_init(&dn_rt_hash_table[i].lock);
1809 dn_rt_hash_table[i].chain = NULL; 1809 dn_rt_hash_table[i].chain = NULL;
1810 } 1810 }
1811 1811
1812 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1); 1812 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
1813 1813
1814 proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops); 1814 proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
1815} 1815}
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 720501e1ae20..780a141f8342 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -60,7 +60,7 @@ struct dn_hash
#define dz_prefix(key,dz) ((key).datum)

#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
        for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

#define endfor_nexthops(fi) }

@@ -290,82 +290,82 @@ static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
}

static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                        u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
                        struct dn_fib_info *fi, unsigned int flags)
{
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb->tail;

        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_family = AF_DECnet;
        rtm->rtm_dst_len = dst_len;
        rtm->rtm_src_len = 0;
        rtm->rtm_tos = 0;
        rtm->rtm_table = tb_id;
        RTA_PUT_U32(skb, RTA_TABLE, tb_id);
        rtm->rtm_flags = fi->fib_flags;
        rtm->rtm_scope = scope;
        rtm->rtm_type = type;
        if (rtm->rtm_dst_len)
                RTA_PUT(skb, RTA_DST, 2, dst);
        rtm->rtm_protocol = fi->fib_protocol;
        if (fi->fib_priority)
                RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
        if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
                goto rtattr_failure;
        if (fi->fib_nhs == 1) {
                if (fi->fib_nh->nh_gw)
                        RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
                if (fi->fib_nh->nh_oif)
                        RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
        }
        if (fi->fib_nhs > 1) {
                struct rtnexthop *nhp;
                struct rtattr *mp_head;
                if (skb_tailroom(skb) <= RTA_SPACE(0))
                        goto rtattr_failure;
                mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));

                for_nexthops(fi) {
                        if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
                                goto rtattr_failure;
                        nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
                        nhp->rtnh_flags = nh->nh_flags & 0xFF;
                        nhp->rtnh_hops = nh->nh_weight - 1;
                        nhp->rtnh_ifindex = nh->nh_oif;
                        if (nh->nh_gw)
                                RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
                        nhp->rtnh_len = skb->tail - (unsigned char *)nhp;
                } endfor_nexthops(fi);
                mp_head->rta_type = RTA_MULTIPATH;
                mp_head->rta_len = skb->tail - (u8*)mp_head;
        }

        nlh->nlmsg_len = skb->tail - b;
        return skb->len;


nlmsg_failure:
rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -EMSGSIZE;
}


static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
                        struct nlmsghdr *nlh, struct netlink_skb_parms *req)
{
        struct sk_buff *skb;
        u32 pid = req ? req->pid : 0;
        int err = -ENOBUFS;

        skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
                               f->fn_type, f->fn_scope, &f->fn_key, z,
                               DN_FIB_INFO(f), 0);
        if (err < 0) {
@@ -380,7 +380,7 @@ errout:
        rtnl_set_sk_err(RTNLGRP_DECnet_ROUTE, err);
}

static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
                                struct netlink_callback *cb,
                                struct dn_fib_table *tb,
                                struct dn_zone *dz,
@@ -394,12 +394,12 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
                        continue;
                if (f->fn_state & DN_S_ZOMBIE)
                        continue;
                if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
                                cb->nlh->nlmsg_seq,
                                RTM_NEWROUTE,
                                tb->n,
                                (f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
                                f->fn_scope, &f->fn_key, dz->dz_order,
                                f->fn_info, NLM_F_MULTI) < 0) {
                        cb->args[4] = i;
                        return -1;
@@ -409,7 +409,7 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
        return skb->len;
}

static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
                                struct netlink_callback *cb,
                                struct dn_fib_table *tb,
                                struct dn_zone *dz)
@@ -433,10 +433,10 @@ static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
        return skb->len;
}

static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        int m, s_m;
        struct dn_zone *dz;
        struct dn_hash *table = (struct dn_hash *)tb->data;

@@ -457,7 +457,7 @@ static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
        read_unlock(&dn_fib_tables_lock);
        cb->args[2] = m;

        return skb->len;
}

int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -482,7 +482,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        goto next;
                if (dumped)
                        memset(&cb->args[2], 0, sizeof(cb->args) -
                                        2 * sizeof(cb->args[0]));
                if (tb->dump(tb, skb, cb) < 0)
                        goto out;
                dumped = 1;
@@ -503,13 +503,13 @@ static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct
        struct dn_fib_node *new_f, *f, **fp, **del_fp;
        struct dn_zone *dz;
        struct dn_fib_info *fi;
        int z = r->rtm_dst_len;
        int type = r->rtm_type;
        dn_fib_key_t key;
        int err;

        if (z > 16)
                return -EINVAL;

        dz = table->dh_zones[z];
        if (!dz && !(dz = dn_new_zone(table, z)))
@@ -524,8 +524,8 @@ static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct
                key = dz_key(dst, dz);
        }

        if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL)
                return err;

        if (dz->dz_nent > (dz->dz_divisor << 2) &&
            dz->dz_divisor > DN_MAX_DIVISOR &&
@@ -624,9 +624,9 @@ replace:
                dn_rt_cache_flush(-1);
        }

        dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);

        return 0;
out:
        dn_fib_release_info(fi);
        return err;
@@ -637,14 +637,14 @@ static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct
{
        struct dn_hash *table = (struct dn_hash*)tb->data;
        struct dn_fib_node **fp, **del_fp, *f;
        int z = r->rtm_dst_len;
        struct dn_zone *dz;
        dn_fib_key_t key;
        int matched;


        if (z > 16)
                return -EINVAL;

        if ((dz = table->dh_zones[z]) == NULL)
                return -ESRCH;
@@ -680,7 +680,7 @@ static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct
                if (del_fp == NULL &&
                    (!r->rtm_type || f->fn_type == r->rtm_type) &&
                    (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
                    (!r->rtm_protocol ||
                     fi->fib_protocol == r->rtm_protocol) &&
                    dn_fib_nh_match(r, n, rta, fi) == 0)
                        del_fp = fp;
@@ -688,7 +688,7 @@ static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct

        if (del_fp) {
                f = *del_fp;
                dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);

                if (matched != 1) {
                        write_lock_bh(&dn_fib_tables_lock);
@@ -712,7 +712,7 @@ static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct
                return 0;
        }

        return -ESRCH;
}

static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
@@ -759,7 +759,7 @@ static int dn_fib_table_flush(struct dn_fib_table *tb)

static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp, struct dn_fib_res *res)
{
        int err;
        struct dn_zone *dz;
        struct dn_hash *t = (struct dn_hash *)tb->data;

@@ -788,7 +788,7 @@ static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp,

                        if (err == 0) {
                                res->type = f->fn_type;
                                res->scope = f->fn_scope;
                                res->prefixlen = dz->dz_order;
                                goto out;
                        }
@@ -799,21 +799,21 @@ static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp,
        err = 1;
out:
        read_unlock(&dn_fib_tables_lock);
        return err;
}


struct dn_fib_table *dn_fib_get_table(u32 n, int create)
{
        struct dn_fib_table *t;
        struct hlist_node *node;
        unsigned int h;

        if (n < RT_TABLE_MIN)
                return NULL;

        if (n > RT_TABLE_MAX)
                return NULL;

        h = n & (DN_FIB_TABLE_HASHSZ - 1);
        rcu_read_lock();
@@ -825,54 +825,54 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
        }
        rcu_read_unlock();

        if (!create)
                return NULL;

        if (in_interrupt() && net_ratelimit()) {
                printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
                return NULL;
        }

        t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
                    GFP_KERNEL);
        if (t == NULL)
                return NULL;

        t->n = n;
        t->insert = dn_fib_table_insert;
        t->delete = dn_fib_table_delete;
        t->lookup = dn_fib_table_lookup;
        t->flush = dn_fib_table_flush;
        t->dump = dn_fib_table_dump;
        hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);

        return t;
}

struct dn_fib_table *dn_fib_empty_table(void)
{
        u32 id;

        for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
                if (dn_fib_get_table(id, 0) == NULL)
                        return dn_fib_get_table(id, 1);
        return NULL;
}

void dn_fib_flush(void)
{
        int flushed = 0;
        struct dn_fib_table *tb;
        struct hlist_node *node;
        unsigned int h;

        for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
                hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
                        flushed += tb->flush(tb);
        }

        if (flushed)
                dn_rt_cache_flush(-1);
}

void __init dn_fib_table_init(void)
@@ -892,7 +892,7 @@ void __exit dn_fib_table_cleanup(void)
        write_lock(&dn_fib_tables_lock);
        for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
                hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
                                          hlist) {
                        hlist_del(&t->hlist);
                        kfree(t);
                }
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 8b99bd33540d..0e62def05a58 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -138,7 +138,7 @@ static int __init dn_rtmsg_init(void)
        int rv = 0;

        dnrmg = netlink_kernel_create(NETLINK_DNRTMSG, DNRNG_NLGRP_MAX,
                                      dnrmg_receive_user_sk, THIS_MODULE);
        if (dnrmg == NULL) {
                printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
                return -ENOMEM;
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index a4065eb1341e..a6c067b593ab 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -164,7 +164,7 @@ static int dn_node_address_strategy(ctl_table *table, int __user *name, int nlen
        return 0;
}

static int dn_node_address_handler(ctl_table *table, int write,
                                struct file *filp,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
@@ -240,7 +240,7 @@ static int dn_def_dev_strategy(ctl_table *table, int __user *name, int nlen,
        }

        namel = strlen(devname) + 1;
        if (len > namel) len = namel;

        if (copy_to_user(oldval, devname, len))
                return -EFAULT;
@@ -275,7 +275,7 @@ static int dn_def_dev_strategy(ctl_table *table, int __user *name, int nlen,
}


static int dn_def_dev_handler(ctl_table *table, int write,
                                struct file * filp,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
@@ -341,17 +341,17 @@ static int dn_def_dev_handler(ctl_table *table, int write,

static ctl_table dn_table[] = {
        {
                .ctl_name = NET_DECNET_NODE_ADDRESS,
                .procname = "node_address",
                .maxlen = 7,
                .mode = 0644,
                .proc_handler = dn_node_address_handler,
                .strategy = dn_node_address_strategy,
        },
        {
                .ctl_name = NET_DECNET_NODE_NAME,
                .procname = "node_name",
                .data = node_name,
                .maxlen = 7,
                .mode = 0644,
                .proc_handler = &proc_dostring,
@@ -359,8 +359,8 @@ static ctl_table dn_table[] = {
        },
        {
                .ctl_name = NET_DECNET_DEFAULT_DEVICE,
                .procname = "default_device",
                .maxlen = 16,
                .mode = 0644,
                .proc_handler = dn_def_dev_handler,
                .strategy = dn_def_dev_strategy,
@@ -432,32 +432,32 @@ static ctl_table dn_table[] = {
                .extra2 = &max_decnet_no_fc_max_cwnd
        },
        {
                .ctl_name = NET_DECNET_MEM,
                .procname = "decnet_mem",
                .data = &sysctl_decnet_mem,
                .maxlen = sizeof(sysctl_decnet_mem),
                .mode = 0644,
                .proc_handler = &proc_dointvec,
                .strategy = &sysctl_intvec,
        },
        {
                .ctl_name = NET_DECNET_RMEM,
                .procname = "decnet_rmem",
                .data = &sysctl_decnet_rmem,
                .maxlen = sizeof(sysctl_decnet_rmem),
                .mode = 0644,
                .proc_handler = &proc_dointvec,
                .strategy = &sysctl_intvec,
        },
        {
                .ctl_name = NET_DECNET_WMEM,
                .procname = "decnet_wmem",
                .data = &sysctl_decnet_wmem,
                .maxlen = sizeof(sysctl_decnet_wmem),
                .mode = 0644,
                .proc_handler = &proc_dointvec,
                .strategy = &sysctl_intvec,
        },
        {
                .ctl_name = NET_DECNET_DEBUG_LEVEL,
                .procname = "debug",
@@ -472,18 +472,18 @@ static ctl_table dn_table[] = {

static ctl_table dn_dir_table[] = {
        {
                .ctl_name = NET_DECNET,
                .procname = "decnet",
                .mode = 0555,
                .child = dn_table},
        {0}
};

static ctl_table dn_root_table[] = {
        {
                .ctl_name = CTL_NET,
                .procname = "net",
                .mode = 0555,
                .child = dn_dir_table
        },
        {0}
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 4d66aac13483..f2ce41434290 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -139,7 +139,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
        skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);

        /*
         * An error occurred so return it. Because skb_recv_datagram()
         * handles the blocking we don't see and worry about blocking
         * retries.
         */
@@ -190,15 +190,15 @@ static int econet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
        struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
        struct sock *sk;
        struct econet_sock *eo;

        /*
         * Check legality
         */

        if (addr_len < sizeof(struct sockaddr_ec) ||
            sec->sec_family != AF_ECONET)
                return -EINVAL;

        mutex_lock(&econet_mutex);

        sk = sock->sk;
@@ -283,18 +283,18 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
        int i;
        mm_segment_t oldfs;
#endif

        /*
         * Check the flags.
         */

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
                return -EINVAL;

        /*
         * Get and verify the address.
         */

        mutex_lock(&econet_mutex);

        if (saddr == NULL) {
@@ -339,17 +339,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                unsigned short proto = 0;

                dev_hold(dev);

                skb = sock_alloc_send_skb(sk, len+LL_RESERVED_SPACE(dev),
                                          msg->msg_flags & MSG_DONTWAIT, &err);
                if (skb==NULL)
                        goto out_unlock;

                skb_reserve(skb, LL_RESERVED_SPACE(dev));
                skb->nh.raw = skb->data;

                eb = (struct ec_cb *)&skb->cb;

                /* BUG: saddr may be NULL */
                eb->cookie = saddr->cookie;
                eb->sec = *saddr;
@@ -359,7 +359,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                        int res;
                        struct ec_framehdr *fh;
                        err = -EINVAL;
                        res = dev->hard_header(skb, dev, ntohs(proto),
                                               &addr, NULL, len);
                        /* Poke in our control byte and
                           port number. Hack, hack. */
@@ -372,7 +372,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                        } else if (res < 0)
                                goto out_free;
                }

                /* Copy the data. Returns -EFAULT on error */
                err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
                skb->protocol = proto;
@@ -380,15 +380,15 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                skb->priority = sk->sk_priority;
                if (err)
                        goto out_free;

                err = -ENETDOWN;
                if (!(dev->flags & IFF_UP))
                        goto out_free;

                /*
                 * Now send it
                 */

                dev_queue_xmit(skb);
                dev_put(dev);
                mutex_unlock(&econet_mutex);
@@ -414,7 +414,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                mutex_unlock(&econet_mutex);
                return -ENETDOWN; /* No socket - can't send */
        }

        /* Make up a UDP datagram and hand it off to some higher intellect. */

        memset(&udpdest, 0, sizeof(udpdest));
@@ -432,7 +432,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                idev = __in_dev_get_rcu(dev);
                if (idev) {
                        if (idev->ifa_list)
                                network = ntohl(idev->ifa_list->ifa_address) &
                                        0xffffff00; /* !!! */
                }
                rcu_read_unlock();
@@ -470,7 +470,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
        }

        /* Get a skbuff (no data, just holds our cb information) */
        if ((skb = sock_alloc_send_skb(sk, 0,
                                       msg->msg_flags & MSG_DONTWAIT,
                                       &err)) == NULL) {
                mutex_unlock(&econet_mutex);
@@ -660,7 +660,7 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
                return -EFAULT;

        if ((dev = dev_get_by_name(ifr.ifr_name)) == NULL)
                return -ENODEV;

        sec = (struct sockaddr_ec *)&ifr.ifr_addr;
@@ -754,7 +754,7 @@ static const struct proto_ops econet_ops = {
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = econet_getname,
        .poll = datagram_poll,
        .ioctl = econet_ioctl,
        .listen = sock_no_listen,
@@ -780,7 +780,7 @@ static struct sock *ec_listening_socket(unsigned char port, unsigned char

        sk_for_each(sk, node, &econet_sklist) {
                struct econet_sock *opt = ec_sk(sk);
                if ((opt->port == port || opt->port == 0) &&
                    (opt->station == station || opt->station == 0) &&
                    (opt->net == net || opt->net == 0))
                        goto found;
@@ -815,7 +815,7 @@ static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,

#ifdef CONFIG_ECONET_AUNUDP
/*
 * Send an AUN protocol response.
 */

static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
@@ -828,7 +828,7 @@ static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
        struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
        struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
        struct msghdr udpmsg;

        udpmsg.msg_name = (void *)&sin;
        udpmsg.msg_namelen = sizeof(sin);
        udpmsg.msg_control = NULL;
@@ -858,7 +858,7 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
        if ((sk = ec_listening_socket(ah->port, stn, edev->net)) == NULL)
                goto bad; /* Nobody wants it */

        newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
                           GFP_ATOMIC);
        if (newskb == NULL)
        {
@@ -867,7 +867,7 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
                goto bad;
        }

        memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah+1),
               len - sizeof(struct aunhdr));

        if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port))
@@ -990,7 +990,7 @@ static void ab_cleanup(unsigned long h)
                struct ec_cb *eb = (struct ec_cb *)&skb->cb;
                if ((jiffies - eb->start) > eb->timeout)
                {
                        tx_result(skb->sk, eb->cookie,
                                  ECTYPE_TRANSMIT_NOT_PRESENT);
                        skb_unlink(skb, &aun_queue);
                        kfree_skb(skb);
@@ -1024,11 +1024,11 @@ static int __init aun_udp_initialise(void)
                printk("AUN: socket error %d\n", -error);
                return error;
        }

        udpsock->sk->sk_reuse = 1;
        udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
                                                    from interrupts */

        error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
                                   sizeof(sin));
        if (error < 0)
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 2d31bf3f05c5..766a0b59c0d3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -12,14 +12,14 @@
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Florian La Roche, <rzsfl@rz.uni-sb.de>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *              Mr Linux        : Arp problems
 *              Alan Cox        : Generic queue tidyup (very tiny here)
 *              Alan Cox        : eth_header ntohs should be htons
 *              Alan Cox        : eth_rebuild_header missing an htons and
 *                                minor other things.
 *              Tegge           : Arp bug fixes.
 *              Florian         : Removed many unnecessary functions, code cleanup
 *                                and changes for new arp and skbuff.
 *              Alan Cox        : Redid header building to reflect new format.
@@ -307,9 +307,9 @@ void ether_setup(struct net_device *dev)
        dev->hard_header_len = ETH_HLEN;
        dev->mtu = ETH_DATA_LEN;
        dev->addr_len = ETH_ALEN;
        dev->tx_queue_len = 1000; /* Ethernet wants good queues */
        dev->flags = IFF_BROADCAST|IFF_MULTICAST;

        memset(dev->broadcast, 0xFF, ETH_ALEN);

}
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 7a95c3d81314..ec6d8851a061 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -97,7 +97,7 @@ static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len,
        struct prism2_wep_data *wep = priv;
        u32 klen, len;
        u8 *pos;

        if (skb_headroom(skb) < 4 || skb->len < hdr_len)
                return -1;

@@ -146,17 +146,17 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        /* other checks are in prism2_wep_build_iv */
        if (skb_tailroom(skb) < 4)
                return -1;

        /* add the IV to the frame */
        if (prism2_wep_build_iv(skb, hdr_len, NULL, 0, priv))
                return -1;

        /* Copy the IV into the first 3 bytes of the key */
        memcpy(key, skb->data + hdr_len, 3);

        /* Copy rest of the WEP key (the secret part) */
        memcpy(key + 3, wep->key, wep->key_len);

        len = skb->len - hdr_len - 4;
        pos = skb->data + hdr_len + 4;
        klen = 3 + wep->key_len;
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index d97e5412e31b..4084909f6f92 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -663,7 +663,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
663 * any security data (IV, ICV, etc) that was left behind */ 663 * any security data (IV, ICV, etc) that was left behind */
664 if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) && 664 if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) &&
665 ieee->host_strip_iv_icv) { 665 ieee->host_strip_iv_icv) {
666 int trimlen = 0; 666 int trimlen = 0;
667 667
668 /* Top two-bits of byte 3 are the key index */ 668 /* Top two-bits of byte 3 are the key index */
669 if (skb->len >= hdrlen + 3) 669 if (skb->len >= hdrlen + 3)
@@ -852,7 +852,7 @@ void ieee80211_rx_any(struct ieee80211_device *ieee,
852 852
853 if ((fc & IEEE80211_FCTL_VERS) != 0) 853 if ((fc & IEEE80211_FCTL_VERS) != 0)
854 goto drop_free; 854 goto drop_free;
855 855
856 switch (fc & IEEE80211_FCTL_FTYPE) { 856 switch (fc & IEEE80211_FCTL_FTYPE) {
857 case IEEE80211_FTYPE_MGMT: 857 case IEEE80211_FTYPE_MGMT:
858 if (skb->len < sizeof(struct ieee80211_hdr_3addr)) 858 if (skb->len < sizeof(struct ieee80211_hdr_3addr))
@@ -1489,7 +1489,7 @@ static void update_network(struct ieee80211_network *dst,
1489 1489
1490 /* We only update the statistics if they were created by receiving 1490 /* We only update the statistics if they were created by receiving
1491 * the network information on the actual channel the network is on. 1491 * the network information on the actual channel the network is on.
1492 * 1492 *
1493 * This keeps beacons received on neighbor channels from bringing 1493 * This keeps beacons received on neighbor channels from bringing
1494 * down the signal level of an AP. */ 1494 * down the signal level of an AP. */
1495 if (dst->channel == src->stats.received_channel) 1495 if (dst->channel == src->stats.received_channel)
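
The first ieee80211_rx.c hunk strips leftover security bytes when the hardware already decrypted the frame, and notes that the key index sits in the top two bits of byte 3 of the WEP IV header. As a reminder of the 4-byte field being parsed (a sketch; the helper name is illustrative):

#include <stdint.h>

/* WEP per-frame header that follows the 802.11 MAC header:
 *   bytes 0-2 : 24-bit initialisation vector
 *   byte  3   : bits 6-7 = key index, bits 0-5 reserved            */
static unsigned int wep_key_index(const uint8_t *iv_hdr)
{
	return iv_hdr[3] >> 6;		/* "top two bits of byte 3" */
}
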
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index 854fc13cd78d..c55949e5c58a 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -54,7 +54,7 @@ Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
54 | | tion | (BSSID) | | | ence | data | | 54 | | tion | (BSSID) | | | ence | data | |
55 `--------------------------------------------------| |------' 55 `--------------------------------------------------| |------'
56Total: 28 non-data bytes `----.----' 56Total: 28 non-data bytes `----.----'
57 | 57 |
58 .- 'Frame data' expands, if WEP enabled, to <----------' 58 .- 'Frame data' expands, if WEP enabled, to <----------'
59 | 59 |
60 V 60 V
@@ -64,8 +64,8 @@ Bytes | 4 | 0-2296 | 4 |
64Desc. | IV | Encrypted | ICV | 64Desc. | IV | Encrypted | ICV |
65 | | Packet | | 65 | | Packet | |
66 `-----| |-----' 66 `-----| |-----'
67 `-----.-----' 67 `-----.-----'
68 | 68 |
69 .- 'Encrypted Packet' expands to 69 .- 'Encrypted Packet' expands to
70 | 70 |
71 V 71 V
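
The comment block being cleaned up in ieee80211_tx.c documents the frame overhead: 28 non-data bytes for a plain 3-address data frame, plus a 4-byte IV and 4-byte ICV once WEP expands the frame body. A hedged helper that applies those figures (constants taken from the diagram above, not recomputed from the header files):

/* Payload bytes available per fragment, per the ieee80211_tx.c diagram:
 * 24-byte 3-addr header + 4-byte FCS = 28 non-data bytes, plus a 4-byte
 * IV and a 4-byte ICV when WEP is enabled. Sketch only.                 */
static int wep_payload_room(int frag_size, int wep_enabled)
{
	int overhead = 28;		/* header + FCS, per the diagram */

	if (wep_enabled)
		overhead += 4 + 4;	/* IV + ICV */
	return frag_size - overhead;
}
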
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 5cb9cfd35397..40d7a55fe03e 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -754,7 +754,7 @@ int ieee80211_wx_set_auth(struct net_device *dev,
754 int err = 0; 754 int err = 0;
755 755
756 spin_lock_irqsave(&ieee->lock, flags); 756 spin_lock_irqsave(&ieee->lock, flags);
757 757
758 switch (wrqu->param.flags & IW_AUTH_INDEX) { 758 switch (wrqu->param.flags & IW_AUTH_INDEX) {
759 case IW_AUTH_WPA_VERSION: 759 case IW_AUTH_WPA_VERSION:
760 case IW_AUTH_CIPHER_PAIRWISE: 760 case IW_AUTH_CIPHER_PAIRWISE:
@@ -799,7 +799,7 @@ int ieee80211_wx_get_auth(struct net_device *dev,
799 int err = 0; 799 int err = 0;
800 800
801 spin_lock_irqsave(&ieee->lock, flags); 801 spin_lock_irqsave(&ieee->lock, flags);
802 802
803 switch (wrqu->param.flags & IW_AUTH_INDEX) { 803 switch (wrqu->param.flags & IW_AUTH_INDEX) {
804 case IW_AUTH_WPA_VERSION: 804 case IW_AUTH_WPA_VERSION:
805 case IW_AUTH_CIPHER_PAIRWISE: 805 case IW_AUTH_CIPHER_PAIRWISE:
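
Both ieee80211_wx.c hunks sit inside the usual spin_lock_irqsave()/spin_unlock_irqrestore() bracket that the set_auth/get_auth handlers hold around ieee->lock while switching on IW_AUTH_INDEX. The bare pattern, for reference (structure and field are placeholders, not part of the patch):

#include <linux/spinlock.h>

struct demo_state {
	spinlock_t lock;		/* initialise with spin_lock_init() */
	int auth_alg;			/* example field guarded by the lock */
};

static void demo_set_auth_alg(struct demo_state *st, int alg)
{
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);	/* also masks local IRQs */
	st->auth_alg = alg;
	spin_unlock_irqrestore(&st->lock, flags);
}
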
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index a824852909e4..cc8110bdd579 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -30,7 +30,7 @@
30 * Overview 30 * Overview
31 * 31 *
32 * Before you can associate, you have to authenticate. 32 * Before you can associate, you have to authenticate.
33 * 33 *
34 */ 34 */
35 35
36/* Sends out an association request to the desired AP */ 36/* Sends out an association request to the desired AP */
@@ -41,10 +41,10 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
41 41
42 /* Switch to correct channel for this network */ 42 /* Switch to correct channel for this network */
43 mac->set_channel(mac->dev, net->channel); 43 mac->set_channel(mac->dev, net->channel);
44 44
45 /* Send association request */ 45 /* Send association request */
46 ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_ASSOC_REQ, 0); 46 ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_ASSOC_REQ, 0);
47 47
48 dprintk(KERN_INFO PFX "sent association request!\n"); 48 dprintk(KERN_INFO PFX "sent association request!\n");
49 49
50 spin_lock_irqsave(&mac->lock, flags); 50 spin_lock_irqsave(&mac->lock, flags);
@@ -153,7 +153,7 @@ network_matches_request(struct ieee80211softmac_device *mac, struct ieee80211_ne
153 } 153 }
154 154
155 /* if 'ANY' network requested, take any that doesn't have privacy enabled */ 155 /* if 'ANY' network requested, take any that doesn't have privacy enabled */
156 if (mac->associnfo.req_essid.len == 0 156 if (mac->associnfo.req_essid.len == 0
157 && !(net->capability & WLAN_CAPABILITY_PRIVACY)) 157 && !(net->capability & WLAN_CAPABILITY_PRIVACY))
158 return 1; 158 return 1;
159 if (net->ssid_len != mac->associnfo.req_essid.len) 159 if (net->ssid_len != mac->associnfo.req_essid.len)
@@ -212,8 +212,8 @@ ieee80211softmac_assoc_work(struct work_struct *work)
212 212
213 /* try to find the requested network in our list, if we found one already */ 213 /* try to find the requested network in our list, if we found one already */
214 if (bssvalid || mac->associnfo.bssfixed) 214 if (bssvalid || mac->associnfo.bssfixed)
215 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); 215 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
216 216
217 /* Search the ieee80211 networks for this network if we didn't find it by bssid, 217 /* Search the ieee80211 networks for this network if we didn't find it by bssid,
218 * but only if we've scanned at least once (to get a better list of networks to 218 * but only if we've scanned at least once (to get a better list of networks to
219 * select from). If we have not scanned before, the !found logic below will be 219 * select from). If we have not scanned before, the !found logic below will be
@@ -265,7 +265,7 @@ ieee80211softmac_assoc_work(struct work_struct *work)
265 if (mac->associnfo.scan_retry > 0) { 265 if (mac->associnfo.scan_retry > 0) {
266 mac->associnfo.scan_retry--; 266 mac->associnfo.scan_retry--;
267 267
268 /* We know of no such network. Let's scan. 268 /* We know of no such network. Let's scan.
269 * NB: this also happens if we had no memory to copy the network info... 269 * NB: this also happens if we had no memory to copy the network info...
270 * Maybe we can hope to have more memory after scanning finishes ;) 270 * Maybe we can hope to have more memory after scanning finishes ;)
271 */ 271 */
@@ -353,7 +353,7 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac,
353 mac->set_bssid_filter(mac->dev, net->bssid); 353 mac->set_bssid_filter(mac->dev, net->bssid);
354 memcpy(mac->ieee->bssid, net->bssid, ETH_ALEN); 354 memcpy(mac->ieee->bssid, net->bssid, ETH_ALEN);
355 netif_carrier_on(mac->dev); 355 netif_carrier_on(mac->dev);
356 356
357 mac->association_id = le16_to_cpup(&resp->aid); 357 mac->association_id = le16_to_cpup(&resp->aid);
358} 358}
359 359
@@ -374,7 +374,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
374 374
375 if (unlikely(!mac->running)) 375 if (unlikely(!mac->running))
376 return -ENODEV; 376 return -ENODEV;
377 377
378 spin_lock_irqsave(&mac->lock, flags); 378 spin_lock_irqsave(&mac->lock, flags);
379 379
380 if (!mac->associnfo.associating) { 380 if (!mac->associnfo.associating) {
@@ -426,7 +426,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
426 mac->associnfo.associated = 0; 426 mac->associnfo.associated = 0;
427 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, network); 427 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, network);
428 } 428 }
429 429
430 spin_unlock_irqrestore(&mac->lock, flags); 430 spin_unlock_irqrestore(&mac->lock, flags);
431 return 0; 431 return 0;
432} 432}
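
The network_matches_request() hunk keeps the rule that an empty ("ANY") requested ESSID matches any network without WLAN_CAPABILITY_PRIVACY, and that otherwise the ESSID lengths and bytes must agree. A compact restatement of that predicate (a sketch; the structures are simplified stand-ins for the softmac ones):

#include <string.h>

struct essid_req   { unsigned char len; const unsigned char *data; };
struct scanned_net { unsigned char ssid_len; const unsigned char *ssid; int privacy; };

/* Mirrors the matching rule above: an empty request ("ANY") takes any
 * open network; otherwise the ESSIDs must be identical.               */
static int essid_matches(const struct essid_req *req, const struct scanned_net *net)
{
	if (req->len == 0)
		return !net->privacy;
	if (net->ssid_len != req->len)
		return 0;
	return memcmp(net->ssid, req->data, req->len) == 0;
}
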
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 8ed3e59b8024..826c32d24461 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -30,12 +30,12 @@ static void ieee80211softmac_auth_queue(struct work_struct *work);
30 30
31/* Queues an auth request to the desired AP */ 31/* Queues an auth request to the desired AP */
32int 32int
33ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, 33ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
34 struct ieee80211softmac_network *net) 34 struct ieee80211softmac_network *net)
35{ 35{
36 struct ieee80211softmac_auth_queue_item *auth; 36 struct ieee80211softmac_auth_queue_item *auth;
37 unsigned long flags; 37 unsigned long flags;
38 38
39 if (net->authenticating || net->authenticated) 39 if (net->authenticating || net->authenticated)
40 return 0; 40 return 0;
41 net->authenticating = 1; 41 net->authenticating = 1;
@@ -55,7 +55,7 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
55 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; 55 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
56 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; 56 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
57 INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue); 57 INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
58 58
59 /* Lock (for list) */ 59 /* Lock (for list) */
60 spin_lock_irqsave(&mac->lock, flags); 60 spin_lock_irqsave(&mac->lock, flags);
61 61
@@ -63,7 +63,7 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
63 list_add_tail(&auth->list, &mac->auth_queue); 63 list_add_tail(&auth->list, &mac->auth_queue);
64 schedule_delayed_work(&auth->work, 0); 64 schedule_delayed_work(&auth->work, 0);
65 spin_unlock_irqrestore(&mac->lock, flags); 65 spin_unlock_irqrestore(&mac->lock, flags);
66 66
67 return 0; 67 return 0;
68} 68}
69 69
@@ -85,7 +85,7 @@ ieee80211softmac_auth_queue(struct work_struct *work)
85 if(auth->retry > 0) { 85 if(auth->retry > 0) {
86 /* Switch to correct channel for this network */ 86 /* Switch to correct channel for this network */
87 mac->set_channel(mac->dev, net->channel); 87 mac->set_channel(mac->dev, net->channel);
88 88
89 /* Lock and set flags */ 89 /* Lock and set flags */
90 spin_lock_irqsave(&mac->lock, flags); 90 spin_lock_irqsave(&mac->lock, flags);
91 if (unlikely(!mac->running)) { 91 if (unlikely(!mac->running)) {
@@ -130,11 +130,11 @@ ieee80211softmac_auth_challenge_response(struct work_struct *work)
130} 130}
131 131
132/* Handle the auth response from the AP 132/* Handle the auth response from the AP
133 * This should be registered with ieee80211 as handle_auth 133 * This should be registered with ieee80211 as handle_auth
134 */ 134 */
135int 135int
136ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) 136ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
137{ 137{
138 138
139 struct list_head *list_ptr; 139 struct list_head *list_ptr;
140 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 140 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
@@ -142,7 +142,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
142 struct ieee80211softmac_network *net = NULL; 142 struct ieee80211softmac_network *net = NULL;
143 unsigned long flags; 143 unsigned long flags;
144 u8 * data; 144 u8 * data;
145 145
146 if (unlikely(!mac->running)) 146 if (unlikely(!mac->running))
147 return -ENODEV; 147 return -ENODEV;
148 148
@@ -157,15 +157,15 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
157 aq = NULL; 157 aq = NULL;
158 } 158 }
159 spin_unlock_irqrestore(&mac->lock, flags); 159 spin_unlock_irqrestore(&mac->lock, flags);
160 160
161 /* Make sure that we've got an auth queue item for this request */ 161 /* Make sure that we've got an auth queue item for this request */
162 if(aq == NULL) 162 if(aq == NULL)
163 { 163 {
164 dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2)); 164 dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
165 /* Error #? */ 165 /* Error #? */
166 return -1; 166 return -1;
167 } 167 }
168 168
169 /* Check for out of order authentication */ 169 /* Check for out of order authentication */
170 if(!net->authenticating) 170 if(!net->authenticating)
171 { 171 {
@@ -182,10 +182,10 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
182 case WLAN_STATUS_SUCCESS: 182 case WLAN_STATUS_SUCCESS:
183 /* Update the status to Authenticated */ 183 /* Update the status to Authenticated */
184 spin_lock_irqsave(&mac->lock, flags); 184 spin_lock_irqsave(&mac->lock, flags);
185 net->authenticating = 0; 185 net->authenticating = 0;
186 net->authenticated = 1; 186 net->authenticated = 1;
187 spin_unlock_irqrestore(&mac->lock, flags); 187 spin_unlock_irqrestore(&mac->lock, flags);
188 188
189 /* Send event */ 189 /* Send event */
190 printkl(KERN_NOTICE PFX "Open Authentication completed with "MAC_FMT"\n", MAC_ARG(net->bssid)); 190 printkl(KERN_NOTICE PFX "Open Authentication completed with "MAC_FMT"\n", MAC_ARG(net->bssid));
191 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net); 191 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net);
@@ -196,8 +196,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
196 net->authenticated = 0; 196 net->authenticated = 0;
197 net->authenticating = 0; 197 net->authenticating = 0;
198 spin_unlock_irqrestore(&mac->lock, flags); 198 spin_unlock_irqrestore(&mac->lock, flags);
199 199
200 printkl(KERN_NOTICE PFX "Open Authentication with "MAC_FMT" failed, error code: %i\n", 200 printkl(KERN_NOTICE PFX "Open Authentication with "MAC_FMT" failed, error code: %i\n",
201 MAC_ARG(net->bssid), le16_to_cpup(&auth->status)); 201 MAC_ARG(net->bssid), le16_to_cpup(&auth->status));
202 /* Count the error? */ 202 /* Count the error? */
203 break; 203 break;
@@ -212,11 +212,11 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
212 data = (u8 *)auth->info_element; 212 data = (u8 *)auth->info_element;
213 if (*data++ != MFIE_TYPE_CHALLENGE) { 213 if (*data++ != MFIE_TYPE_CHALLENGE) {
214 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); 214 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
215 break; 215 break;
216 } 216 }
217 /* Save the challenge */ 217 /* Save the challenge */
218 spin_lock_irqsave(&mac->lock, flags); 218 spin_lock_irqsave(&mac->lock, flags);
219 net->challenge_len = *data++; 219 net->challenge_len = *data++;
220 if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) 220 if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
221 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; 221 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
222 kfree(net->challenge); 222 kfree(net->challenge);
@@ -229,7 +229,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
229 spin_unlock_irqrestore(&mac->lock, flags); 229 spin_unlock_irqrestore(&mac->lock, flags);
230 break; 230 break;
231 } 231 }
232 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 232 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
233 233
234 /* We reuse the work struct from the auth request here. 234 /* We reuse the work struct from the auth request here.
235 * It is safe to do so as each one is per-request, and 235 * It is safe to do so as each one is per-request, and
@@ -248,22 +248,22 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
248 /* Check the status code of the response */ 248 /* Check the status code of the response */
249 switch(auth->status) { 249 switch(auth->status) {
250 case WLAN_STATUS_SUCCESS: 250 case WLAN_STATUS_SUCCESS:
251 /* Update the status to Authenticated */ 251 /* Update the status to Authenticated */
252 spin_lock_irqsave(&mac->lock, flags); 252 spin_lock_irqsave(&mac->lock, flags);
253 net->authenticating = 0; 253 net->authenticating = 0;
254 net->authenticated = 1; 254 net->authenticated = 1;
255 spin_unlock_irqrestore(&mac->lock, flags); 255 spin_unlock_irqrestore(&mac->lock, flags);
256 printkl(KERN_NOTICE PFX "Shared Key Authentication completed with "MAC_FMT"\n", 256 printkl(KERN_NOTICE PFX "Shared Key Authentication completed with "MAC_FMT"\n",
257 MAC_ARG(net->bssid)); 257 MAC_ARG(net->bssid));
258 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net); 258 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net);
259 break; 259 break;
260 default: 260 default:
261 printkl(KERN_NOTICE PFX "Shared Key Authentication with "MAC_FMT" failed, error code: %i\n", 261 printkl(KERN_NOTICE PFX "Shared Key Authentication with "MAC_FMT" failed, error code: %i\n",
262 MAC_ARG(net->bssid), le16_to_cpup(&auth->status)); 262 MAC_ARG(net->bssid), le16_to_cpup(&auth->status));
263 /* Lock and reset flags */ 263 /* Lock and reset flags */
264 spin_lock_irqsave(&mac->lock, flags); 264 spin_lock_irqsave(&mac->lock, flags);
265 net->authenticating = 0; 265 net->authenticating = 0;
266 net->authenticated = 0; 266 net->authenticated = 0;
267 spin_unlock_irqrestore(&mac->lock, flags); 267 spin_unlock_irqrestore(&mac->lock, flags);
268 /* Count the error? */ 268 /* Count the error? */
269 break; 269 break;
@@ -277,7 +277,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
277 goto free_aq; 277 goto free_aq;
278 break; 278 break;
279 default: 279 default:
280 /* ERROR */ 280 /* ERROR */
281 goto free_aq; 281 goto free_aq;
282 break; 282 break;
283 } 283 }
@@ -313,7 +313,7 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
313 spin_lock_irqsave(&mac->lock, flags); 313 spin_lock_irqsave(&mac->lock, flags);
314 net->authenticating = 0; 314 net->authenticating = 0;
315 net->authenticated = 0; 315 net->authenticated = 0;
316 316
317 /* Find correct auth queue item, if it exists */ 317 /* Find correct auth queue item, if it exists */
318 list_for_each(list_ptr, &mac->auth_queue) { 318 list_for_each(list_ptr, &mac->auth_queue) {
319 aq = list_entry(list_ptr, struct ieee80211softmac_auth_queue_item, list); 319 aq = list_entry(list_ptr, struct ieee80211softmac_auth_queue_item, list);
@@ -322,7 +322,7 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
322 else 322 else
323 aq = NULL; 323 aq = NULL;
324 } 324 }
325 325
326 /* Cancel pending work */ 326 /* Cancel pending work */
327 if(aq != NULL) 327 if(aq != NULL)
328 /* Not entirely safe? What about running work? */ 328 /* Not entirely safe? What about running work? */
@@ -333,7 +333,7 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
333 if(net->challenge != NULL) 333 if(net->challenge != NULL)
334 kfree(net->challenge); 334 kfree(net->challenge);
335 kfree(net); 335 kfree(net);
336 336
337 /* can't transmit data right now... */ 337 /* can't transmit data right now... */
338 netif_carrier_off(mac->dev); 338 netif_carrier_off(mac->dev);
339 spin_unlock_irqrestore(&mac->lock, flags); 339 spin_unlock_irqrestore(&mac->lock, flags);
@@ -341,15 +341,15 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
341 ieee80211softmac_try_reassoc(mac); 341 ieee80211softmac_try_reassoc(mac);
342} 342}
343 343
344/* 344/*
345 * Sends a deauth request to the desired AP 345 * Sends a deauth request to the desired AP
346 */ 346 */
347int 347int
348ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac, 348ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac,
349 struct ieee80211softmac_network *net, int reason) 349 struct ieee80211softmac_network *net, int reason)
350{ 350{
351 int ret; 351 int ret;
352 352
353 /* Make sure the network is authenticated */ 353 /* Make sure the network is authenticated */
354 if (!net->authenticated) 354 if (!net->authenticated)
355 { 355 {
@@ -357,25 +357,25 @@ ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac,
357 /* Error okay? */ 357 /* Error okay? */
358 return -EPERM; 358 return -EPERM;
359 } 359 }
360 360
361 /* Send the de-auth packet */ 361 /* Send the de-auth packet */
362 if((ret = ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_DEAUTH, reason))) 362 if((ret = ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_DEAUTH, reason)))
363 return ret; 363 return ret;
364 364
365 ieee80211softmac_deauth_from_net(mac, net); 365 ieee80211softmac_deauth_from_net(mac, net);
366 return 0; 366 return 0;
367} 367}
368 368
369/* 369/*
370 * This should be registered with ieee80211 as handle_deauth 370 * This should be registered with ieee80211 as handle_deauth
371 */ 371 */
372int 372int
373ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth) 373ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth)
374{ 374{
375 375
376 struct ieee80211softmac_network *net = NULL; 376 struct ieee80211softmac_network *net = NULL;
377 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 377 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
378 378
379 if (unlikely(!mac->running)) 379 if (unlikely(!mac->running))
380 return -ENODEV; 380 return -ENODEV;
381 381
@@ -385,7 +385,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
385 } 385 }
386 386
387 net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2); 387 net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2);
388 388
389 if (net == NULL) { 389 if (net == NULL) {
390 dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n", 390 dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
391 MAC_ARG(deauth->header.addr2)); 391 MAC_ARG(deauth->header.addr2));
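
ieee80211softmac_auth_req() above allocates a queue item, initialises it with INIT_DELAYED_WORK(), kicks it off with schedule_delayed_work(..., 0), and lets the work function retry until auth->retry runs out. The skeleton of that delayed-work retry pattern, stripped of the softmac specifics (the item structure and timings are illustrative):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct retry_item {
	struct delayed_work work;
	int retries_left;
};

static void retry_fn(struct work_struct *work)
{
	struct retry_item *it =
		container_of(work, struct retry_item, work.work);

	if (--it->retries_left > 0)
		schedule_delayed_work(&it->work, HZ);	/* try again in ~1s */
	else
		kfree(it);				/* give up */
}

static struct retry_item *retry_start(int retries)
{
	struct retry_item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return NULL;
	it->retries_left = retries;
	INIT_DELAYED_WORK(&it->work, retry_fn);
	schedule_delayed_work(&it->work, 0);		/* run as soon as possible */
	return it;
}
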
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index b9015656cfb3..b3e33a4d4869 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -79,7 +79,7 @@ ieee80211softmac_notify_callback(struct work_struct *work)
79 container_of(work, struct ieee80211softmac_event, work.work); 79 container_of(work, struct ieee80211softmac_event, work.work);
80 struct ieee80211softmac_event event = *pevent; 80 struct ieee80211softmac_event event = *pevent;
81 kfree(pevent); 81 kfree(pevent);
82 82
83 event.fun(event.mac->dev, event.event_type, event.context); 83 event.fun(event.mac->dev, event.event_type, event.context);
84} 84}
85 85
@@ -92,14 +92,14 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
92 92
93 if (event < -1 || event > IEEE80211SOFTMAC_EVENT_LAST) 93 if (event < -1 || event > IEEE80211SOFTMAC_EVENT_LAST)
94 return -ENOSYS; 94 return -ENOSYS;
95 95
96 if (!fun) 96 if (!fun)
97 return -EINVAL; 97 return -EINVAL;
98 98
99 eventptr = kmalloc(sizeof(struct ieee80211softmac_event), gfp_mask); 99 eventptr = kmalloc(sizeof(struct ieee80211softmac_event), gfp_mask);
100 if (!eventptr) 100 if (!eventptr)
101 return -ENOMEM; 101 return -ENOMEM;
102 102
103 eventptr->event_type = event; 103 eventptr->event_type = event;
104 INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback); 104 INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
105 eventptr->fun = fun; 105 eventptr->fun = fun;
@@ -122,7 +122,7 @@ ieee80211softmac_notify_gfp(struct net_device *dev,
122 122
123 if (event < 0 || event > IEEE80211SOFTMAC_EVENT_LAST) 123 if (event < 0 || event > IEEE80211SOFTMAC_EVENT_LAST)
124 return -ENOSYS; 124 return -ENOSYS;
125 125
126 return ieee80211softmac_notify_internal(mac, event, NULL, fun, context, gfp_mask); 126 return ieee80211softmac_notify_internal(mac, event, NULL, fun, context, gfp_mask);
127} 127}
128EXPORT_SYMBOL_GPL(ieee80211softmac_notify_gfp); 128EXPORT_SYMBOL_GPL(ieee80211softmac_notify_gfp);
@@ -133,7 +133,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
133{ 133{
134 struct ieee80211softmac_event *eventptr, *tmp; 134 struct ieee80211softmac_event *eventptr, *tmp;
135 struct ieee80211softmac_network *network; 135 struct ieee80211softmac_network *network;
136 136
137 if (event >= 0) { 137 if (event >= 0) {
138 union iwreq_data wrqu; 138 union iwreq_data wrqu;
139 int we_event; 139 int we_event;
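
ieee80211softmac_notify_callback() above copies the kmalloc'd event onto the stack and frees it before invoking the user callback, so nothing dangles even if the callback sleeps or re-arms another event. That copy-then-free idiom in isolation (a sketch; the structure is a stand-in):

#include <linux/slab.h>

struct demo_event {
	int type;
	void (*fun)(int type, void *ctx);
	void *ctx;
};

/* Take a stack copy of the heap object, release it, then run the user
 * callback from the copy, as the softmac notify callback does above.  */
static void demo_fire(struct demo_event *pevent)
{
	struct demo_event event = *pevent;

	kfree(pevent);
	event.fun(event.type, event.ctx);
}
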
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index b96931001b43..26c35253be33 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Some parts based on code from net80211 2 * Some parts based on code from net80211
3 * Copyright (c) 2001 Atsushi Onoe 3 * Copyright (c) 2001 Atsushi Onoe
4 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting 4 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
@@ -29,14 +29,14 @@
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * 32 *
33 */ 33 */
34 34
35#include "ieee80211softmac_priv.h" 35#include "ieee80211softmac_priv.h"
36 36
37/* Helper functions for inserting data into the frames */ 37/* Helper functions for inserting data into the frames */
38 38
39/* 39/*
40 * Adds an ESSID element to the frame 40 * Adds an ESSID element to the frame
41 * 41 *
42 */ 42 */
@@ -53,7 +53,7 @@ ieee80211softmac_add_essid(u8 *dst, struct ieee80211softmac_essid *essid)
53 *dst++ = 0; 53 *dst++ = 0;
54 return dst; 54 return dst;
55 } 55 }
56} 56}
57 57
58/* Adds Supported Rates and if required Extended Rates Information Element 58/* Adds Supported Rates and if required Extended Rates Information Element
59 * to the frame, ASSUMES WE HAVE A SORTED LIST OF RATES */ 59 * to the frame, ASSUMES WE HAVE A SORTED LIST OF RATES */
@@ -81,18 +81,18 @@ ieee80211softmac_frame_add_rates(u8 *dst, const struct ieee80211softmac_ratesinf
81 memcpy(dst, r->rates + cck_len, ofdm_len); 81 memcpy(dst, r->rates + cck_len, ofdm_len);
82 dst += ofdm_len; 82 dst += ofdm_len;
83 } 83 }
84 } 84 }
85 return dst; 85 return dst;
86} 86}
87 87
88/* Allocate a management frame */ 88/* Allocate a management frame */
89static u8 * 89static u8 *
90ieee80211softmac_alloc_mgt(u32 size) 90ieee80211softmac_alloc_mgt(u32 size)
91{ 91{
92 u8 * data; 92 u8 * data;
93 93
94 /* Add the header and FCS to the size */ 94 /* Add the header and FCS to the size */
95 size = size + IEEE80211_3ADDR_LEN; 95 size = size + IEEE80211_3ADDR_LEN;
96 if(size > IEEE80211_DATA_LEN) 96 if(size > IEEE80211_DATA_LEN)
97 return NULL; 97 return NULL;
98 /* Allocate the frame */ 98 /* Allocate the frame */
@@ -103,13 +103,13 @@ ieee80211softmac_alloc_mgt(u32 size)
103/* 103/*
104 * Add a 2 Address Header 104 * Add a 2 Address Header
105 */ 105 */
106static void 106static void
107ieee80211softmac_hdr_2addr(struct ieee80211softmac_device *mac, 107ieee80211softmac_hdr_2addr(struct ieee80211softmac_device *mac,
108 struct ieee80211_hdr_2addr *header, u32 type, u8 *dest) 108 struct ieee80211_hdr_2addr *header, u32 type, u8 *dest)
109{ 109{
110 /* Fill in the frame control flags */ 110 /* Fill in the frame control flags */
111 header->frame_ctl = cpu_to_le16(type); 111 header->frame_ctl = cpu_to_le16(type);
112 /* Control packets always have WEP turned off */ 112 /* Control packets always have WEP turned off */
113 if(type > IEEE80211_STYPE_CFENDACK && type < IEEE80211_STYPE_PSPOLL) 113 if(type > IEEE80211_STYPE_CFENDACK && type < IEEE80211_STYPE_PSPOLL)
114 header->frame_ctl |= mac->ieee->sec.level ? cpu_to_le16(IEEE80211_FCTL_PROTECTED) : 0; 114 header->frame_ctl |= mac->ieee->sec.level ? cpu_to_le16(IEEE80211_FCTL_PROTECTED) : 0;
115 115
@@ -130,13 +130,13 @@ ieee80211softmac_hdr_2addr(struct ieee80211softmac_device *mac,
130 130
131 131
132/* Add a 3 Address Header */ 132/* Add a 3 Address Header */
133static void 133static void
134ieee80211softmac_hdr_3addr(struct ieee80211softmac_device *mac, 134ieee80211softmac_hdr_3addr(struct ieee80211softmac_device *mac,
135 struct ieee80211_hdr_3addr *header, u32 type, u8 *dest, u8 *bssid) 135 struct ieee80211_hdr_3addr *header, u32 type, u8 *dest, u8 *bssid)
136{ 136{
137 /* This is common with 2addr, so use that instead */ 137 /* This is common with 2addr, so use that instead */
138 ieee80211softmac_hdr_2addr(mac, (struct ieee80211_hdr_2addr *)header, type, dest); 138 ieee80211softmac_hdr_2addr(mac, (struct ieee80211_hdr_2addr *)header, type, dest);
139 139
140 /* Fill in the BSS ID */ 140 /* Fill in the BSS ID */
141 if(bssid == NULL) 141 if(bssid == NULL)
142 memset(header->addr3, 0xFF, ETH_ALEN); 142 memset(header->addr3, 0xFF, ETH_ALEN);
@@ -201,11 +201,11 @@ ieee80211softmac_capabilities(struct ieee80211softmac_device *mac,
201 201
202/***************************************************************************** 202/*****************************************************************************
203 * Create Management packets 203 * Create Management packets
204 *****************************************************************************/ 204 *****************************************************************************/
205 205
206/* Creates an association request packet */ 206/* Creates an association request packet */
207static u32 207static u32
208ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt, 208ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
209 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net) 209 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net)
210{ 210{
211 u8 *data; 211 u8 *data;
@@ -233,7 +233,7 @@ ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
233 233
234 /* Fill in Listen Interval (?) */ 234 /* Fill in Listen Interval (?) */
235 (*pkt)->listen_interval = cpu_to_le16(10); 235 (*pkt)->listen_interval = cpu_to_le16(10);
236 236
237 data = (u8 *)(*pkt)->info_element; 237 data = (u8 *)(*pkt)->info_element;
238 /* Add SSID */ 238 /* Add SSID */
239 data = ieee80211softmac_add_essid(data, &net->essid); 239 data = ieee80211softmac_add_essid(data, &net->essid);
@@ -250,7 +250,7 @@ ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
250 250
251/* Create a reassociation request packet */ 251/* Create a reassociation request packet */
252static u32 252static u32
253ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt, 253ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
254 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net) 254 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net)
255{ 255{
256 u8 *data; 256 u8 *data;
@@ -263,9 +263,9 @@ ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
263 /* Rates IE */ 263 /* Rates IE */
264 1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN + 264 1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN +
265 /* Extended Rates IE */ 265 /* Extended Rates IE */
266 1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN 266 1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN
267 /* Other IE's? */ 267 /* Other IE's? */
268 ); 268 );
269 if (unlikely((*pkt) == NULL)) 269 if (unlikely((*pkt) == NULL))
270 return 0; 270 return 0;
271 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_REASSOC_REQ, net->bssid, net->bssid); 271 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_REASSOC_REQ, net->bssid, net->bssid);
@@ -277,10 +277,10 @@ ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
277 (*pkt)->listen_interval = cpu_to_le16(10); 277 (*pkt)->listen_interval = cpu_to_le16(10);
278 /* Fill in the current AP MAC */ 278 /* Fill in the current AP MAC */
279 memcpy((*pkt)->current_ap, mac->ieee->bssid, ETH_ALEN); 279 memcpy((*pkt)->current_ap, mac->ieee->bssid, ETH_ALEN);
280 280
281 data = (u8 *)(*pkt)->info_element; 281 data = (u8 *)(*pkt)->info_element;
282 /* Add SSID */ 282 /* Add SSID */
283 data = ieee80211softmac_add_essid(data, &net->essid); 283 data = ieee80211softmac_add_essid(data, &net->essid);
284 /* Add Rates */ 284 /* Add Rates */
285 data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo); 285 data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo);
286 /* Return packet size */ 286 /* Return packet size */
@@ -289,7 +289,7 @@ ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
289 289
290/* Create an authentication packet */ 290/* Create an authentication packet */
291static u32 291static u32
292ieee80211softmac_auth(struct ieee80211_auth **pkt, 292ieee80211softmac_auth(struct ieee80211_auth **pkt,
293 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net, 293 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net,
294 u16 transaction, u16 status, int *encrypt_mpdu) 294 u16 transaction, u16 status, int *encrypt_mpdu)
295{ 295{
@@ -309,20 +309,20 @@ ieee80211softmac_auth(struct ieee80211_auth **pkt,
309 if (unlikely((*pkt) == NULL)) 309 if (unlikely((*pkt) == NULL))
310 return 0; 310 return 0;
311 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_AUTH, net->bssid, net->bssid); 311 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_AUTH, net->bssid, net->bssid);
312 312
313 /* Algorithm */ 313 /* Algorithm */
314 (*pkt)->algorithm = cpu_to_le16(auth_mode); 314 (*pkt)->algorithm = cpu_to_le16(auth_mode);
315 /* Transaction */ 315 /* Transaction */
316 (*pkt)->transaction = cpu_to_le16(transaction); 316 (*pkt)->transaction = cpu_to_le16(transaction);
317 /* Status */ 317 /* Status */
318 (*pkt)->status = cpu_to_le16(status); 318 (*pkt)->status = cpu_to_le16(status);
319 319
320 data = (u8 *)(*pkt)->info_element; 320 data = (u8 *)(*pkt)->info_element;
321 /* Challenge Text */ 321 /* Challenge Text */
322 if (is_shared_response) { 322 if (is_shared_response) {
323 *data = MFIE_TYPE_CHALLENGE; 323 *data = MFIE_TYPE_CHALLENGE;
324 data++; 324 data++;
325 325
326 /* Copy the challenge in */ 326 /* Copy the challenge in */
327 *data = net->challenge_len; 327 *data = net->challenge_len;
328 data++; 328 data++;
@@ -360,7 +360,7 @@ static u32
360ieee80211softmac_probe_req(struct ieee80211_probe_request **pkt, 360ieee80211softmac_probe_req(struct ieee80211_probe_request **pkt,
361 struct ieee80211softmac_device *mac, struct ieee80211softmac_essid *essid) 361 struct ieee80211softmac_device *mac, struct ieee80211softmac_essid *essid)
362{ 362{
363 u8 *data; 363 u8 *data;
364 /* Allocate Packet */ 364 /* Allocate Packet */
365 (*pkt) = (struct ieee80211_probe_request *)ieee80211softmac_alloc_mgt( 365 (*pkt) = (struct ieee80211_probe_request *)ieee80211softmac_alloc_mgt(
366 /* SSID of requested network */ 366 /* SSID of requested network */
@@ -368,12 +368,12 @@ ieee80211softmac_probe_req(struct ieee80211_probe_request **pkt,
368 /* Rates IE */ 368 /* Rates IE */
369 1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN + 369 1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN +
370 /* Extended Rates IE */ 370 /* Extended Rates IE */
371 1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN 371 1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN
372 ); 372 );
373 if (unlikely((*pkt) == NULL)) 373 if (unlikely((*pkt) == NULL))
374 return 0; 374 return 0;
375 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_REQ, NULL, NULL); 375 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_REQ, NULL, NULL);
376 376
377 data = (u8 *)(*pkt)->info_element; 377 data = (u8 *)(*pkt)->info_element;
378 /* Add ESSID (can be NULL) */ 378 /* Add ESSID (can be NULL) */
379 data = ieee80211softmac_add_essid(data, essid); 379 data = ieee80211softmac_add_essid(data, essid);
@@ -401,7 +401,7 @@ ieee80211softmac_probe_resp(struct ieee80211_probe_response **pkt,
401 2 + /* DS Parameter Set */ 401 2 + /* DS Parameter Set */
402 8 + /* CF Parameter Set */ 402 8 + /* CF Parameter Set */
403 4 /* IBSS Parameter Set */ 403 4 /* IBSS Parameter Set */
404 ); 404 );
405 if (unlikely((*pkt) == NULL)) 405 if (unlikely((*pkt) == NULL))
406 return 0; 406 return 0;
407 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_RESP, net->bssid, net->bssid); 407 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_RESP, net->bssid, net->bssid);
@@ -445,15 +445,15 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
445 pkt_size = ieee80211softmac_probe_resp((struct ieee80211_probe_response **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg); 445 pkt_size = ieee80211softmac_probe_resp((struct ieee80211_probe_response **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg);
446 break; 446 break;
447 default: 447 default:
448 printkl(KERN_DEBUG PFX "Unsupported Management Frame type: %i\n", type); 448 printkl(KERN_DEBUG PFX "Unsupported Management Frame type: %i\n", type);
449 return -EINVAL; 449 return -EINVAL;
450 }; 450 };
451 451
452 if(pkt_size == 0 || pkt == NULL) { 452 if(pkt_size == 0 || pkt == NULL) {
453 printkl(KERN_DEBUG PFX "Error, packet is nonexistant or 0 length\n"); 453 printkl(KERN_DEBUG PFX "Error, packet is nonexistant or 0 length\n");
454 return -ENOMEM; 454 return -ENOMEM;
455 } 455 }
456 456
457 /* Send the packet to the ieee80211 layer for tx */ 457 /* Send the packet to the ieee80211 layer for tx */
458 /* we defined softmac->mgmt_xmit for this. Should we keep it 458 /* we defined softmac->mgmt_xmit for this. Should we keep it
459 * as it is (that means we'd need to wrap this into a txb), 459 * as it is (that means we'd need to wrap this into a txb),
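
ieee80211softmac_add_essid() and ieee80211softmac_frame_add_rates() above append 802.11 information elements, each a one-byte element ID, a one-byte length and the payload, to the management frame being built. A hedged sketch of that generic append step (the element ID is the standard SSID one; the helper itself is illustrative), used as ie_append(pos, WLAN_EID_SSID, essid, essid_len):

#include <stdint.h>
#include <string.h>

#define WLAN_EID_SSID	0	/* standard 802.11 element ID for the SSID */

/* Append one information element: ID byte, length byte, then the payload.
 * Returns the new write position, like the softmac helpers above.        */
static uint8_t *ie_append(uint8_t *dst, uint8_t eid,
			  const uint8_t *data, uint8_t len)
{
	*dst++ = eid;
	*dst++ = len;
	memcpy(dst, data, len);
	return dst + len;
}
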
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 256207b71dc9..4f8c3ef70819 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -32,19 +32,19 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
32{ 32{
33 struct ieee80211softmac_device *softmac; 33 struct ieee80211softmac_device *softmac;
34 struct net_device *dev; 34 struct net_device *dev;
35 35
36 dev = alloc_ieee80211(sizeof(struct ieee80211softmac_device) + sizeof_priv); 36 dev = alloc_ieee80211(sizeof(struct ieee80211softmac_device) + sizeof_priv);
37 softmac = ieee80211_priv(dev); 37 softmac = ieee80211_priv(dev);
38 softmac->dev = dev; 38 softmac->dev = dev;
39 softmac->ieee = netdev_priv(dev); 39 softmac->ieee = netdev_priv(dev);
40 spin_lock_init(&softmac->lock); 40 spin_lock_init(&softmac->lock);
41 41
42 softmac->ieee->handle_auth = ieee80211softmac_auth_resp; 42 softmac->ieee->handle_auth = ieee80211softmac_auth_resp;
43 softmac->ieee->handle_deauth = ieee80211softmac_deauth_resp; 43 softmac->ieee->handle_deauth = ieee80211softmac_deauth_resp;
44 softmac->ieee->handle_assoc_response = ieee80211softmac_handle_assoc_response; 44 softmac->ieee->handle_assoc_response = ieee80211softmac_handle_assoc_response;
45 softmac->ieee->handle_reassoc_request = ieee80211softmac_handle_reassoc_req; 45 softmac->ieee->handle_reassoc_request = ieee80211softmac_handle_reassoc_req;
46 softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc; 46 softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc;
47 softmac->ieee->handle_beacon = ieee80211softmac_handle_beacon; 47 softmac->ieee->handle_beacon = ieee80211softmac_handle_beacon;
48 softmac->scaninfo = NULL; 48 softmac->scaninfo = NULL;
49 49
50 softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; 50 softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
@@ -66,37 +66,37 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
66 66
67 /* to start with, we can't send anything ... */ 67 /* to start with, we can't send anything ... */
68 netif_carrier_off(dev); 68 netif_carrier_off(dev);
69 69
70 return dev; 70 return dev;
71} 71}
72EXPORT_SYMBOL_GPL(alloc_ieee80211softmac); 72EXPORT_SYMBOL_GPL(alloc_ieee80211softmac);
73 73
74/* Clears the pending work queue items, stops all scans, etc. */ 74/* Clears the pending work queue items, stops all scans, etc. */
75void 75void
76ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm) 76ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm)
77{ 77{
78 unsigned long flags; 78 unsigned long flags;
79 struct ieee80211softmac_event *eventptr, *eventtmp; 79 struct ieee80211softmac_event *eventptr, *eventtmp;
80 struct ieee80211softmac_auth_queue_item *authptr, *authtmp; 80 struct ieee80211softmac_auth_queue_item *authptr, *authtmp;
81 struct ieee80211softmac_network *netptr, *nettmp; 81 struct ieee80211softmac_network *netptr, *nettmp;
82 82
83 ieee80211softmac_stop_scan(sm); 83 ieee80211softmac_stop_scan(sm);
84 ieee80211softmac_wait_for_scan(sm); 84 ieee80211softmac_wait_for_scan(sm);
85 85
86 spin_lock_irqsave(&sm->lock, flags); 86 spin_lock_irqsave(&sm->lock, flags);
87 sm->running = 0; 87 sm->running = 0;
88 88
89 /* Free all pending assoc work items */ 89 /* Free all pending assoc work items */
90 cancel_delayed_work(&sm->associnfo.work); 90 cancel_delayed_work(&sm->associnfo.work);
91 91
92 /* Free all pending scan work items */ 92 /* Free all pending scan work items */
93 if(sm->scaninfo != NULL) 93 if(sm->scaninfo != NULL)
94 cancel_delayed_work(&sm->scaninfo->softmac_scan); 94 cancel_delayed_work(&sm->scaninfo->softmac_scan);
95 95
96 /* Free all pending auth work items */ 96 /* Free all pending auth work items */
97 list_for_each_entry(authptr, &sm->auth_queue, list) 97 list_for_each_entry(authptr, &sm->auth_queue, list)
98 cancel_delayed_work(&authptr->work); 98 cancel_delayed_work(&authptr->work);
99 99
100 /* delete all pending event calls and work items */ 100 /* delete all pending event calls and work items */
101 list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list) 101 list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list)
102 cancel_delayed_work(&eventptr->work); 102 cancel_delayed_work(&eventptr->work);
@@ -111,13 +111,13 @@ ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm)
111 list_del(&authptr->list); 111 list_del(&authptr->list);
112 kfree(authptr); 112 kfree(authptr);
113 } 113 }
114 114
115 /* delete all pending event calls and work items */ 115 /* delete all pending event calls and work items */
116 list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list) { 116 list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list) {
117 list_del(&eventptr->list); 117 list_del(&eventptr->list);
118 kfree(eventptr); 118 kfree(eventptr);
119 } 119 }
120 120
121 /* Free all networks */ 121 /* Free all networks */
122 list_for_each_entry_safe(netptr, nettmp, &sm->network_list, list) { 122 list_for_each_entry_safe(netptr, nettmp, &sm->network_list, list) {
123 ieee80211softmac_del_network_locked(sm, netptr); 123 ieee80211softmac_del_network_locked(sm, netptr);
@@ -133,7 +133,7 @@ EXPORT_SYMBOL_GPL(ieee80211softmac_clear_pending_work);
133void free_ieee80211softmac(struct net_device *dev) 133void free_ieee80211softmac(struct net_device *dev)
134{ 134{
135 struct ieee80211softmac_device *sm = ieee80211_priv(dev); 135 struct ieee80211softmac_device *sm = ieee80211_priv(dev);
136 ieee80211softmac_clear_pending_work(sm); 136 ieee80211softmac_clear_pending_work(sm);
137 kfree(sm->scaninfo); 137 kfree(sm->scaninfo);
138 kfree(sm->wpa.IE); 138 kfree(sm->wpa.IE);
139 free_ieee80211(dev); 139 free_ieee80211(dev);
@@ -208,9 +208,9 @@ EXPORT_SYMBOL_GPL(ieee80211softmac_highest_supported_rate);
208void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac, 208void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
209 u8 erp_value) 209 u8 erp_value)
210{ 210{
211 int use_protection; 211 int use_protection;
212 int short_preamble; 212 int short_preamble;
213 u32 changes = 0; 213 u32 changes = 0;
214 214
215 /* Barker preamble mode */ 215 /* Barker preamble mode */
216 short_preamble = ((erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0 216 short_preamble = ((erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0
@@ -269,7 +269,7 @@ void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac)
269 rates, so 801.11g devices start off at 11M for now. People 269 rates, so 801.11g devices start off at 11M for now. People
270 can manually change it if they really need to, but 11M is 270 can manually change it if they really need to, but 11M is
271 more reliable. Note similar logic in 271 more reliable. Note similar logic in
272 ieee80211softmac_wx_set_rate() */ 272 ieee80211softmac_wx_set_rate() */
273 if (ieee->modulation & IEEE80211_CCK_MODULATION) { 273 if (ieee->modulation & IEEE80211_CCK_MODULATION) {
274 txrates->user_rate = IEEE80211_CCK_RATE_11MB; 274 txrates->user_rate = IEEE80211_CCK_RATE_11MB;
275 } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) { 275 } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
@@ -332,7 +332,7 @@ void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates)
332{ 332{
333 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 333 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
334 unsigned long flags; 334 unsigned long flags;
335 335
336 spin_lock_irqsave(&mac->lock, flags); 336 spin_lock_irqsave(&mac->lock, flags);
337 memcpy(mac->ratesinfo.rates, rates, count); 337 memcpy(mac->ratesinfo.rates, rates, count);
338 mac->ratesinfo.count = count; 338 mac->ratesinfo.count = count;
@@ -344,7 +344,7 @@ static u8 raise_rate(struct ieee80211softmac_device *mac, u8 rate)
344{ 344{
345 int i; 345 int i;
346 struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo; 346 struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo;
347 347
348 for (i=0; i<ri->count-1; i++) { 348 for (i=0; i<ri->count-1; i++) {
349 if (ri->rates[i] == rate) 349 if (ri->rates[i] == rate)
350 return ri->rates[i+1]; 350 return ri->rates[i+1];
@@ -357,7 +357,7 @@ u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rat
357{ 357{
358 int i; 358 int i;
359 struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo; 359 struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo;
360 360
361 for (i=delta; i<ri->count; i++) { 361 for (i=delta; i<ri->count; i++) {
362 if (ri->rates[i] == rate) 362 if (ri->rates[i] == rate)
363 return ri->rates[i-delta]; 363 return ri->rates[i-delta];
@@ -438,7 +438,7 @@ ieee80211softmac_create_network(struct ieee80211softmac_device *mac,
438 softnet->channel = net->channel; 438 softnet->channel = net->channel;
439 softnet->essid.len = net->ssid_len; 439 softnet->essid.len = net->ssid_len;
440 memcpy(softnet->essid.data, net->ssid, softnet->essid.len); 440 memcpy(softnet->essid.data, net->ssid, softnet->essid.len);
441 441
442 /* copy rates over */ 442 /* copy rates over */
443 softnet->supported_rates.count = net->rates_len; 443 softnet->supported_rates.count = net->rates_len;
444 memcpy(&softnet->supported_rates.rates[0], net->rates, net->rates_len); 444 memcpy(&softnet->supported_rates.rates[0], net->rates, net->rates_len);
@@ -529,7 +529,7 @@ ieee80211softmac_get_network_by_bssid(struct ieee80211softmac_device *mac,
529{ 529{
530 unsigned long flags; 530 unsigned long flags;
531 struct ieee80211softmac_network *softmac_net; 531 struct ieee80211softmac_network *softmac_net;
532 532
533 spin_lock_irqsave(&mac->lock, flags); 533 spin_lock_irqsave(&mac->lock, flags);
534 softmac_net = ieee80211softmac_get_network_by_bssid_locked(mac, bssid); 534 softmac_net = ieee80211softmac_get_network_by_bssid_locked(mac, bssid);
535 spin_unlock_irqrestore(&mac->lock, flags); 535 spin_unlock_irqrestore(&mac->lock, flags);
@@ -556,13 +556,13 @@ ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac
556/* Get a network from the list by ESSID with locking */ 556/* Get a network from the list by ESSID with locking */
557struct ieee80211softmac_network * 557struct ieee80211softmac_network *
558ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac, 558ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
559 struct ieee80211softmac_essid *essid) 559 struct ieee80211softmac_essid *essid)
560{ 560{
561 unsigned long flags; 561 unsigned long flags;
562 struct ieee80211softmac_network *softmac_net = NULL; 562 struct ieee80211softmac_network *softmac_net = NULL;
563 563
564 spin_lock_irqsave(&mac->lock, flags); 564 spin_lock_irqsave(&mac->lock, flags);
565 softmac_net = ieee80211softmac_get_network_by_essid_locked(mac, essid); 565 softmac_net = ieee80211softmac_get_network_by_essid_locked(mac, essid);
566 spin_unlock_irqrestore(&mac->lock, flags); 566 spin_unlock_irqrestore(&mac->lock, flags);
567 return softmac_net; 567 return softmac_net;
568} 568}
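
raise_rate() and ieee80211softmac_lower_rate_delta() above walk the sorted ratesinfo table and step one entry up, or delta entries down, from the current rate. The same lookup in isolation (a sketch; the bounds handling is kept deliberately simple and the function name is illustrative):

#include <stdint.h>

/* Step through a sorted rate table, as raise_rate()/lower_rate_delta() do.
 * Returns the current rate unchanged when no suitable neighbour exists.   */
static uint8_t rate_step(const uint8_t *rates, int count, uint8_t cur, int step)
{
	int i;

	for (i = 0; i < count; i++) {
		if (rates[i] != cur)
			continue;
		if (i + step >= 0 && i + step < count)
			return rates[i + step];
		break;
	}
	return cur;		/* unknown rate or table edge: keep what we have */
}
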
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 4c2bba34d328..c43b189634df 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -130,7 +130,7 @@ static inline u8 get_fallback_rate(struct ieee80211softmac_device *mac, u8 rate)
130{ 130{
131 return ieee80211softmac_lower_rate_delta(mac, rate, 2); 131 return ieee80211softmac_lower_rate_delta(mac, rate, 2);
132} 132}
133 133
134 134
135/*** prototypes from _io.c */ 135/*** prototypes from _io.c */
136int ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac, 136int ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
@@ -156,7 +156,7 @@ int ieee80211softmac_handle_assoc_response(struct net_device * dev,
156int ieee80211softmac_handle_disassoc(struct net_device * dev, 156int ieee80211softmac_handle_disassoc(struct net_device * dev,
157 struct ieee80211_disassoc * disassoc); 157 struct ieee80211_disassoc * disassoc);
158int ieee80211softmac_handle_reassoc_req(struct net_device * dev, 158int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
159 struct ieee80211_reassoc_request * reassoc); 159 struct ieee80211_reassoc_request * reassoc);
160void ieee80211softmac_assoc_timeout(struct work_struct *work); 160void ieee80211softmac_assoc_timeout(struct work_struct *work);
161void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); 161void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
162void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); 162void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
@@ -164,15 +164,15 @@ void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
164/* some helper functions */ 164/* some helper functions */
165static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm) 165static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm)
166{ 166{
167 return (sm->start_scan == ieee80211softmac_start_scan_implementation) && 167 return (sm->start_scan == ieee80211softmac_start_scan_implementation) &&
168 (sm->stop_scan == ieee80211softmac_stop_scan_implementation) && 168 (sm->stop_scan == ieee80211softmac_stop_scan_implementation) &&
169 (sm->wait_for_scan == ieee80211softmac_wait_for_scan_implementation); 169 (sm->wait_for_scan == ieee80211softmac_wait_for_scan_implementation);
170} 170}
171 171
172static inline int ieee80211softmac_scan_sanity_check(struct ieee80211softmac_device *sm) 172static inline int ieee80211softmac_scan_sanity_check(struct ieee80211softmac_device *sm)
173{ 173{
174 return ((sm->start_scan != ieee80211softmac_start_scan_implementation) && 174 return ((sm->start_scan != ieee80211softmac_start_scan_implementation) &&
175 (sm->stop_scan != ieee80211softmac_stop_scan_implementation) && 175 (sm->stop_scan != ieee80211softmac_stop_scan_implementation) &&
176 (sm->wait_for_scan != ieee80211softmac_wait_for_scan_implementation) 176 (sm->wait_for_scan != ieee80211softmac_wait_for_scan_implementation)
177 ) || ieee80211softmac_scan_handlers_check_self(sm); 177 ) || ieee80211softmac_scan_handlers_check_self(sm);
178} 178}
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 0c85d6c24cdb..abea3648680e 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -62,12 +62,12 @@ ieee80211softmac_stop_scan(struct ieee80211softmac_device *sm)
62 unsigned long flags; 62 unsigned long flags;
63 63
64 spin_lock_irqsave(&sm->lock, flags); 64 spin_lock_irqsave(&sm->lock, flags);
65 65
66 if (!sm->scanning) { 66 if (!sm->scanning) {
67 spin_unlock_irqrestore(&sm->lock, flags); 67 spin_unlock_irqrestore(&sm->lock, flags);
68 return; 68 return;
69 } 69 }
70 70
71 spin_unlock_irqrestore(&sm->lock, flags); 71 spin_unlock_irqrestore(&sm->lock, flags);
72 sm->stop_scan(sm->dev); 72 sm->stop_scan(sm->dev);
73} 73}
@@ -78,12 +78,12 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
78 unsigned long flags; 78 unsigned long flags;
79 79
80 spin_lock_irqsave(&sm->lock, flags); 80 spin_lock_irqsave(&sm->lock, flags);
81 81
82 if (!sm->scanning) { 82 if (!sm->scanning) {
83 spin_unlock_irqrestore(&sm->lock, flags); 83 spin_unlock_irqrestore(&sm->lock, flags);
84 return; 84 return;
85 } 85 }
86 86
87 spin_unlock_irqrestore(&sm->lock, flags); 87 spin_unlock_irqrestore(&sm->lock, flags);
88 sm->wait_for_scan(sm->dev); 88 sm->wait_for_scan(sm->dev);
89} 89}
@@ -158,14 +158,14 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
158{ 158{
159 struct ieee80211softmac_device *sm = ieee80211_priv(dev); 159 struct ieee80211softmac_device *sm = ieee80211_priv(dev);
160 unsigned long flags; 160 unsigned long flags;
161 161
162 if (!(dev->flags & IFF_UP)) 162 if (!(dev->flags & IFF_UP))
163 return -ENODEV; 163 return -ENODEV;
164 164
165 assert(ieee80211softmac_scan_handlers_check_self(sm)); 165 assert(ieee80211softmac_scan_handlers_check_self(sm));
166 if (!ieee80211softmac_scan_handlers_check_self(sm)) 166 if (!ieee80211softmac_scan_handlers_check_self(sm))
167 return -EINVAL; 167 return -EINVAL;
168 168
169 spin_lock_irqsave(&sm->lock, flags); 169 spin_lock_irqsave(&sm->lock, flags);
170 /* it looks like we need to hold the lock here 170 /* it looks like we need to hold the lock here
171 * to make sure we don't allocate two of these... */ 171 * to make sure we don't allocate two of these... */
@@ -241,7 +241,7 @@ void ieee80211softmac_scan_finished(struct ieee80211softmac_device *sm)
241 spin_lock_irqsave(&sm->lock, flags); 241 spin_lock_irqsave(&sm->lock, flags);
242 sm->scanning = 0; 242 sm->scanning = 0;
243 spin_unlock_irqrestore(&sm->lock, flags); 243 spin_unlock_irqrestore(&sm->lock, flags);
244 244
245 if (sm->associnfo.bssvalid) { 245 if (sm->associnfo.bssvalid) {
246 struct ieee80211softmac_network *net; 246 struct ieee80211softmac_network *net;
247 247
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index fb58e03b3fbd..c306d52566e0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -142,14 +142,14 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev,
142 /* If all fails, return ANY (empty) */ 142 /* If all fails, return ANY (empty) */
143 data->essid.length = 0; 143 data->essid.length = 0;
144 data->essid.flags = 0; /* active */ 144 data->essid.flags = 0; /* active */
145 145
146 /* If we have a statically configured ESSID then return it */ 146 /* If we have a statically configured ESSID then return it */
147 if (sm->associnfo.static_essid) { 147 if (sm->associnfo.static_essid) {
148 data->essid.length = sm->associnfo.req_essid.len; 148 data->essid.length = sm->associnfo.req_essid.len;
149 data->essid.flags = 1; /* active */ 149 data->essid.flags = 1; /* active */
150 memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len); 150 memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
151 } 151 }
152 152
153 /* If we're associating/associated, return that */ 153 /* If we're associating/associated, return that */
154 if (sm->associnfo.associated || sm->associnfo.associating) { 154 if (sm->associnfo.associated || sm->associnfo.associating) {
155 data->essid.length = sm->associnfo.associate_essid.len; 155 data->essid.length = sm->associnfo.associate_essid.len;
@@ -181,7 +181,7 @@ ieee80211softmac_wx_set_rate(struct net_device *net_dev,
181 rates, so 802.11g devices start off at 11M for now. People 181 rates, so 802.11g devices start off at 11M for now. People
182 can manually change it if they really need to, but 11M is 182 can manually change it if they really need to, but 11M is
183 more reliable. Note similar logic in 183 more reliable. Note similar logic in
184 ieee80211softmac_wx_set_rate() */ 184 ieee80211softmac_wx_set_rate() */
185 if (ieee->modulation & IEEE80211_CCK_MODULATION) 185 if (ieee->modulation & IEEE80211_CCK_MODULATION)
186 in_rate = 11000000; 186 in_rate = 11000000;
187 else 187 else
@@ -247,7 +247,7 @@ ieee80211softmac_wx_set_rate(struct net_device *net_dev,
247 ieee80211softmac_recalc_txrates(mac); 247 ieee80211softmac_recalc_txrates(mac);
248 err = 0; 248 err = 0;
249 249
250out_unlock: 250out_unlock:
251 spin_unlock_irqrestore(&mac->lock, flags); 251 spin_unlock_irqrestore(&mac->lock, flags);
252out: 252out:
253 return err; 253 return err;
@@ -366,7 +366,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
366 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { 366 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
367 /* the bssid we have is no longer fixed */ 367 /* the bssid we have is no longer fixed */
368 mac->associnfo.bssfixed = 0; 368 mac->associnfo.bssfixed = 0;
369 } else { 369 } else {
370 if (!memcmp(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN)) { 370 if (!memcmp(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN)) {
371 if (mac->associnfo.associating || mac->associnfo.associated) { 371 if (mac->associnfo.associating || mac->associnfo.associated) {
372 /* bssid unchanged and associated or associating - just return */ 372 /* bssid unchanged and associated or associating - just return */
@@ -380,7 +380,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
380 mac->associnfo.bssfixed = 1; 380 mac->associnfo.bssfixed = 1;
381 /* queue associate if new bssid or (old one again and not associated) */ 381 /* queue associate if new bssid or (old one again and not associated) */
382 schedule_delayed_work(&mac->associnfo.work, 0); 382 schedule_delayed_work(&mac->associnfo.work, 0);
383 } 383 }
384 384
385 out: 385 out:
386 mutex_unlock(&mac->associnfo.mutex); 386 mutex_unlock(&mac->associnfo.mutex);
@@ -437,7 +437,7 @@ ieee80211softmac_wx_set_genie(struct net_device *dev,
437 mac->wpa.IEbuflen = 0; 437 mac->wpa.IEbuflen = 0;
438 } 438 }
439 439
440 out: 440 out:
441 spin_unlock_irqrestore(&mac->lock, flags); 441 spin_unlock_irqrestore(&mac->lock, flags);
442 mutex_unlock(&mac->associnfo.mutex); 442 mutex_unlock(&mac->associnfo.mutex);
443 443
@@ -458,9 +458,9 @@ ieee80211softmac_wx_get_genie(struct net_device *dev,
458 458
459 mutex_lock(&mac->associnfo.mutex); 459 mutex_lock(&mac->associnfo.mutex);
460 spin_lock_irqsave(&mac->lock, flags); 460 spin_lock_irqsave(&mac->lock, flags);
461 461
462 wrqu->data.length = 0; 462 wrqu->data.length = 0;
463 463
464 if (mac->wpa.IE && mac->wpa.IElen) { 464 if (mac->wpa.IE && mac->wpa.IElen) {
465 wrqu->data.length = mac->wpa.IElen; 465 wrqu->data.length = mac->wpa.IElen;
466 if (mac->wpa.IElen <= space) 466 if (mac->wpa.IElen <= space)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5750a2b2a0d6..cf358c84c440 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -550,7 +550,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
550 if (err < 0) 550 if (err < 0)
551 goto out; 551 goto out;
552 552
553 sock->state = SS_CONNECTING; 553 sock->state = SS_CONNECTING;
554 554
555 /* Just entered SS_CONNECTING state; the only 555 /* Just entered SS_CONNECTING state; the only
556 * difference is that return value in non-blocking 556 * difference is that return value in non-blocking
@@ -878,36 +878,36 @@ static struct net_proto_family inet_family_ops = {
878 */ 878 */
879static struct inet_protosw inetsw_array[] = 879static struct inet_protosw inetsw_array[] =
880{ 880{
881 { 881 {
882 .type = SOCK_STREAM, 882 .type = SOCK_STREAM,
883 .protocol = IPPROTO_TCP, 883 .protocol = IPPROTO_TCP,
884 .prot = &tcp_prot, 884 .prot = &tcp_prot,
885 .ops = &inet_stream_ops, 885 .ops = &inet_stream_ops,
886 .capability = -1, 886 .capability = -1,
887 .no_check = 0, 887 .no_check = 0,
888 .flags = INET_PROTOSW_PERMANENT | 888 .flags = INET_PROTOSW_PERMANENT |
889 INET_PROTOSW_ICSK, 889 INET_PROTOSW_ICSK,
890 }, 890 },
891 891
892 { 892 {
893 .type = SOCK_DGRAM, 893 .type = SOCK_DGRAM,
894 .protocol = IPPROTO_UDP, 894 .protocol = IPPROTO_UDP,
895 .prot = &udp_prot, 895 .prot = &udp_prot,
896 .ops = &inet_dgram_ops, 896 .ops = &inet_dgram_ops,
897 .capability = -1, 897 .capability = -1,
898 .no_check = UDP_CSUM_DEFAULT, 898 .no_check = UDP_CSUM_DEFAULT,
899 .flags = INET_PROTOSW_PERMANENT, 899 .flags = INET_PROTOSW_PERMANENT,
900 }, 900 },
901 901
902 902
903 { 903 {
904 .type = SOCK_RAW, 904 .type = SOCK_RAW,
905 .protocol = IPPROTO_IP, /* wild card */ 905 .protocol = IPPROTO_IP, /* wild card */
906 .prot = &raw_prot, 906 .prot = &raw_prot,
907 .ops = &inet_sockraw_ops, 907 .ops = &inet_sockraw_ops,
908 .capability = CAP_NET_RAW, 908 .capability = CAP_NET_RAW,
909 .no_check = UDP_CSUM_DEFAULT, 909 .no_check = UDP_CSUM_DEFAULT,
910 .flags = INET_PROTOSW_REUSE, 910 .flags = INET_PROTOSW_REUSE,
911 } 911 }
912}; 912};
913 913
@@ -946,7 +946,7 @@ void inet_register_protosw(struct inet_protosw *p)
946 /* Add the new entry after the last permanent entry if any, so that 946 /* Add the new entry after the last permanent entry if any, so that
947 * the new entry does not override a permanent entry when matched with 947 * the new entry does not override a permanent entry when matched with
948 * a wild-card protocol. But it is allowed to override any existing 948 * a wild-card protocol. But it is allowed to override any existing
949 * non-permanent entry. This means that when we remove this entry, the 949 * non-permanent entry. This means that when we remove this entry, the
950 * system automatically returns to the old behavior. 950 * system automatically returns to the old behavior.
951 */ 951 */
952 list_add_rcu(&p->list, last_perm); 952 list_add_rcu(&p->list, last_perm);
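
The entries shown earlier in inetsw_array are the permanent members of the switch; the comment above explains that later registrations are linked in after the last permanent entry, so they can shadow non-permanent entries but never permanent ones. A hedged sketch of what a module-side registration might look like, using only the fields visible in these hunks; my_proto, my_ops and IPPROTO_MYPROTO are hypothetical and the usual kernel headers are assumed:

/* Sketch only: assumes a struct proto (my_proto) and struct proto_ops
 * (my_ops) implemented elsewhere. */
static struct inet_protosw my_protosw = {
        .type       = SOCK_DGRAM,
        .protocol   = IPPROTO_MYPROTO,   /* hypothetical protocol number */
        .prot       = &my_proto,
        .ops        = &my_ops,
        .capability = -1,                /* no capability required */
        .no_check   = 0,
        .flags      = 0,                 /* non-permanent: may be overridden */
};

static int __init my_proto_init(void)
{
        inet_register_protosw(&my_protosw);
        return 0;
}

Because .flags lacks INET_PROTOSW_PERMANENT, a later registration for the same type/protocol pair could override this entry, exactly as the comment describes.
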
@@ -1073,7 +1073,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1073 }, 1073 },
1074 }, 1074 },
1075 }; 1075 };
1076 1076
1077 security_sk_classify_flow(sk, &fl); 1077 security_sk_classify_flow(sk, &fl);
1078 err = ip_route_output_flow(&rt, &fl, sk, 0); 1078 err = ip_route_output_flow(&rt, &fl, sk, 0);
1079} 1079}
@@ -1273,10 +1273,10 @@ static int __init inet_init(void)
1273 goto out_unregister_udp_proto; 1273 goto out_unregister_udp_proto;
1274 1274
1275 /* 1275 /*
1276 * Tell SOCKET that we are alive... 1276 * Tell SOCKET that we are alive...
1277 */ 1277 */
1278 1278
1279 (void)sock_register(&inet_family_ops); 1279 (void)sock_register(&inet_family_ops);
1280 1280
1281 /* 1281 /*
1282 * Add all the base protocols. 1282 * Add all the base protocols.
@@ -1306,9 +1306,9 @@ static int __init inet_init(void)
1306 1306
1307 arp_init(); 1307 arp_init();
1308 1308
1309 /* 1309 /*
1310 * Set the IP module up 1310 * Set the IP module up
1311 */ 1311 */
1312 1312
1313 ip_init(); 1313 ip_init();
1314 1314
@@ -1334,11 +1334,11 @@ static int __init inet_init(void)
1334#endif 1334#endif
1335 /* 1335 /*
1336 * Initialise per-cpu ipv4 mibs 1336 * Initialise per-cpu ipv4 mibs
1337 */ 1337 */
1338 1338
1339 if(init_ipv4_mibs()) 1339 if(init_ipv4_mibs())
1340 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ; 1340 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ;
1341 1341
1342 ipv4_proc_init(); 1342 ipv4_proc_init();
1343 1343
1344 ipfrag_init(); 1344 ipfrag_init();
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 67a5509e26fc..7194eb40b6d0 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -91,7 +91,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
91 top_iph->check = 0; 91 top_iph->check = 0;
92 92
93 ahp = x->data; 93 ahp = x->data;
94 ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + 94 ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
95 ahp->icv_trunc_len) >> 2) - 2; 95 ahp->icv_trunc_len) >> 2) - 2;
96 96
97 ah->reserved = 0; 97 ah->reserved = 0;
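
The hdrlen computation above follows the AH convention that the header length field counts 32-bit words minus 2. As a worked example, assuming HMAC-SHA1-96 (icv_trunc_len = 12) and the 12-byte fixed AH header:

        sizeof(struct ip_auth_hdr) + icv_trunc_len = 12 + 12 = 24 bytes
        XFRM_ALIGN8(24)                            = 24 bytes
        24 >> 2                                    = 6 32-bit words
        6 - 2                                      = 4   ->  ah->hdrlen = 4
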
@@ -135,9 +135,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
135 ah = (struct ip_auth_hdr*)skb->data; 135 ah = (struct ip_auth_hdr*)skb->data;
136 ahp = x->data; 136 ahp = x->data;
137 ah_hlen = (ah->hdrlen + 2) << 2; 137 ah_hlen = (ah->hdrlen + 2) << 2;
138 138
139 if (ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_full_len) && 139 if (ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_full_len) &&
140 ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len)) 140 ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len))
141 goto out; 141 goto out;
142 142
143 if (!pskb_may_pull(skb, ah_hlen)) 143 if (!pskb_may_pull(skb, ah_hlen))
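
The length check above accepts either ICV form. Continuing the HMAC-SHA1 example (icv_full_len = 20, icv_trunc_len = 12):

        truncated form: XFRM_ALIGN8(12 + 12) = 24, and (ah->hdrlen + 2) << 2 = (4 + 2) << 2 = 24, so it matches
        full form:      XFRM_ALIGN8(12 + 20) = 32, which would correspond to ah->hdrlen = 6
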
@@ -166,9 +166,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
166 if (ip_clear_mutable_options(iph, &dummy)) 166 if (ip_clear_mutable_options(iph, &dummy))
167 goto out; 167 goto out;
168 } 168 }
169 { 169 {
170 u8 auth_data[MAX_AH_AUTH_LEN]; 170 u8 auth_data[MAX_AH_AUTH_LEN];
171 171
172 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 172 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
173 skb_push(skb, ihl); 173 skb_push(skb, ihl);
174 err = ah_mac_digest(ahp, skb, ah->auth_data); 174 err = ah_mac_digest(ahp, skb, ah->auth_data);
@@ -237,7 +237,7 @@ static int ah_init_state(struct xfrm_state *x)
237 ahp->tfm = tfm; 237 ahp->tfm = tfm;
238 if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) 238 if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
239 goto error; 239 goto error;
240 240
241 /* 241 /*
242 * Lookup the algorithm description maintained by xfrm_algo, 242 * Lookup the algorithm description maintained by xfrm_algo,
243 * verify crypto transform properties, and store information 243 * verify crypto transform properties, and store information
@@ -254,16 +254,16 @@ static int ah_init_state(struct xfrm_state *x)
254 aalg_desc->uinfo.auth.icv_fullbits/8); 254 aalg_desc->uinfo.auth.icv_fullbits/8);
255 goto error; 255 goto error;
256 } 256 }
257 257
258 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 258 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
259 ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 259 ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
260 260
261 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); 261 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
262 262
263 ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); 263 ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
264 if (!ahp->work_icv) 264 if (!ahp->work_icv)
265 goto error; 265 goto error;
266 266
267 x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len); 267 x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len);
268 if (x->props.mode == XFRM_MODE_TUNNEL) 268 if (x->props.mode == XFRM_MODE_TUNNEL)
269 x->props.header_len += sizeof(struct iphdr); 269 x->props.header_len += sizeof(struct iphdr);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 3981e8be9ab8..a58afde4f72f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -15,9 +15,9 @@
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 * 16 *
17 * Fixes: 17 * Fixes:
18 * Alan Cox : Removed the Ethernet assumptions in 18 * Alan Cox : Removed the Ethernet assumptions in
19 * Florian's code 19 * Florian's code
20 * Alan Cox : Fixed some small errors in the ARP 20 * Alan Cox : Fixed some small errors in the ARP
21 * logic 21 * logic
22 * Alan Cox : Allow >4K in /proc 22 * Alan Cox : Allow >4K in /proc
23 * Alan Cox : Make ARP add its own protocol entry 23 * Alan Cox : Make ARP add its own protocol entry
@@ -39,18 +39,18 @@
39 * Jonathan Naylor : Only lookup the hardware address for 39 * Jonathan Naylor : Only lookup the hardware address for
40 * the correct hardware type. 40 * the correct hardware type.
41 * Germano Caronni : Assorted subtle races. 41 * Germano Caronni : Assorted subtle races.
42 * Craig Schlenter : Don't modify permanent entry 42 * Craig Schlenter : Don't modify permanent entry
43 * during arp_rcv. 43 * during arp_rcv.
44 * Russ Nelson : Tidied up a few bits. 44 * Russ Nelson : Tidied up a few bits.
45 * Alexey Kuznetsov: Major changes to caching and behaviour, 45 * Alexey Kuznetsov: Major changes to caching and behaviour,
46 * eg intelligent arp probing and 46 * eg intelligent arp probing and
47 * generation 47 * generation
48 * of host down events. 48 * of host down events.
49 * Alan Cox : Missing unlock in device events. 49 * Alan Cox : Missing unlock in device events.
50 * Eckes : ARP ioctl control errors. 50 * Eckes : ARP ioctl control errors.
51 * Alexey Kuznetsov: Arp free fix. 51 * Alexey Kuznetsov: Arp free fix.
52 * Manuel Rodriguez: Gratuitous ARP. 52 * Manuel Rodriguez: Gratuitous ARP.
53 * Jonathan Layes : Added arpd support through kerneld 53 * Jonathan Layes : Added arpd support through kerneld
54 * message queue (960314) 54 * message queue (960314)
55 * Mike Shaver : /proc/sys/net/ipv4/arp_* support 55 * Mike Shaver : /proc/sys/net/ipv4/arp_* support
56 * Mike McLagan : Routing by source 56 * Mike McLagan : Routing by source
@@ -210,7 +210,7 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
210 case ARPHRD_FDDI: 210 case ARPHRD_FDDI:
211 case ARPHRD_IEEE802: 211 case ARPHRD_IEEE802:
212 ip_eth_mc_map(addr, haddr); 212 ip_eth_mc_map(addr, haddr);
213 return 0; 213 return 0;
214 case ARPHRD_IEEE802_TR: 214 case ARPHRD_IEEE802_TR:
215 ip_tr_mc_map(addr, haddr); 215 ip_tr_mc_map(addr, haddr);
216 return 0; 216 return 0;
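
For the Ethernet-like cases above, ip_eth_mc_map() applies the standard IPv4 multicast mapping: the group address's low 23 bits are copied into the 01:00:5e:00:00:00 MAC prefix. A small self-contained illustration of that mapping (not the kernel helper itself):

#include <stdint.h>
#include <stdio.h>

/* Map an IPv4 multicast group (host byte order) onto the 01:00:5e
 * Ethernet prefix by copying the low 23 bits of the address. */
static void ipv4_mc_to_mac(uint32_t group, uint8_t mac[6])
{
        mac[0] = 0x01;
        mac[1] = 0x00;
        mac[2] = 0x5e;
        mac[3] = (group >> 16) & 0x7f;  /* top bit of the 23 is dropped */
        mac[4] = (group >> 8) & 0xff;
        mac[5] = group & 0xff;
}

int main(void)
{
        uint8_t mac[6];

        ipv4_mc_to_mac(0xe0010203, mac);        /* 224.1.2.3 */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints 01:00:5e:01:02:03 */
        return 0;
}
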
@@ -288,7 +288,7 @@ static int arp_constructor(struct neighbour *neigh)
288 switch (dev->type) { 288 switch (dev->type) {
289 default: 289 default:
290 break; 290 break;
291 case ARPHRD_ROSE: 291 case ARPHRD_ROSE:
292#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 292#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
293 case ARPHRD_AX25: 293 case ARPHRD_AX25:
294#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) 294#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
@@ -425,18 +425,18 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
425 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip, 425 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
426 .saddr = tip } } }; 426 .saddr = tip } } };
427 struct rtable *rt; 427 struct rtable *rt;
428 int flag = 0; 428 int flag = 0;
429 /*unsigned long now; */ 429 /*unsigned long now; */
430 430
431 if (ip_route_output_key(&rt, &fl) < 0) 431 if (ip_route_output_key(&rt, &fl) < 0)
432 return 1; 432 return 1;
433 if (rt->u.dst.dev != dev) { 433 if (rt->u.dst.dev != dev) {
434 NET_INC_STATS_BH(LINUX_MIB_ARPFILTER); 434 NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
435 flag = 1; 435 flag = 1;
436 } 436 }
437 ip_rt_put(rt); 437 ip_rt_put(rt);
438 return flag; 438 return flag;
439} 439}
440 440
441/* OBSOLETE FUNCTIONS */ 441/* OBSOLETE FUNCTIONS */
442 442
@@ -490,7 +490,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
490 n->used = jiffies; 490 n->used = jiffies;
491 if (n->nud_state&NUD_VALID || neigh_event_send(n, skb) == 0) { 491 if (n->nud_state&NUD_VALID || neigh_event_send(n, skb) == 0) {
492 read_lock_bh(&n->lock); 492 read_lock_bh(&n->lock);
493 memcpy(haddr, n->ha, dev->addr_len); 493 memcpy(haddr, n->ha, dev->addr_len);
494 read_unlock_bh(&n->lock); 494 read_unlock_bh(&n->lock);
495 neigh_release(n); 495 neigh_release(n);
496 return 0; 496 return 0;
@@ -572,7 +572,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
572 /* 572 /*
573 * Allocate a buffer 573 * Allocate a buffer
574 */ 574 */
575 575
576 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) 576 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
577 + LL_RESERVED_SPACE(dev), GFP_ATOMIC); 577 + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
578 if (skb == NULL) 578 if (skb == NULL)
@@ -685,7 +685,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
685 /* 685 /*
686 * No arp on this interface. 686 * No arp on this interface.
687 */ 687 */
688 688
689 if (dev->flags&IFF_NOARP) 689 if (dev->flags&IFF_NOARP)
690 return; 690 return;
691 691
@@ -725,7 +725,7 @@ static int arp_process(struct sk_buff *skb)
725 arp = skb->nh.arph; 725 arp = skb->nh.arph;
726 726
727 switch (dev_type) { 727 switch (dev_type) {
728 default: 728 default:
729 if (arp->ar_pro != htons(ETH_P_IP) || 729 if (arp->ar_pro != htons(ETH_P_IP) ||
730 htons(dev_type) != arp->ar_hrd) 730 htons(dev_type) != arp->ar_hrd)
731 goto out; 731 goto out;
@@ -792,7 +792,7 @@ static int arp_process(struct sk_buff *skb)
792 tha = arp_ptr; 792 tha = arp_ptr;
793 arp_ptr += dev->addr_len; 793 arp_ptr += dev->addr_len;
794 memcpy(&tip, arp_ptr, 4); 794 memcpy(&tip, arp_ptr, 4);
795/* 795/*
796 * Check for bad requests for 127.x.x.x and requests for multicast 796 * Check for bad requests for 127.x.x.x and requests for multicast
797 * addresses. If this is one such, delete it. 797 * addresses. If this is one such, delete it.
798 */ 798 */
@@ -809,16 +809,16 @@ static int arp_process(struct sk_buff *skb)
809 * Process entry. The idea here is we want to send a reply if it is a 809 * Process entry. The idea here is we want to send a reply if it is a
810 * request for us or if it is a request for someone else that we hold 810 * request for us or if it is a request for someone else that we hold
811 * a proxy for. We want to add an entry to our cache if it is a reply 811 * a proxy for. We want to add an entry to our cache if it is a reply
812 * to us or if it is a request for our address. 812 * to us or if it is a request for our address.
813 * (The assumption for this last is that if someone is requesting our 813 * (The assumption for this last is that if someone is requesting our
814 * address, they are probably intending to talk to us, so it saves time 814 * address, they are probably intending to talk to us, so it saves time
815 * if we cache their address. Their address is also probably not in 815 * if we cache their address. Their address is also probably not in
816 * our cache, since ours is not in their cache.) 816 * our cache, since ours is not in their cache.)
817 * 817 *
818 * Putting this another way, we only care about replies if they are to 818 * Putting this another way, we only care about replies if they are to
819 * us, in which case we add them to the cache. For requests, we care 819 * us, in which case we add them to the cache. For requests, we care
820 * about those for us and those for our proxies. We reply to both, 820 * about those for us and those for our proxies. We reply to both,
821 * and in the case of requests for us we add the requester to the arp 821 * and in the case of requests for us we add the requester to the arp
822 * cache. 822 * cache.
823 */ 823 */
824 824
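
The policy spelled out in the comment above reduces to two predicates. A hedged restatement in plain C, illustrative only, with hypothetical variable names:

#include <stdbool.h>

/* Reply to requests that are for us or for an address we proxy;
 * cache the sender when the packet is a reply to us or a request
 * for one of our own addresses. */
static bool arp_should_reply(bool is_request, bool target_is_ours,
                             bool target_is_proxied)
{
        return is_request && (target_is_ours || target_is_proxied);
}

static bool arp_should_cache(bool is_request, bool is_reply_to_us,
                             bool target_is_ours)
{
        return is_reply_to_us || (is_request && target_is_ours);
}
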
@@ -845,7 +845,7 @@ static int arp_process(struct sk_buff *skb)
845 if (!dont_send) 845 if (!dont_send)
846 dont_send |= arp_ignore(in_dev,dev,sip,tip); 846 dont_send |= arp_ignore(in_dev,dev,sip,tip);
847 if (!dont_send && IN_DEV_ARPFILTER(in_dev)) 847 if (!dont_send && IN_DEV_ARPFILTER(in_dev))
848 dont_send |= arp_filter(sip,tip,dev); 848 dont_send |= arp_filter(sip,tip,dev);
849 if (!dont_send) 849 if (!dont_send)
850 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 850 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
851 851
@@ -860,7 +860,7 @@ static int arp_process(struct sk_buff *skb)
860 if (n) 860 if (n)
861 neigh_release(n); 861 neigh_release(n);
862 862
863 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || 863 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
864 skb->pkt_type == PACKET_HOST || 864 skb->pkt_type == PACKET_HOST ||
865 in_dev->arp_parms->proxy_delay == 0) { 865 in_dev->arp_parms->proxy_delay == 0) {
866 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 866 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
@@ -1039,7 +1039,7 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev)
1039 if (r->arp_flags & ATF_PERM) 1039 if (r->arp_flags & ATF_PERM)
1040 state = NUD_PERMANENT; 1040 state = NUD_PERMANENT;
1041 err = neigh_update(neigh, (r->arp_flags&ATF_COM) ? 1041 err = neigh_update(neigh, (r->arp_flags&ATF_COM) ?
1042 r->arp_ha.sa_data : NULL, state, 1042 r->arp_ha.sa_data : NULL, state,
1043 NEIGH_UPDATE_F_OVERRIDE| 1043 NEIGH_UPDATE_F_OVERRIDE|
1044 NEIGH_UPDATE_F_ADMIN); 1044 NEIGH_UPDATE_F_ADMIN);
1045 neigh_release(neigh); 1045 neigh_release(neigh);
@@ -1121,7 +1121,7 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev)
1121 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1121 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1122 if (neigh) { 1122 if (neigh) {
1123 if (neigh->nud_state&~NUD_NOARP) 1123 if (neigh->nud_state&~NUD_NOARP)
1124 err = neigh_update(neigh, NULL, NUD_FAILED, 1124 err = neigh_update(neigh, NULL, NUD_FAILED,
1125 NEIGH_UPDATE_F_OVERRIDE| 1125 NEIGH_UPDATE_F_OVERRIDE|
1126 NEIGH_UPDATE_F_ADMIN); 1126 NEIGH_UPDATE_F_ADMIN);
1127 neigh_release(neigh); 1127 neigh_release(neigh);
@@ -1181,7 +1181,7 @@ int arp_ioctl(unsigned int cmd, void __user *arg)
1181 1181
1182 switch(cmd) { 1182 switch(cmd) {
1183 case SIOCDARP: 1183 case SIOCDARP:
1184 err = arp_req_delete(&r, dev); 1184 err = arp_req_delete(&r, dev);
1185 break; 1185 break;
1186 case SIOCSARP: 1186 case SIOCSARP:
1187 err = arp_req_set(&r, dev); 1187 err = arp_req_set(&r, dev);
@@ -1268,14 +1268,14 @@ static char *ax2asc2(ax25_address *a, char *buf)
1268 1268
1269 if (c != ' ') *s++ = c; 1269 if (c != ' ') *s++ = c;
1270 } 1270 }
1271 1271
1272 *s++ = '-'; 1272 *s++ = '-';
1273 1273
1274 if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) { 1274 if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
1275 *s++ = '1'; 1275 *s++ = '1';
1276 n -= 10; 1276 n -= 10;
1277 } 1277 }
1278 1278
1279 *s++ = n + '0'; 1279 *s++ = n + '0';
1280 *s++ = '\0'; 1280 *s++ = '\0';
1281 1281
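
The tail of ax2asc2() above renders the SSID held in the callsign's last byte as one or two decimal digits. A small stand-alone illustration of the same two-digit logic (assumed behaviour, not the kernel function):

#include <stdio.h>

/* Print an AX.25 SSID suffix the same way the fragment above builds it. */
static void print_ssid_suffix(unsigned int n)
{
        putchar('-');
        if (n > 9) {
                putchar('1');
                n -= 10;
        }
        putchar(n + '0');
        putchar('\n');
}

int main(void)
{
        print_ssid_suffix(12);   /* prints "-12" */
        return 0;
}
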
@@ -1373,7 +1373,7 @@ static int arp_seq_open(struct inode *inode, struct file *file)
1373 struct seq_file *seq; 1373 struct seq_file *seq;
1374 int rc = -ENOMEM; 1374 int rc = -ENOMEM;
1375 struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1375 struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1376 1376
1377 if (!s) 1377 if (!s)
1378 goto out; 1378 goto out;
1379 1379
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 0072d79f0c2a..dd02a45d0f67 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -29,12 +29,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
29 int oif; 29 int oif;
30 int err; 30 int err;
31 31
32
33 if (addr_len < sizeof(*usin))
34 return -EINVAL;
35 32
36 if (usin->sin_family != AF_INET) 33 if (addr_len < sizeof(*usin))
37 return -EAFNOSUPPORT; 34 return -EINVAL;
35
36 if (usin->sin_family != AF_INET)
37 return -EAFNOSUPPORT;
38 38
39 sk_dst_reset(sk); 39 sk_dst_reset(sk);
40 40
@@ -56,8 +56,8 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
56 ip_rt_put(rt); 56 ip_rt_put(rt);
57 return -EACCES; 57 return -EACCES;
58 } 58 }
59 if (!inet->saddr) 59 if (!inet->saddr)
60 inet->saddr = rt->rt_src; /* Update source address */ 60 inet->saddr = rt->rt_src; /* Update source address */
61 if (!inet->rcv_saddr) 61 if (!inet->rcv_saddr)
62 inet->rcv_saddr = rt->rt_src; 62 inet->rcv_saddr = rt->rt_src;
63 inet->daddr = rt->rt_dst; 63 inet->daddr = rt->rt_dst;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c40203640966..ba5e7f4cd127 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -252,7 +252,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
252 252
253 ASSERT_RTNL(); 253 ASSERT_RTNL();
254 254
255 /* 1. Deleting primary ifaddr forces deletion of all secondaries 255 /* 1. Deleting primary ifaddr forces deletion of all secondaries
256 * unless alias promotion is set 256 * unless alias promotion is set
257 **/ 257 **/
258 258
@@ -260,7 +260,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
260 struct in_ifaddr **ifap1 = &ifa1->ifa_next; 260 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
261 261
262 while ((ifa = *ifap1) != NULL) { 262 while ((ifa = *ifap1) != NULL) {
263 if (!(ifa->ifa_flags & IFA_F_SECONDARY) && 263 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
264 ifa1->ifa_scope <= ifa->ifa_scope) 264 ifa1->ifa_scope <= ifa->ifa_scope)
265 last_prim = ifa; 265 last_prim = ifa;
266 266
@@ -583,8 +583,8 @@ static __inline__ int inet_abc_len(__be32 addr)
583{ 583{
584 int rc = -1; /* Something else, probably a multicast. */ 584 int rc = -1; /* Something else, probably a multicast. */
585 585
586 if (ZERONET(addr)) 586 if (ZERONET(addr))
587 rc = 0; 587 rc = 0;
588 else { 588 else {
589 __u32 haddr = ntohl(addr); 589 __u32 haddr = ntohl(addr);
590 590
@@ -596,7 +596,7 @@ static __inline__ int inet_abc_len(__be32 addr)
596 rc = 24; 596 rc = 24;
597 } 597 }
598 598
599 return rc; 599 return rc;
600} 600}
601 601
602 602
@@ -1020,29 +1020,29 @@ int unregister_inetaddr_notifier(struct notifier_block *nb)
1020 * alias numbering and to create unique labels if possible. 1020 * alias numbering and to create unique labels if possible.
1021*/ 1021*/
1022static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) 1022static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1023{ 1023{
1024 struct in_ifaddr *ifa; 1024 struct in_ifaddr *ifa;
1025 int named = 0; 1025 int named = 0;
1026 1026
1027 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 1027 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1028 char old[IFNAMSIZ], *dot; 1028 char old[IFNAMSIZ], *dot;
1029 1029
1030 memcpy(old, ifa->ifa_label, IFNAMSIZ); 1030 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1031 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); 1031 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1032 if (named++ == 0) 1032 if (named++ == 0)
1033 continue; 1033 continue;
1034 dot = strchr(ifa->ifa_label, ':'); 1034 dot = strchr(ifa->ifa_label, ':');
1035 if (dot == NULL) { 1035 if (dot == NULL) {
1036 sprintf(old, ":%d", named); 1036 sprintf(old, ":%d", named);
1037 dot = old; 1037 dot = old;
1038 } 1038 }
1039 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) { 1039 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
1040 strcat(ifa->ifa_label, dot); 1040 strcat(ifa->ifa_label, dot);
1041 } else { 1041 } else {
1042 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); 1042 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1043 } 1043 }
1044 } 1044 }
1045} 1045}
1046 1046
1047/* Called only under RTNL semaphore */ 1047/* Called only under RTNL semaphore */
1048 1048
@@ -1539,7 +1539,7 @@ static struct devinet_sysctl_table {
1539 }, 1539 },
1540 }, 1540 },
1541 .devinet_conf_dir = { 1541 .devinet_conf_dir = {
1542 { 1542 {
1543 .ctl_name = NET_IPV4_CONF, 1543 .ctl_name = NET_IPV4_CONF,
1544 .procname = "conf", 1544 .procname = "conf",
1545 .mode = 0555, 1545 .mode = 0555,
@@ -1581,18 +1581,18 @@ static void devinet_sysctl_register(struct in_device *in_dev,
1581 } 1581 }
1582 1582
1583 if (dev) { 1583 if (dev) {
1584 dev_name = dev->name; 1584 dev_name = dev->name;
1585 t->devinet_dev[0].ctl_name = dev->ifindex; 1585 t->devinet_dev[0].ctl_name = dev->ifindex;
1586 } else { 1586 } else {
1587 dev_name = "default"; 1587 dev_name = "default";
1588 t->devinet_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT; 1588 t->devinet_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT;
1589 } 1589 }
1590 1590
1591 /* 1591 /*
1592 * Make a copy of dev_name, because '.procname' is regarded as const 1592 * Make a copy of dev_name, because '.procname' is regarded as const
1593 * by sysctl and we wouldn't want anyone to change it under our feet 1593 * by sysctl and we wouldn't want anyone to change it under our feet
1594 * (see SIOCSIFNAME). 1594 * (see SIOCSIFNAME).
1595 */ 1595 */
1596 dev_name = kstrdup(dev_name, GFP_KERNEL); 1596 dev_name = kstrdup(dev_name, GFP_KERNEL);
1597 if (!dev_name) 1597 if (!dev_name)
1598 goto free; 1598 goto free;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index f2c6776ea0e6..31041127eeb8 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -215,7 +215,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
215 if (padlen+2 >= elen) 215 if (padlen+2 >= elen)
216 goto out; 216 goto out;
217 217
218 /* ... check padding bits here. Silly. :-) */ 218 /* ... check padding bits here. Silly. :-) */
219 219
220 iph = skb->nh.iph; 220 iph = skb->nh.iph;
221 ihl = iph->ihl * 4; 221 ihl = iph->ihl * 4;
@@ -236,7 +236,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
236 236
237 ipaddr.a4 = iph->saddr; 237 ipaddr.a4 = iph->saddr;
238 km_new_mapping(x, &ipaddr, uh->source); 238 km_new_mapping(x, &ipaddr, uh->source);
239 239
240 /* XXX: perhaps add an extra 240 /* XXX: perhaps add an extra
241 * policy check here, to see 241 * policy check here, to see
242 * if we should allow or 242 * if we should allow or
@@ -245,7 +245,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
245 * address/port. 245 * address/port.
246 */ 246 */
247 } 247 }
248 248
249 /* 249 /*
250 * 2) ignore UDP/TCP checksums in case 250 * 2) ignore UDP/TCP checksums in case
251 * of NAT-T in Transport Mode, or 251 * of NAT-T in Transport Mode, or
@@ -284,7 +284,7 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
284 mtu = ALIGN(mtu + 2, 4) + blksize - 4; 284 mtu = ALIGN(mtu + 2, 4) + blksize - 4;
285 break; 285 break;
286 case XFRM_MODE_BEET: 286 case XFRM_MODE_BEET:
287 /* The worst case. */ 287 /* The worst case. */
288 enclen = IPV4_BEET_PHMAXLEN; 288 enclen = IPV4_BEET_PHMAXLEN;
289 mtu = ALIGN(mtu + enclen + 2, blksize); 289 mtu = ALIGN(mtu + enclen + 2, blksize);
290 break; 290 break;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index d47b72af89ed..64f31e63db7f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -160,7 +160,7 @@ unsigned inet_addr_type(__be32 addr)
160#ifdef CONFIG_IP_MULTIPLE_TABLES 160#ifdef CONFIG_IP_MULTIPLE_TABLES
161 res.r = NULL; 161 res.r = NULL;
162#endif 162#endif
163 163
164 if (ip_fib_local_table) { 164 if (ip_fib_local_table) {
165 ret = RTN_UNICAST; 165 ret = RTN_UNICAST;
166 if (!ip_fib_local_table->tb_lookup(ip_fib_local_table, 166 if (!ip_fib_local_table->tb_lookup(ip_fib_local_table,
@@ -378,7 +378,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
378 int len = 0; 378 int len = 0;
379 379
380 mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL); 380 mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
381 if (mx == NULL) 381 if (mx == NULL)
382 return -ENOMEM; 382 return -ENOMEM;
383 383
384 if (rt->rt_flags & RTF_MTU) 384 if (rt->rt_flags & RTF_MTU)
@@ -400,7 +400,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
400/* 400/*
401 * Handle IP routing ioctl calls. These are used to manipulate the routing tables 401 * Handle IP routing ioctl calls. These are used to manipulate the routing tables
402 */ 402 */
403 403
404int ip_rt_ioctl(unsigned int cmd, void __user *arg) 404int ip_rt_ioctl(unsigned int cmd, void __user *arg)
405{ 405{
406 struct fib_config cfg; 406 struct fib_config cfg;
@@ -600,7 +600,7 @@ int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
600 goto next; 600 goto next;
601 if (dumped) 601 if (dumped)
602 memset(&cb->args[2], 0, sizeof(cb->args) - 602 memset(&cb->args[2], 0, sizeof(cb->args) -
603 2 * sizeof(cb->args[0])); 603 2 * sizeof(cb->args[0]));
604 if (tb->tb_dump(tb, skb, cb) < 0) 604 if (tb->tb_dump(tb, skb, cb) < 0)
605 goto out; 605 goto out;
606 dumped = 1; 606 dumped = 1;
@@ -766,7 +766,7 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
766 766
767static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb ) 767static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
768{ 768{
769 769
770 struct fib_result res; 770 struct fib_result res;
771 struct flowi fl = { .mark = frn->fl_mark, 771 struct flowi fl = { .mark = frn->fl_mark,
772 .nl_u = { .ip4_u = { .daddr = frn->fl_addr, 772 .nl_u = { .ip4_u = { .daddr = frn->fl_addr,
@@ -791,11 +791,11 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
791static void nl_fib_input(struct sock *sk, int len) 791static void nl_fib_input(struct sock *sk, int len)
792{ 792{
793 struct sk_buff *skb = NULL; 793 struct sk_buff *skb = NULL;
794 struct nlmsghdr *nlh = NULL; 794 struct nlmsghdr *nlh = NULL;
795 struct fib_result_nl *frn; 795 struct fib_result_nl *frn;
796 u32 pid; 796 u32 pid;
797 struct fib_table *tb; 797 struct fib_table *tb;
798 798
799 skb = skb_dequeue(&sk->sk_receive_queue); 799 skb = skb_dequeue(&sk->sk_receive_queue);
800 nlh = (struct nlmsghdr *)skb->data; 800 nlh = (struct nlmsghdr *)skb->data;
801 if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len || 801 if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
@@ -803,17 +803,17 @@ static void nl_fib_input(struct sock *sk, int len)
803 kfree_skb(skb); 803 kfree_skb(skb);
804 return; 804 return;
805 } 805 }
806 806
807 frn = (struct fib_result_nl *) NLMSG_DATA(nlh); 807 frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
808 tb = fib_get_table(frn->tb_id_in); 808 tb = fib_get_table(frn->tb_id_in);
809 809
810 nl_fib_lookup(frn, tb); 810 nl_fib_lookup(frn, tb);
811 811
812 pid = nlh->nlmsg_pid; /*pid of sending process */ 812 pid = nlh->nlmsg_pid; /*pid of sending process */
813 NETLINK_CB(skb).pid = 0; /* from kernel */ 813 NETLINK_CB(skb).pid = 0; /* from kernel */
814 NETLINK_CB(skb).dst_group = 0; /* unicast */ 814 NETLINK_CB(skb).dst_group = 0; /* unicast */
815 netlink_unicast(sk, skb, pid, MSG_DONTWAIT); 815 netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
816} 816}
817 817
818static void nl_fib_lookup_init(void) 818static void nl_fib_lookup_init(void)
819{ 819{
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 648f47c1c399..dea04d725b04 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -146,7 +146,7 @@ static void fn_rehash_zone(struct fn_zone *fz)
146 struct hlist_head *ht, *old_ht; 146 struct hlist_head *ht, *old_ht;
147 int old_divisor, new_divisor; 147 int old_divisor, new_divisor;
148 u32 new_hashmask; 148 u32 new_hashmask;
149 149
150 old_divisor = fz->fz_divisor; 150 old_divisor = fz->fz_divisor;
151 151
152 switch (old_divisor) { 152 switch (old_divisor) {
@@ -911,7 +911,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
911 911
912 if (!iter->zone) 912 if (!iter->zone)
913 goto out; 913 goto out;
914 914
915 iter->bucket = 0; 915 iter->bucket = 0;
916 iter->hash_head = iter->zone->fz_hash; 916 iter->hash_head = iter->zone->fz_hash;
917 917
@@ -932,7 +932,7 @@ static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
932{ 932{
933 struct fib_iter_state *iter = seq->private; 933 struct fib_iter_state *iter = seq->private;
934 struct fib_alias *fa; 934 struct fib_alias *fa;
935 935
936 if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) { 936 if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
937 fa = iter->fa; 937 fa = iter->fa;
938 pos -= iter->pos; 938 pos -= iter->pos;
@@ -981,7 +981,7 @@ static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
981 return flags; 981 return flags;
982} 982}
983 983
984/* 984/*
985 * This outputs /proc/net/route. 985 * This outputs /proc/net/route.
986 * 986 *
987 * It always works in backward compatibility mode. 987 * It always works in backward compatibility mode.
@@ -1040,7 +1040,7 @@ static int fib_seq_open(struct inode *inode, struct file *file)
1040 struct seq_file *seq; 1040 struct seq_file *seq;
1041 int rc = -ENOMEM; 1041 int rc = -ENOMEM;
1042 struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1042 struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1043 1043
1044 if (!s) 1044 if (!s)
1045 goto out; 1045 goto out;
1046 1046
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index be1028c9933e..2f1fdae6efa6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -85,12 +85,12 @@ for (nhsel=0; nhsel < 1; nhsel++)
85#define endfor_nexthops(fi) } 85#define endfor_nexthops(fi) }
86 86
87 87
88static const struct 88static const struct
89{ 89{
90 int error; 90 int error;
91 u8 scope; 91 u8 scope;
92} fib_props[RTA_MAX + 1] = { 92} fib_props[RTA_MAX + 1] = {
93 { 93 {
94 .error = 0, 94 .error = 0,
95 .scope = RT_SCOPE_NOWHERE, 95 .scope = RT_SCOPE_NOWHERE,
96 }, /* RTN_UNSPEC */ 96 }, /* RTN_UNSPEC */
@@ -439,7 +439,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
439 439
440 rtnh = cfg->fc_mp; 440 rtnh = cfg->fc_mp;
441 remaining = cfg->fc_mp_len; 441 remaining = cfg->fc_mp_len;
442 442
443 for_nexthops(fi) { 443 for_nexthops(fi) {
444 int attrlen; 444 int attrlen;
445 445
@@ -508,9 +508,9 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
508 Normally it looks as following. 508 Normally it looks as following.
509 509
510 {universe prefix} -> (gw, oif) [scope link] 510 {universe prefix} -> (gw, oif) [scope link]
511 | 511 |
512 |-> {link prefix} -> (gw, oif) [scope local] 512 |-> {link prefix} -> (gw, oif) [scope local]
513 | 513 |
514 |-> {local prefix} (terminal node) 514 |-> {local prefix} (terminal node)
515 */ 515 */
516 516
@@ -864,7 +864,7 @@ err_inval:
864 err = -EINVAL; 864 err = -EINVAL;
865 865
866failure: 866failure:
867 if (fi) { 867 if (fi) {
868 fi->fib_dead = 1; 868 fi->fib_dead = 1;
869 free_fib_info(fi); 869 free_fib_info(fi);
870 } 870 }
@@ -1049,7 +1049,7 @@ int fib_sync_down(__be32 local, struct net_device *dev, int force)
1049{ 1049{
1050 int ret = 0; 1050 int ret = 0;
1051 int scope = RT_SCOPE_NOWHERE; 1051 int scope = RT_SCOPE_NOWHERE;
1052 1052
1053 if (force) 1053 if (force)
1054 scope = -1; 1054 scope = -1;
1055 1055
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1e589b91605e..004a437bd7b5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -7,13 +7,13 @@
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet 7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences. 8 * & Swedish University of Agricultural Sciences.
9 * 9 *
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of 10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences. 11 * Agricultural Sciences.
12 * 12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet 13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 * 14 *
15 * This work is based on the LPC-trie which is originally described in: 15 * This work is based on the LPC-trie which is originally described in:
16 * 16 *
17 * An experimental study of compression methods for dynamic tries 17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. 18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/ 19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
@@ -224,34 +224,34 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
224} 224}
225 225
226/* 226/*
227 To understand this stuff, an understanding of keys and all their bits is 227 To understand this stuff, an understanding of keys and all their bits is
228 necessary. Every node in the trie has a key associated with it, but not 228 necessary. Every node in the trie has a key associated with it, but not
229 all of the bits in that key are significant. 229 all of the bits in that key are significant.
230 230
231 Consider a node 'n' and its parent 'tp'. 231 Consider a node 'n' and its parent 'tp'.
232 232
233 If n is a leaf, every bit in its key is significant. Its presence is 233 If n is a leaf, every bit in its key is significant. Its presence is
234 necessitated by path compression, since during a tree traversal (when 234 necessitated by path compression, since during a tree traversal (when
235 searching for a leaf - unless we are doing an insertion) we will completely 235 searching for a leaf - unless we are doing an insertion) we will completely
236 ignore all skipped bits we encounter. Thus we need to verify, at the end of 236 ignore all skipped bits we encounter. Thus we need to verify, at the end of
237 a potentially successful search, that we have indeed been walking the 237 a potentially successful search, that we have indeed been walking the
238 correct key path. 238 correct key path.
239 239
240 Note that we can never "miss" the correct key in the tree if present by 240 Note that we can never "miss" the correct key in the tree if present by
241 following the wrong path. Path compression ensures that segments of the key 241 following the wrong path. Path compression ensures that segments of the key
242 that are the same for all keys with a given prefix are skipped, but the 242 that are the same for all keys with a given prefix are skipped, but the
243 skipped part *is* identical for each node in the subtrie below the skipped 243 skipped part *is* identical for each node in the subtrie below the skipped
244 bit! trie_insert() in this implementation takes care of that - note the 244 bit! trie_insert() in this implementation takes care of that - note the
245 call to tkey_sub_equals() in trie_insert(). 245 call to tkey_sub_equals() in trie_insert().
246 246
247 if n is an internal node - a 'tnode' here, the various parts of its key 247 if n is an internal node - a 'tnode' here, the various parts of its key
248 have many different meanings. 248 have many different meanings.
249 249
250 Example: 250 Example:
251 _________________________________________________________________ 251 _________________________________________________________________
252 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C | 252 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
253 ----------------------------------------------------------------- 253 -----------------------------------------------------------------
254 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 254 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
255 255
256 _________________________________________________________________ 256 _________________________________________________________________
257 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u | 257 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
@@ -263,23 +263,23 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
263 n->pos = 15 263 n->pos = 15
264 n->bits = 4 264 n->bits = 4
265 265
266 First, let's just ignore the bits that come before the parent tp, that is 266 First, let's just ignore the bits that come before the parent tp, that is
267 the bits from 0 to (tp->pos-1). They are *known* but at this point we do 267 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
268 not use them for anything. 268 not use them for anything.
269 269
270 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the 270 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
271 index into the parent's child array. That is, they will be used to find 271 index into the parent's child array. That is, they will be used to find
272 'n' among tp's children. 272 'n' among tp's children.
273 273
274 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits 274 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
275 for the node n. 275 for the node n.
276 276
277 All the bits we have seen so far are significant to the node n. The rest 277 All the bits we have seen so far are significant to the node n. The rest
278 of the bits are really not needed or indeed known in n->key. 278 of the bits are really not needed or indeed known in n->key.
279 279
280 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into 280 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
281 n's child array, and will of course be different for each child. 281 n's child array, and will of course be different for each child.
282 282
283 283
284 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown 284 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
285 at this point. 285 at this point.
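
Given the layout described above, finding the child slot for a node means pulling the 'bits'-wide field that starts at bit 'pos' out of a 32-bit key. A simplified illustration of that extraction in plain C (not the kernel's helper):

#include <stdint.h>
#include <stdio.h>

#define KEYLENGTH 32    /* IPv4 keys are 32 bits wide */

/* Return the 'bits'-wide field of 'key' starting at bit 'pos',
 * counting from the most significant bit. */
static uint32_t key_extract_bits(uint32_t key, int pos, int bits)
{
        if (bits == 0 || pos >= KEYLENGTH)
                return 0;
        return (key << pos) >> (KEYLENGTH - bits);
}

int main(void)
{
        uint32_t key = 0x0a010203;      /* 10.1.2.3 */

        /* pos = 4, bits = 4 picks the low nibble of the leading 0x0a octet. */
        printf("child index = %u\n", key_extract_bits(key, 4, 4));  /* prints 10 */
        return 0;
}
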
@@ -294,7 +294,7 @@ static inline void check_tnode(const struct tnode *tn)
294static int halve_threshold = 25; 294static int halve_threshold = 25;
295static int inflate_threshold = 50; 295static int inflate_threshold = 50;
296static int halve_threshold_root = 15; 296static int halve_threshold_root = 15;
297static int inflate_threshold_root = 25; 297static int inflate_threshold_root = 25;
298 298
299 299
300static void __alias_free_mem(struct rcu_head *head) 300static void __alias_free_mem(struct rcu_head *head)
@@ -355,7 +355,7 @@ static inline void tnode_free(struct tnode *tn)
355 struct leaf *l = (struct leaf *) tn; 355 struct leaf *l = (struct leaf *) tn;
356 call_rcu_bh(&l->rcu, __leaf_free_rcu); 356 call_rcu_bh(&l->rcu, __leaf_free_rcu);
357 } 357 }
358 else 358 else
359 call_rcu(&tn->rcu, __tnode_free_rcu); 359 call_rcu(&tn->rcu, __tnode_free_rcu);
360} 360}
361 361
@@ -461,7 +461,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
461 int inflate_threshold_use; 461 int inflate_threshold_use;
462 int halve_threshold_use; 462 int halve_threshold_use;
463 463
464 if (!tn) 464 if (!tn)
465 return NULL; 465 return NULL;
466 466
467 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n", 467 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
@@ -556,7 +556,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
556 556
557 if(!tn->parent) 557 if(!tn->parent)
558 inflate_threshold_use = inflate_threshold_root; 558 inflate_threshold_use = inflate_threshold_root;
559 else 559 else
560 inflate_threshold_use = inflate_threshold; 560 inflate_threshold_use = inflate_threshold;
561 561
562 err = 0; 562 err = 0;
@@ -587,7 +587,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
587 587
588 if(!tn->parent) 588 if(!tn->parent)
589 halve_threshold_use = halve_threshold_root; 589 halve_threshold_use = halve_threshold_root;
590 else 590 else
591 halve_threshold_use = halve_threshold; 591 halve_threshold_use = halve_threshold;
592 592
593 err = 0; 593 err = 0;
@@ -665,10 +665,10 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
665 right = tnode_new(inode->key|m, inode->pos + 1, 665 right = tnode_new(inode->key|m, inode->pos + 1,
666 inode->bits - 1); 666 inode->bits - 1);
667 667
668 if (!right) { 668 if (!right) {
669 tnode_free(left); 669 tnode_free(left);
670 goto nomem; 670 goto nomem;
671 } 671 }
672 672
673 put_child(t, tn, 2*i, (struct node *) left); 673 put_child(t, tn, 2*i, (struct node *) left);
674 put_child(t, tn, 2*i+1, (struct node *) right); 674 put_child(t, tn, 2*i+1, (struct node *) right);
@@ -890,23 +890,23 @@ static inline struct list_head * get_fa_head(struct leaf *l, int plen)
890 890
891static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) 891static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
892{ 892{
893 struct leaf_info *li = NULL, *last = NULL; 893 struct leaf_info *li = NULL, *last = NULL;
894 struct hlist_node *node; 894 struct hlist_node *node;
895 895
896 if (hlist_empty(head)) { 896 if (hlist_empty(head)) {
897 hlist_add_head_rcu(&new->hlist, head); 897 hlist_add_head_rcu(&new->hlist, head);
898 } else { 898 } else {
899 hlist_for_each_entry(li, node, head, hlist) { 899 hlist_for_each_entry(li, node, head, hlist) {
900 if (new->plen > li->plen) 900 if (new->plen > li->plen)
901 break; 901 break;
902 902
903 last = li; 903 last = li;
904 } 904 }
905 if (last) 905 if (last)
906 hlist_add_after_rcu(&last->hlist, &new->hlist); 906 hlist_add_after_rcu(&last->hlist, &new->hlist);
907 else 907 else
908 hlist_add_before_rcu(&new->hlist, &li->hlist); 908 hlist_add_before_rcu(&new->hlist, &li->hlist);
909 } 909 }
910} 910}
911 911
912/* rcu_read_lock needs to be held by caller from readside */ 912/* rcu_read_lock needs to be held by caller from readside */
@@ -1700,7 +1700,7 @@ static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1700 /* Descend if tnode */ 1700 /* Descend if tnode */
1701 while (IS_TNODE(c)) { 1701 while (IS_TNODE(c)) {
1702 p = (struct tnode *) c; 1702 p = (struct tnode *) c;
1703 idx = 0; 1703 idx = 0;
1704 1704
1705 /* Rightmost non-NULL branch */ 1705 /* Rightmost non-NULL branch */
1706 if (p && IS_TNODE(p)) 1706 if (p && IS_TNODE(p))
@@ -2303,9 +2303,9 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2303 2303
2304 seq_indent(seq, iter->depth-1); 2304 seq_indent(seq, iter->depth-1);
2305 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n", 2305 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2306 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children, 2306 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2307 tn->empty_children); 2307 tn->empty_children);
2308 2308
2309 } else { 2309 } else {
2310 struct leaf *l = (struct leaf *) n; 2310 struct leaf *l = (struct leaf *) n;
2311 int i; 2311 int i;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 40cf0d0e1b83..4b7a0d946a0d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -304,7 +304,7 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
304 304
305 /* No rate limit on loopback */ 305 /* No rate limit on loopback */
306 if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) 306 if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
307 goto out; 307 goto out;
308 308
309 /* Limit if icmp type is enabled in ratemask. */ 309 /* Limit if icmp type is enabled in ratemask. */
310 if ((1 << type) & sysctl_icmp_ratemask) 310 if ((1 << type) & sysctl_icmp_ratemask)
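
The ratemask test above is a plain bit test: ICMP type N is rate limited when bit N is set in sysctl_icmp_ratemask. With the long-standing default of 6168 (0x1818, i.e. bits 3, 4, 11 and 12: destination unreachable, source quench, time exceeded, parameter problem), a tiny worked check:

#include <stdio.h>

int main(void)
{
        unsigned long ratemask = 6168;  /* assumed default: 0x1818 */
        int type = 3;                   /* ICMP destination unreachable */

        if ((1 << type) & ratemask)
                printf("type %d is rate limited\n", type);
        /* (1 << 3) = 8, and 8 & 0x1818 != 0, so type 3 is limited;
         * echo reply (type 0) is not, since bit 0 is clear. */
        return 0;
}
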
@@ -350,9 +350,9 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
350 struct sk_buff *skb; 350 struct sk_buff *skb;
351 351
352 if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param, 352 if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
353 icmp_param->data_len+icmp_param->head_len, 353 icmp_param->data_len+icmp_param->head_len,
354 icmp_param->head_len, 354 icmp_param->head_len,
355 ipc, rt, MSG_DONTWAIT) < 0) 355 ipc, rt, MSG_DONTWAIT) < 0)
356 ip_flush_pending_frames(icmp_socket->sk); 356 ip_flush_pending_frames(icmp_socket->sk);
357 else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { 357 else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
358 struct icmphdr *icmph = skb->h.icmph; 358 struct icmphdr *icmph = skb->h.icmph;
@@ -755,7 +755,7 @@ static void icmp_redirect(struct sk_buff *skb)
755 skb->h.icmph->un.gateway, 755 skb->h.icmph->un.gateway,
756 iph->saddr, skb->dev); 756 iph->saddr, skb->dev);
757 break; 757 break;
758 } 758 }
759out: 759out:
760 return; 760 return;
761out_err: 761out_err:
@@ -959,7 +959,7 @@ int icmp_rcv(struct sk_buff *skb)
959 * Parse the ICMP message 959 * Parse the ICMP message
960 */ 960 */
961 961
962 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 962 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
963 /* 963 /*
964 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be 964 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
965 * silently ignored (we let user decide with a sysctl). 965 * silently ignored (we let user decide with a sysctl).
@@ -976,7 +976,7 @@ int icmp_rcv(struct sk_buff *skb)
976 icmph->type != ICMP_ADDRESS && 976 icmph->type != ICMP_ADDRESS &&
977 icmph->type != ICMP_ADDRESSREPLY) { 977 icmph->type != ICMP_ADDRESSREPLY) {
978 goto error; 978 goto error;
979 } 979 }
980 } 980 }
981 981
982 ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry); 982 ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry);
@@ -1085,7 +1085,7 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
1085 .input_entry = ICMP_MIB_DUMMY, 1085 .input_entry = ICMP_MIB_DUMMY,
1086 .handler = icmp_discard, 1086 .handler = icmp_discard,
1087 }, 1087 },
1088 [ICMP_INFO_REPLY] = { 1088 [ICMP_INFO_REPLY] = {
1089 .output_entry = ICMP_MIB_DUMMY, 1089 .output_entry = ICMP_MIB_DUMMY,
1090 .input_entry = ICMP_MIB_DUMMY, 1090 .input_entry = ICMP_MIB_DUMMY,
1091 .handler = icmp_discard, 1091 .handler = icmp_discard,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 024ae56cab25..b8e1625d34cf 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -35,7 +35,7 @@
35 * 35 *
36 * Chih-Jen Chang : Tried to revise IGMP to Version 2 36 * Chih-Jen Chang : Tried to revise IGMP to Version 2
37 * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu 37 * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
38 * The enhancements are mainly based on Steve Deering's 38 * The enhancements are mainly based on Steve Deering's
39 * ipmulti-3.5 source code. 39 * ipmulti-3.5 source code.
40 * Chih-Jen Chang : Added the igmp_get_mrouter_info and 40 * Chih-Jen Chang : Added the igmp_get_mrouter_info and
41 * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of 41 * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of
@@ -49,11 +49,11 @@
49 * Alan Cox : Stop IGMP from 0.0.0.0 being accepted. 49 * Alan Cox : Stop IGMP from 0.0.0.0 being accepted.
50 * Alan Cox : Use GFP_ATOMIC in the right places. 50 * Alan Cox : Use GFP_ATOMIC in the right places.
51 * Christian Daudt : igmp timer wasn't set for local group 51 * Christian Daudt : igmp timer wasn't set for local group
52 * memberships but was being deleted, 52 * memberships but was being deleted,
53 * which caused a "del_timer() called 53 * which caused a "del_timer() called
54 * from %p with timer not initialized\n" 54 * from %p with timer not initialized\n"
55 * message (960131). 55 * message (960131).
56 * Christian Daudt : removed del_timer from 56 * Christian Daudt : removed del_timer from
57 * igmp_timer_expire function (960205). 57 * igmp_timer_expire function (960205).
58 * Christian Daudt : igmp_heard_report now only calls 58 * Christian Daudt : igmp_heard_report now only calls
59 * igmp_timer_expire if tm->running is 59 * igmp_timer_expire if tm->running is
@@ -718,7 +718,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
718{ 718{
719 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) 719 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
720 return; 720 return;
721 in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv : 721 in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
722 IGMP_Unsolicited_Report_Count; 722 IGMP_Unsolicited_Report_Count;
723 igmp_ifc_start_timer(in_dev, 1); 723 igmp_ifc_start_timer(in_dev, 1);
724} 724}
@@ -838,7 +838,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
838 if (len == 8) { 838 if (len == 8) {
839 if (ih->code == 0) { 839 if (ih->code == 0) {
840 /* Alas, old v1 router presents here. */ 840 /* Alas, old v1 router presents here. */
841 841
842 max_delay = IGMP_Query_Response_Interval; 842 max_delay = IGMP_Query_Response_Interval;
843 in_dev->mr_v1_seen = jiffies + 843 in_dev->mr_v1_seen = jiffies +
844 IGMP_V1_Router_Present_Timeout; 844 IGMP_V1_Router_Present_Timeout;
@@ -860,10 +860,10 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
860 } else { /* v3 */ 860 } else { /* v3 */
861 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) 861 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
862 return; 862 return;
863 863
864 ih3 = (struct igmpv3_query *) skb->h.raw; 864 ih3 = (struct igmpv3_query *) skb->h.raw;
865 if (ih3->nsrcs) { 865 if (ih3->nsrcs) {
866 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) 866 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
867 + ntohs(ih3->nsrcs)*sizeof(__be32))) 867 + ntohs(ih3->nsrcs)*sizeof(__be32)))
868 return; 868 return;
869 ih3 = (struct igmpv3_query *) skb->h.raw; 869 ih3 = (struct igmpv3_query *) skb->h.raw;
@@ -909,7 +909,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
909 else 909 else
910 im->gsquery = mark; 910 im->gsquery = mark;
911 changed = !im->gsquery || 911 changed = !im->gsquery ||
912 igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs); 912 igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
913 spin_unlock_bh(&im->lock); 913 spin_unlock_bh(&im->lock);
914 if (changed) 914 if (changed)
915 igmp_mod_timer(im, max_delay); 915 igmp_mod_timer(im, max_delay);
@@ -1257,9 +1257,9 @@ out:
1257void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) 1257void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1258{ 1258{
1259 struct ip_mc_list *i, **ip; 1259 struct ip_mc_list *i, **ip;
1260 1260
1261 ASSERT_RTNL(); 1261 ASSERT_RTNL();
1262 1262
1263 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { 1263 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
1264 if (i->multiaddr==addr) { 1264 if (i->multiaddr==addr) {
1265 if (--i->users == 0) { 1265 if (--i->users == 0) {
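
ip_mc_dec_group above walks the multicast list through a pointer-to-pointer (ip = &in_dev->mc_list; ...; ip = &i->next), so the matching entry can be unlinked without special-casing the list head. The same idiom in a self-contained form:

#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* Unlink and free the first node whose key matches; head removal needs no special case. */
static void list_del_key(struct node **head, int key)
{
	struct node **pp, *n;

	for (pp = head; (n = *pp) != NULL; pp = &n->next) {
		if (n->key == key) {
			*pp = n->next;   /* splice the node out via the slot that pointed to it */
			free(n);
			return;
		}
	}
}
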
@@ -1436,7 +1436,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
1436#ifdef CONFIG_IP_MULTICAST 1436#ifdef CONFIG_IP_MULTICAST
1437 if (psf->sf_oldin && 1437 if (psf->sf_oldin &&
1438 !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { 1438 !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
1439 psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv : 1439 psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
1440 IGMP_Unsolicited_Report_Count; 1440 IGMP_Unsolicited_Report_Count;
1441 psf->sf_next = pmc->tomb; 1441 psf->sf_next = pmc->tomb;
1442 pmc->tomb = psf; 1442 pmc->tomb = psf;
@@ -1500,7 +1500,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1500 /* filter mode change */ 1500 /* filter mode change */
1501 pmc->sfmode = MCAST_INCLUDE; 1501 pmc->sfmode = MCAST_INCLUDE;
1502#ifdef CONFIG_IP_MULTICAST 1502#ifdef CONFIG_IP_MULTICAST
1503 pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : 1503 pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
1504 IGMP_Unsolicited_Report_Count; 1504 IGMP_Unsolicited_Report_Count;
1505 in_dev->mr_ifc_count = pmc->crcount; 1505 in_dev->mr_ifc_count = pmc->crcount;
1506 for (psf=pmc->sources; psf; psf = psf->sf_next) 1506 for (psf=pmc->sources; psf; psf = psf->sf_next)
@@ -1679,7 +1679,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1679#ifdef CONFIG_IP_MULTICAST 1679#ifdef CONFIG_IP_MULTICAST
1680 /* else no filters; keep old mode for reports */ 1680 /* else no filters; keep old mode for reports */
1681 1681
1682 pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : 1682 pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
1683 IGMP_Unsolicited_Report_Count; 1683 IGMP_Unsolicited_Report_Count;
1684 in_dev->mr_ifc_count = pmc->crcount; 1684 in_dev->mr_ifc_count = pmc->crcount;
1685 for (psf=pmc->sources; psf; psf = psf->sf_next) 1685 for (psf=pmc->sources; psf; psf = psf->sf_next)
@@ -1873,7 +1873,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1873 } else if (pmc->sfmode != omode) { 1873 } else if (pmc->sfmode != omode) {
1874 /* allow mode switches for empty-set filters */ 1874 /* allow mode switches for empty-set filters */
1875 ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0); 1875 ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
1876 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0, 1876 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
1877 NULL, 0); 1877 NULL, 0);
1878 pmc->sfmode = omode; 1878 pmc->sfmode = omode;
1879 } 1879 }
@@ -1899,7 +1899,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1899 } 1899 }
1900 1900
1901 /* update the interface filter */ 1901 /* update the interface filter */
1902 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1, 1902 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
1903 &mreqs->imr_sourceaddr, 1); 1903 &mreqs->imr_sourceaddr, 1);
1904 1904
1905 for (j=i+1; j<psl->sl_count; j++) 1905 for (j=i+1; j<psl->sl_count; j++)
@@ -1949,7 +1949,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1949 psl->sl_count++; 1949 psl->sl_count++;
1950 err = 0; 1950 err = 0;
1951 /* update the interface list */ 1951 /* update the interface list */
1952 ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1, 1952 ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
1953 &mreqs->imr_sourceaddr, 1); 1953 &mreqs->imr_sourceaddr, 1);
1954done: 1954done:
1955 rtnl_unlock(); 1955 rtnl_unlock();
@@ -2264,7 +2264,7 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2264 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2264 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2265 2265
2266 for (state->dev = dev_base, state->in_dev = NULL; 2266 for (state->dev = dev_base, state->in_dev = NULL;
2267 state->dev; 2267 state->dev;
2268 state->dev = state->dev->next) { 2268 state->dev = state->dev->next) {
2269 struct in_device *in_dev; 2269 struct in_device *in_dev;
2270 in_dev = in_dev_get(state->dev); 2270 in_dev = in_dev_get(state->dev);
@@ -2346,7 +2346,7 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2346static int igmp_mc_seq_show(struct seq_file *seq, void *v) 2346static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2347{ 2347{
2348 if (v == SEQ_START_TOKEN) 2348 if (v == SEQ_START_TOKEN)
2349 seq_puts(seq, 2349 seq_puts(seq,
2350 "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n"); 2350 "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
2351 else { 2351 else {
2352 struct ip_mc_list *im = (struct ip_mc_list *)v; 2352 struct ip_mc_list *im = (struct ip_mc_list *)v;
@@ -2426,7 +2426,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2426 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); 2426 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2427 2427
2428 for (state->dev = dev_base, state->idev = NULL, state->im = NULL; 2428 for (state->dev = dev_base, state->idev = NULL, state->im = NULL;
2429 state->dev; 2429 state->dev;
2430 state->dev = state->dev->next) { 2430 state->dev = state->dev->next) {
2431 struct in_device *idev; 2431 struct in_device *idev;
2432 idev = in_dev_get(state->dev); 2432 idev = in_dev_get(state->dev);
@@ -2531,7 +2531,7 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
2531 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); 2531 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2532 2532
2533 if (v == SEQ_START_TOKEN) { 2533 if (v == SEQ_START_TOKEN) {
2534 seq_printf(seq, 2534 seq_printf(seq,
2535 "%3s %6s " 2535 "%3s %6s "
2536 "%10s %10s %6s %6s\n", "Idx", 2536 "%10s %10s %6s %6s\n", "Idx",
2537 "Device", "MCA", 2537 "Device", "MCA",
@@ -2539,8 +2539,8 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
2539 } else { 2539 } else {
2540 seq_printf(seq, 2540 seq_printf(seq,
2541 "%3d %6.6s 0x%08x " 2541 "%3d %6.6s 0x%08x "
2542 "0x%08x %6lu %6lu\n", 2542 "0x%08x %6lu %6lu\n",
2543 state->dev->ifindex, state->dev->name, 2543 state->dev->ifindex, state->dev->name,
2544 ntohl(state->im->multiaddr), 2544 ntohl(state->im->multiaddr),
2545 ntohl(psf->sf_inaddr), 2545 ntohl(psf->sf_inaddr),
2546 psf->sf_count[MCAST_INCLUDE], 2546 psf->sf_count[MCAST_INCLUDE],
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 9d68837888d3..43fb1600f1f0 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -149,7 +149,7 @@ success:
149 if (!inet_csk(sk)->icsk_bind_hash) 149 if (!inet_csk(sk)->icsk_bind_hash)
150 inet_bind_hash(sk, tb, snum); 150 inet_bind_hash(sk, tb, snum);
151 BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); 151 BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
152 ret = 0; 152 ret = 0;
153 153
154fail_unlock: 154fail_unlock:
155 spin_unlock(&head->lock); 155 spin_unlock(&head->lock);
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(inet_csk_accept);
255 255
256/* 256/*
257 * Using different timers for retransmit, delayed acks and probes 257 * Using different timers for retransmit, delayed acks and probes
258 * We may wish use just one timer maintaining a list of expire jiffies 258 * We may wish use just one timer maintaining a list of expire jiffies
259 * to optimize. 259 * to optimize.
260 */ 260 */
261void inet_csk_init_xmit_timers(struct sock *sk, 261void inet_csk_init_xmit_timers(struct sock *sk,
@@ -273,7 +273,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,
273 icsk->icsk_delack_timer.function = delack_handler; 273 icsk->icsk_delack_timer.function = delack_handler;
274 sk->sk_timer.function = keepalive_handler; 274 sk->sk_timer.function = keepalive_handler;
275 275
276 icsk->icsk_retransmit_timer.data = 276 icsk->icsk_retransmit_timer.data =
277 icsk->icsk_delack_timer.data = 277 icsk->icsk_delack_timer.data =
278 sk->sk_timer.data = (unsigned long)sk; 278 sk->sk_timer.data = (unsigned long)sk;
279 279
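
The comment above wonders whether one timer driving a list of expiry times could replace the three per-socket timers. Purely to illustrate that idea (not how the kernel does it), a single dispatcher can fire whatever is due and re-arm itself for the earliest remaining deadline:

#include <stdint.h>

enum evt { EVT_RETRANSMIT, EVT_DELACK, EVT_KEEPALIVE, EVT_MAX };

struct expiry {
	uint64_t when;                    /* absolute deadline, e.g. in jiffies */
	int      armed;
};

/* One timer callback scans the table, fires what is due, and reports the
 * earliest remaining deadline so the caller can re-arm a single timer. */
static uint64_t single_timer_tick(struct expiry ev[EVT_MAX], uint64_t now,
				  void (*fire)(enum evt))
{
	uint64_t next = UINT64_MAX;
	int e;

	for (e = 0; e < EVT_MAX; e++) {
		if (!ev[e].armed)
			continue;
		if (ev[e].when <= now) {
			ev[e].armed = 0;
			fire((enum evt)e);
		} else if (ev[e].when < next) {
			next = ev[e].when;
		}
	}
	return next;   /* UINT64_MAX means nothing left to arm */
}
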
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8aa7d51e6881..5df71cd08da8 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -381,7 +381,7 @@ static int inet_diag_bc_run(const void *bc, int len,
381 if (addr[0] == 0 && addr[1] == 0 && 381 if (addr[0] == 0 && addr[1] == 0 &&
382 addr[2] == htonl(0xffff) && 382 addr[2] == htonl(0xffff) &&
383 bitstring_match(addr + 3, cond->addr, 383 bitstring_match(addr + 3, cond->addr,
384 cond->prefix_len)) 384 cond->prefix_len))
385 break; 385 break;
386 } 386 }
387 yes = 0; 387 yes = 0;
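
The bytecode condition above recognises an IPv4-mapped IPv6 address — first 80 bits zero, next 16 bits 0xffff — and then matches only the trailing 32 bits. A small stand-alone equivalent:

#include <stdint.h>
#include <arpa/inet.h>

/* addr points at 16 bytes (four big-endian 32-bit words) of an IPv6 address. */
static int is_v4_mapped(const uint32_t addr[4])
{
	return addr[0] == 0 && addr[1] == 0 && addr[2] == htonl(0xffff);
}

/* Extract the embedded IPv4 address (network byte order) if mapped. */
static int mapped_v4(const uint32_t addr[4], uint32_t *v4)
{
	if (!is_v4_mapped(addr))
		return 0;
	*v4 = addr[3];
	return 1;
}
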
@@ -518,7 +518,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
518 } 518 }
519 entry.sport = tw->tw_num; 519 entry.sport = tw->tw_num;
520 entry.dport = ntohs(tw->tw_dport); 520 entry.dport = ntohs(tw->tw_dport);
521 entry.userlocks = 0; 521 entry.userlocks = 0;
522 522
523 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 523 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
524 return 0; 524 return 0;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 150ace18dc75..fb662621c54e 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -262,7 +262,7 @@ not_unique:
262static inline u32 inet_sk_port_offset(const struct sock *sk) 262static inline u32 inet_sk_port_offset(const struct sock *sk)
263{ 263{
264 const struct inet_sock *inet = inet_sk(sk); 264 const struct inet_sock *inet = inet_sk(sk);
265 return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr, 265 return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
266 inet->dport); 266 inet->dport);
267} 267}
268 268
@@ -274,81 +274,81 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
274{ 274{
275 struct inet_hashinfo *hinfo = death_row->hashinfo; 275 struct inet_hashinfo *hinfo = death_row->hashinfo;
276 const unsigned short snum = inet_sk(sk)->num; 276 const unsigned short snum = inet_sk(sk)->num;
277 struct inet_bind_hashbucket *head; 277 struct inet_bind_hashbucket *head;
278 struct inet_bind_bucket *tb; 278 struct inet_bind_bucket *tb;
279 int ret; 279 int ret;
280 280
281 if (!snum) { 281 if (!snum) {
282 int low = sysctl_local_port_range[0]; 282 int low = sysctl_local_port_range[0];
283 int high = sysctl_local_port_range[1]; 283 int high = sysctl_local_port_range[1];
284 int range = high - low; 284 int range = high - low;
285 int i; 285 int i;
286 int port; 286 int port;
287 static u32 hint; 287 static u32 hint;
288 u32 offset = hint + inet_sk_port_offset(sk); 288 u32 offset = hint + inet_sk_port_offset(sk);
289 struct hlist_node *node; 289 struct hlist_node *node;
290 struct inet_timewait_sock *tw = NULL; 290 struct inet_timewait_sock *tw = NULL;
291 291
292 local_bh_disable(); 292 local_bh_disable();
293 for (i = 1; i <= range; i++) { 293 for (i = 1; i <= range; i++) {
294 port = low + (i + offset) % range; 294 port = low + (i + offset) % range;
295 head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; 295 head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
296 spin_lock(&head->lock); 296 spin_lock(&head->lock);
297 297
298 /* Does not bother with rcv_saddr checks, 298 /* Does not bother with rcv_saddr checks,
299 * because the established check is already 299 * because the established check is already
300 * unique enough. 300 * unique enough.
301 */ 301 */
302 inet_bind_bucket_for_each(tb, node, &head->chain) { 302 inet_bind_bucket_for_each(tb, node, &head->chain) {
303 if (tb->port == port) { 303 if (tb->port == port) {
304 BUG_TRAP(!hlist_empty(&tb->owners)); 304 BUG_TRAP(!hlist_empty(&tb->owners));
305 if (tb->fastreuse >= 0) 305 if (tb->fastreuse >= 0)
306 goto next_port; 306 goto next_port;
307 if (!__inet_check_established(death_row, 307 if (!__inet_check_established(death_row,
308 sk, port, 308 sk, port,
309 &tw)) 309 &tw))
310 goto ok; 310 goto ok;
311 goto next_port; 311 goto next_port;
312 } 312 }
313 } 313 }
314 314
315 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port); 315 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port);
316 if (!tb) { 316 if (!tb) {
317 spin_unlock(&head->lock); 317 spin_unlock(&head->lock);
318 break; 318 break;
319 } 319 }
320 tb->fastreuse = -1; 320 tb->fastreuse = -1;
321 goto ok; 321 goto ok;
322 322
323 next_port: 323 next_port:
324 spin_unlock(&head->lock); 324 spin_unlock(&head->lock);
325 } 325 }
326 local_bh_enable(); 326 local_bh_enable();
327 327
328 return -EADDRNOTAVAIL; 328 return -EADDRNOTAVAIL;
329 329
330ok: 330ok:
331 hint += i; 331 hint += i;
332 332
333 /* Head lock still held and bh's disabled */ 333 /* Head lock still held and bh's disabled */
334 inet_bind_hash(sk, tb, port); 334 inet_bind_hash(sk, tb, port);
335 if (sk_unhashed(sk)) { 335 if (sk_unhashed(sk)) {
336 inet_sk(sk)->sport = htons(port); 336 inet_sk(sk)->sport = htons(port);
337 __inet_hash(hinfo, sk, 0); 337 __inet_hash(hinfo, sk, 0);
338 } 338 }
339 spin_unlock(&head->lock); 339 spin_unlock(&head->lock);
340 340
341 if (tw) { 341 if (tw) {
342 inet_twsk_deschedule(tw, death_row); 342 inet_twsk_deschedule(tw, death_row);
343 inet_twsk_put(tw); 343 inet_twsk_put(tw);
344 } 344 }
345 345
346 ret = 0; 346 ret = 0;
347 goto out; 347 goto out;
348 } 348 }
349 349
350 head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; 350 head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
351 tb = inet_csk(sk)->icsk_bind_hash; 351 tb = inet_csk(sk)->icsk_bind_hash;
352 spin_lock_bh(&head->lock); 352 spin_lock_bh(&head->lock);
353 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 353 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
354 __inet_hash(hinfo, sk, 0); 354 __inet_hash(hinfo, sk, 0);
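
inet_hash_connect above picks an ephemeral port by starting from a rolling hint plus a per-connection hash offset, probing the configured range linearly, and advancing the hint by however far it had to walk. A simplified sketch of that search strategy; port_free() is a hypothetical stand-in for the bind-bucket and established-hash checks:

#include <stdint.h>

#define PORT_LOW   32768
#define PORT_HIGH  61000

static uint32_t hint;                      /* rolls forward across calls */

/* Hypothetical availability test standing in for the kernel's checks. */
extern int port_free(int port);

static int pick_local_port(uint32_t conn_offset)   /* conn_offset: hash of the endpoints */
{
	const int range = PORT_HIGH - PORT_LOW;
	uint32_t offset = hint + conn_offset;
	int i, port;

	if (range <= 0)
		return -1;
	for (i = 1; i <= range; i++) {
		port = PORT_LOW + (int)((i + offset) % (uint32_t)range);
		if (port_free(port)) {
			hint += i;             /* next search starts past this slot */
			return port;
		}
	}
	return -1;                              /* every port in the range is busy */
}
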
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index a22d11d2911c..c3ea0cd2e584 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -4,15 +4,15 @@
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * The IP forwarding functionality. 6 * The IP forwarding functionality.
7 * 7 *
8 * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $ 8 * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $
9 * 9 *
10 * Authors: see ip.c 10 * Authors: see ip.c
11 * 11 *
12 * Fixes: 12 * Fixes:
13 * Many : Split from ip.c , see ip_input.c for 13 * Many : Split from ip.c , see ip_input.c for
14 * history. 14 * history.
15 * Dave Gregorich : NULL ip_rt_put fix for multicast 15 * Dave Gregorich : NULL ip_rt_put fix for multicast
16 * routing. 16 * routing.
17 * Jos Vos : Add call_out_firewall before sending, 17 * Jos Vos : Add call_out_firewall before sending,
18 * use output device for accounting. 18 * use output device for accounting.
@@ -69,14 +69,14 @@ int ip_forward(struct sk_buff *skb)
69 goto drop; 69 goto drop;
70 70
71 skb->ip_summed = CHECKSUM_NONE; 71 skb->ip_summed = CHECKSUM_NONE;
72 72
73 /* 73 /*
74 * According to the RFC, we must first decrease the TTL field. If 74 * According to the RFC, we must first decrease the TTL field. If
75 * that reaches zero, we must reply an ICMP control message telling 75 * that reaches zero, we must reply an ICMP control message telling
76 * that the packet's lifetime expired. 76 * that the packet's lifetime expired.
77 */ 77 */
78 if (skb->nh.iph->ttl <= 1) 78 if (skb->nh.iph->ttl <= 1)
79 goto too_many_hops; 79 goto too_many_hops;
80 80
81 if (!xfrm4_route_forward(skb)) 81 if (!xfrm4_route_forward(skb))
82 goto drop; 82 goto drop;
@@ -107,16 +107,16 @@ int ip_forward(struct sk_buff *skb)
107 ip_forward_finish); 107 ip_forward_finish);
108 108
109sr_failed: 109sr_failed:
110 /* 110 /*
111 * Strict routing permits no gatewaying 111 * Strict routing permits no gatewaying
112 */ 112 */
113 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0); 113 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
114 goto drop; 114 goto drop;
115 115
116too_many_hops: 116too_many_hops:
117 /* Tell the sender its packet died... */ 117 /* Tell the sender its packet died... */
118 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 118 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
119 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); 119 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
120drop: 120drop:
121 kfree_skb(skb); 121 kfree_skb(skb);
122 return NET_RX_DROP; 122 return NET_RX_DROP;
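
The hunks above spell out the forwarding rule from the comment: a packet arriving with TTL <= 1 is not forwarded but answered with ICMP Time Exceeded, otherwise the TTL is decremented and the packet goes out. The decision in isolation:

#include <stdint.h>

struct ipv4_hdr_view {       /* only the field this check needs */
	uint8_t ttl;
};

enum fwd_verdict { FWD_FORWARD, FWD_TIME_EXCEEDED };

/* Decide whether to forward; on forward, decrement the TTL (the header
 * checksum must then be adjusted by the caller, which this sketch omits). */
static enum fwd_verdict forward_check(struct ipv4_hdr_view *iph)
{
	if (iph->ttl <= 1)
		return FWD_TIME_EXCEEDED;   /* reply with ICMP Time Exceeded, drop */
	iph->ttl--;
	return FWD_FORWARD;
}
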
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 8ce00d3703da..b6f055380373 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -4,7 +4,7 @@
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * The IP fragmentation functionality. 6 * The IP fragmentation functionality.
7 * 7 *
8 * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $ 8 * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
9 * 9 *
10 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> 10 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
@@ -238,7 +238,7 @@ static void ipq_kill(struct ipq *ipq)
238 } 238 }
239} 239}
240 240
241/* Memory limiting on fragments. Evictor trashes the oldest 241/* Memory limiting on fragments. Evictor trashes the oldest
242 * fragment queue until we are back under the threshold. 242 * fragment queue until we are back under the threshold.
243 */ 243 */
244static void ip_evictor(void) 244static void ip_evictor(void)
@@ -479,14 +479,14 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
479 goto err; 479 goto err;
480 } 480 }
481 481
482 offset = ntohs(skb->nh.iph->frag_off); 482 offset = ntohs(skb->nh.iph->frag_off);
483 flags = offset & ~IP_OFFSET; 483 flags = offset & ~IP_OFFSET;
484 offset &= IP_OFFSET; 484 offset &= IP_OFFSET;
485 offset <<= 3; /* offset is in 8-byte chunks */ 485 offset <<= 3; /* offset is in 8-byte chunks */
486 ihl = skb->nh.iph->ihl * 4; 486 ihl = skb->nh.iph->ihl * 4;
487 487
488 /* Determine the position of this fragment. */ 488 /* Determine the position of this fragment. */
489 end = offset + skb->len - ihl; 489 end = offset + skb->len - ihl;
490 490
491 /* Is this the final fragment? */ 491 /* Is this the final fragment? */
492 if ((flags & IP_MF) == 0) { 492 if ((flags & IP_MF) == 0) {
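
The reassembly hunk above unpacks frag_off: the top three bits are flags (IP_MF among them) and the low 13 bits give the fragment offset in 8-byte units, hence the shift by 3. A small decoder for the same field:

#include <stdint.h>
#include <arpa/inet.h>

#define IPH_OFFSET_MASK 0x1FFF     /* low 13 bits: offset in 8-byte units */
#define IPH_MF          0x2000     /* More Fragments flag */

struct frag_info {
	unsigned int byte_offset;      /* where this fragment sits in the datagram */
	int          more_fragments;   /* 0 => this is the final fragment */
};

static struct frag_info decode_frag_off(uint16_t frag_off_net)
{
	uint16_t v = ntohs(frag_off_net);
	struct frag_info fi;

	fi.byte_offset    = (unsigned int)(v & IPH_OFFSET_MASK) << 3;
	fi.more_fragments = (v & IPH_MF) != 0;
	return fi;
}
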
@@ -589,8 +589,8 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
589 else 589 else
590 qp->fragments = skb; 590 qp->fragments = skb;
591 591
592 if (skb->dev) 592 if (skb->dev)
593 qp->iif = skb->dev->ifindex; 593 qp->iif = skb->dev->ifindex;
594 skb->dev = NULL; 594 skb->dev = NULL;
595 skb_get_timestamp(skb, &qp->stamp); 595 skb_get_timestamp(skb, &qp->stamp);
596 qp->meat += skb->len; 596 qp->meat += skb->len;
@@ -684,7 +684,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
684 return head; 684 return head;
685 685
686out_nomem: 686out_nomem:
687 LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " 687 LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
688 "queue %p\n", qp); 688 "queue %p\n", qp);
689 goto out_fail; 689 goto out_fail;
690out_oversize: 690out_oversize:
@@ -703,7 +703,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
703 struct iphdr *iph = skb->nh.iph; 703 struct iphdr *iph = skb->nh.iph;
704 struct ipq *qp; 704 struct ipq *qp;
705 struct net_device *dev; 705 struct net_device *dev;
706 706
707 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); 707 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
708 708
709 /* Start by cleaning up the memory. */ 709 /* Start by cleaning up the memory. */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 51c83500790f..f12c0d6623a0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Linux NET3: GRE over IP protocol decoder. 2 * Linux NET3: GRE over IP protocol decoder.
3 * 3 *
4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru) 4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5 * 5 *
@@ -63,7 +63,7 @@
63 solution, but it supposes maintaing new variable in ALL 63 solution, but it supposes maintaing new variable in ALL
64 skb, even if no tunneling is used. 64 skb, even if no tunneling is used.
65 65
66 Current solution: t->recursion lock breaks dead loops. It looks 66 Current solution: t->recursion lock breaks dead loops. It looks
67 like dev->tbusy flag, but I preferred new variable, because 67 like dev->tbusy flag, but I preferred new variable, because
68 the semantics is different. One day, when hard_start_xmit 68 the semantics is different. One day, when hard_start_xmit
69 will be multithreaded we will have to use skb->encapsulation. 69 will be multithreaded we will have to use skb->encapsulation.
@@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb)
613 if (flags == 0 && 613 if (flags == 0 &&
614 skb->protocol == htons(ETH_P_WCCP)) { 614 skb->protocol == htons(ETH_P_WCCP)) {
615 skb->protocol = htons(ETH_P_IP); 615 skb->protocol = htons(ETH_P_IP);
616 if ((*(h + offset) & 0xF0) != 0x40) 616 if ((*(h + offset) & 0xF0) != 0x40)
617 offset += 4; 617 offset += 4;
618 } 618 }
619 619
@@ -816,7 +816,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
816 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 816 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
817 if (!new_skb) { 817 if (!new_skb) {
818 ip_rt_put(rt); 818 ip_rt_put(rt);
819 stats->tx_dropped++; 819 stats->tx_dropped++;
820 dev_kfree_skb(skb); 820 dev_kfree_skb(skb);
821 tunnel->recursion--; 821 tunnel->recursion--;
822 return 0; 822 return 0;
@@ -1044,7 +1044,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1044 so that I had to set ARPHRD_IPGRE to a random value. 1044 so that I had to set ARPHRD_IPGRE to a random value.
1045 I have an impression, that Cisco could make something similar, 1045 I have an impression, that Cisco could make something similar,
1046 but this feature is apparently missing in IOS<=11.2(8). 1046 but this feature is apparently missing in IOS<=11.2(8).
1047 1047
1048 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks 1048 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1049 with broadcast 224.66.66.66. If you have access to mbone, play with me :-) 1049 with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1050 1050
@@ -1076,9 +1076,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh
1076 p[1] = htons(type); 1076 p[1] = htons(type);
1077 1077
1078 /* 1078 /*
1079 * Set the source hardware address. 1079 * Set the source hardware address.
1080 */ 1080 */
1081 1081
1082 if (saddr) 1082 if (saddr)
1083 memcpy(&iph->saddr, saddr, 4); 1083 memcpy(&iph->saddr, saddr, 4);
1084 1084
@@ -1088,7 +1088,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh
1088 } 1088 }
1089 if (iph->daddr && !MULTICAST(iph->daddr)) 1089 if (iph->daddr && !MULTICAST(iph->daddr))
1090 return t->hlen; 1090 return t->hlen;
1091 1091
1092 return -t->hlen; 1092 return -t->hlen;
1093} 1093}
1094 1094
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 212734ca238f..f38e97647ac0 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -15,7 +15,7 @@
15 * Stefan Becker, <stefanb@yello.ping.de> 15 * Stefan Becker, <stefanb@yello.ping.de>
16 * Jorge Cwik, <jorge@laser.satlink.net> 16 * Jorge Cwik, <jorge@laser.satlink.net>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 * 18 *
19 * 19 *
20 * Fixes: 20 * Fixes:
21 * Alan Cox : Commented a couple of minor bits of surplus code 21 * Alan Cox : Commented a couple of minor bits of surplus code
@@ -98,13 +98,13 @@
98 * Jos Vos : Do accounting *before* call_in_firewall 98 * Jos Vos : Do accounting *before* call_in_firewall
99 * Willy Konynenberg : Transparent proxying support 99 * Willy Konynenberg : Transparent proxying support
100 * 100 *
101 * 101 *
102 * 102 *
103 * To Fix: 103 * To Fix:
104 * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient 104 * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
105 * and could be made very efficient with the addition of some virtual memory hacks to permit 105 * and could be made very efficient with the addition of some virtual memory hacks to permit
106 * the allocation of a buffer that can then be 'grown' by twiddling page tables. 106 * the allocation of a buffer that can then be 'grown' by twiddling page tables.
107 * Output fragmentation wants updating along with the buffer management to use a single 107 * Output fragmentation wants updating along with the buffer management to use a single
108 * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet 108 * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
109 * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause 109 * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
110 * fragmentation anyway. 110 * fragmentation anyway.
@@ -154,7 +154,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
154 154
155/* 155/*
156 * Process Router Attention IP option 156 * Process Router Attention IP option
157 */ 157 */
158int ip_call_ra_chain(struct sk_buff *skb) 158int ip_call_ra_chain(struct sk_buff *skb)
159{ 159{
160 struct ip_ra_chain *ra; 160 struct ip_ra_chain *ra;
@@ -202,8 +202,8 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
202 202
203 __skb_pull(skb, ihl); 203 __skb_pull(skb, ihl);
204 204
205 /* Point into the IP datagram, just past the header. */ 205 /* Point into the IP datagram, just past the header. */
206 skb->h.raw = skb->data; 206 skb->h.raw = skb->data;
207 207
208 rcu_read_lock(); 208 rcu_read_lock();
209 { 209 {
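
In the hunk above the transport header is found by pulling ihl 32-bit words off the front of the packet. The same arithmetic on a raw buffer, with the obvious sanity checks:

#include <stdint.h>
#include <stddef.h>

/* Return the offset of the transport header within an IPv4 packet,
 * or -1 if the header length field is implausible for the buffer. */
static long transport_offset(const uint8_t *pkt, size_t len)
{
	size_t ihl_bytes;

	if (len < 20)
		return -1;
	ihl_bytes = (size_t)(pkt[0] & 0x0F) * 4;   /* IHL lives in the low nibble */
	if (ihl_bytes < 20 || ihl_bytes > len)
		return -1;
	return (long)ihl_bytes;
}
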
@@ -259,7 +259,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
259 259
260/* 260/*
261 * Deliver IP Packets to the higher protocol layers. 261 * Deliver IP Packets to the higher protocol layers.
262 */ 262 */
263int ip_local_deliver(struct sk_buff *skb) 263int ip_local_deliver(struct sk_buff *skb)
264{ 264{
265 /* 265 /*
@@ -335,14 +335,14 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
335 /* 335 /*
336 * Initialise the virtual path cache for the packet. It describes 336 * Initialise the virtual path cache for the packet. It describes
337 * how the packet travels inside Linux networking. 337 * how the packet travels inside Linux networking.
338 */ 338 */
339 if (skb->dst == NULL) { 339 if (skb->dst == NULL) {
340 int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, 340 int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
341 skb->dev); 341 skb->dev);
342 if (unlikely(err)) { 342 if (unlikely(err)) {
343 if (err == -EHOSTUNREACH) 343 if (err == -EHOSTUNREACH)
344 IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); 344 IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
345 goto drop; 345 goto drop;
346 } 346 }
347 } 347 }
348 348
@@ -363,13 +363,13 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
363 return dst_input(skb); 363 return dst_input(skb);
364 364
365drop: 365drop:
366 kfree_skb(skb); 366 kfree_skb(skb);
367 return NET_RX_DROP; 367 return NET_RX_DROP;
368} 368}
369 369
370/* 370/*
371 * Main IP Receive routine. 371 * Main IP Receive routine.
372 */ 372 */
373int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 373int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
374{ 374{
375 struct iphdr *iph; 375 struct iphdr *iph;
@@ -437,9 +437,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
437inhdr_error: 437inhdr_error:
438 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 438 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
439drop: 439drop:
440 kfree_skb(skb); 440 kfree_skb(skb);
441out: 441out:
442 return NET_RX_DROP; 442 return NET_RX_DROP;
443} 443}
444 444
445EXPORT_SYMBOL(ip_statistics); 445EXPORT_SYMBOL(ip_statistics);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 9f02917d6f45..f906a80d5a87 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -8,7 +8,7 @@
8 * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $ 8 * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $
9 * 9 *
10 * Authors: A.N.Kuznetsov 10 * Authors: A.N.Kuznetsov
11 * 11 *
12 */ 12 */
13 13
14#include <linux/capability.h> 14#include <linux/capability.h>
@@ -26,7 +26,7 @@
26#include <net/route.h> 26#include <net/route.h>
27#include <net/cipso_ipv4.h> 27#include <net/cipso_ipv4.h>
28 28
29/* 29/*
30 * Write options to IP header, record destination address to 30 * Write options to IP header, record destination address to
31 * source route option, address of outgoing interface 31 * source route option, address of outgoing interface
32 * (we should already know it, so that this function is allowed be 32 * (we should already know it, so that this function is allowed be
@@ -76,7 +76,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
76 } 76 }
77} 77}
78 78
79/* 79/*
80 * Provided (sopt, skb) points to received options, 80 * Provided (sopt, skb) points to received options,
81 * build in dopt compiled option set appropriate for answering. 81 * build in dopt compiled option set appropriate for answering.
82 * i.e. invert SRR option, copy anothers, 82 * i.e. invert SRR option, copy anothers,
@@ -85,7 +85,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
85 * NOTE: dopt cannot point to skb. 85 * NOTE: dopt cannot point to skb.
86 */ 86 */
87 87
88int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) 88int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
89{ 89{
90 struct ip_options *sopt; 90 struct ip_options *sopt;
91 unsigned char *sptr, *dptr; 91 unsigned char *sptr, *dptr;
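
ip_options_echo, described above, builds reply options by inverting the received source route so the answer retraces the recorded hops in the opposite order, while the other options are copied through. Roughly speaking, the inversion is just reversing an array of 4-byte addresses:

#include <stdint.h>
#include <stddef.h>

/* Reverse a recorded route of 'n' IPv4 addresses in place. */
static void invert_route(uint32_t addrs[], size_t n)
{
	size_t i, j;
	uint32_t tmp;

	for (i = 0, j = n ? n - 1 : 0; i < j; i++, j--) {
		tmp = addrs[i];
		addrs[i] = addrs[j];
		addrs[j] = tmp;
	}
}
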
@@ -215,7 +215,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
215 * Simple and stupid 8), but the most efficient way. 215 * Simple and stupid 8), but the most efficient way.
216 */ 216 */
217 217
218void ip_options_fragment(struct sk_buff * skb) 218void ip_options_fragment(struct sk_buff * skb)
219{ 219{
220 unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr); 220 unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr);
221 struct ip_options * opt = &(IPCB(skb)->opt); 221 struct ip_options * opt = &(IPCB(skb)->opt);
@@ -370,7 +370,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
370 switch (optptr[3]&0xF) { 370 switch (optptr[3]&0xF) {
371 case IPOPT_TS_TSONLY: 371 case IPOPT_TS_TSONLY:
372 opt->ts = optptr - iph; 372 opt->ts = optptr - iph;
373 if (skb) 373 if (skb)
374 timeptr = (__be32*)&optptr[optptr[2]-1]; 374 timeptr = (__be32*)&optptr[optptr[2]-1];
375 opt->ts_needtime = 1; 375 opt->ts_needtime = 1;
376 optptr[2] += 4; 376 optptr[2] += 4;
@@ -448,7 +448,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb)
448 goto error; 448 goto error;
449 } 449 }
450 opt->cipso = optptr - iph; 450 opt->cipso = optptr - iph;
451 if (cipso_v4_validate(&optptr)) { 451 if (cipso_v4_validate(&optptr)) {
452 pp_ptr = optptr; 452 pp_ptr = optptr;
453 goto error; 453 goto error;
454 } 454 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a0f2008584bc..bb0bb8f07c54 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -22,7 +22,7 @@
22 * Fixes: 22 * Fixes:
23 * Alan Cox : Missing nonblock feature in ip_build_xmit. 23 * Alan Cox : Missing nonblock feature in ip_build_xmit.
24 * Mike Kilburn : htons() missing in ip_build_xmit. 24 * Mike Kilburn : htons() missing in ip_build_xmit.
25 * Bradford Johnson: Fix faulty handling of some frames when 25 * Bradford Johnson: Fix faulty handling of some frames when
26 * no route is found. 26 * no route is found.
27 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit 27 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
28 * (in case if packet not accepted by 28 * (in case if packet not accepted by
@@ -33,9 +33,9 @@
33 * some redundant tests. 33 * some redundant tests.
34 * Vitaly E. Lavrov : Transparent proxy revived after year coma. 34 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
35 * Andi Kleen : Replace ip_reply with ip_send_reply. 35 * Andi Kleen : Replace ip_reply with ip_send_reply.
36 * Andi Kleen : Split fast and slow ip_build_xmit path 36 * Andi Kleen : Split fast and slow ip_build_xmit path
37 * for decreased register pressure on x86 37 * for decreased register pressure on x86
38 * and more readibility. 38 * and more readibility.
39 * Marc Boucher : When call_out_firewall returns FW_QUEUE, 39 * Marc Boucher : When call_out_firewall returns FW_QUEUE,
40 * silently drop skb instead of failing with -EPERM. 40 * silently drop skb instead of failing with -EPERM.
41 * Detlev Wengorz : Copy protocol for fragments. 41 * Detlev Wengorz : Copy protocol for fragments.
@@ -114,7 +114,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
114 return ttl; 114 return ttl;
115} 115}
116 116
117/* 117/*
118 * Add an ip header to a skbuff and send it out. 118 * Add an ip header to a skbuff and send it out.
119 * 119 *
120 */ 120 */
@@ -243,7 +243,7 @@ int ip_mc_output(struct sk_buff *skb)
243 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 243 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
244 if (newskb) 244 if (newskb)
245 NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL, 245 NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
246 newskb->dev, 246 newskb->dev,
247 ip_dev_loopback_xmit); 247 ip_dev_loopback_xmit);
248 } 248 }
249 249
@@ -277,7 +277,7 @@ int ip_output(struct sk_buff *skb)
277 skb->protocol = htons(ETH_P_IP); 277 skb->protocol = htons(ETH_P_IP);
278 278
279 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, 279 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
280 ip_finish_output, 280 ip_finish_output,
281 !(IPCB(skb)->flags & IPSKB_REROUTED)); 281 !(IPCB(skb)->flags & IPSKB_REROUTED));
282} 282}
283 283
@@ -660,7 +660,7 @@ slow_path:
660 return err; 660 return err;
661 661
662fail: 662fail:
663 kfree_skb(skb); 663 kfree_skb(skb);
664 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); 664 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
665 return err; 665 return err;
666} 666}
@@ -755,7 +755,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
755 * from many pieces of data. Each pieces will be holded on the socket 755 * from many pieces of data. Each pieces will be holded on the socket
756 * until ip_push_pending_frames() is called. Each piece can be a page 756 * until ip_push_pending_frames() is called. Each piece can be a page
757 * or non-page data. 757 * or non-page data.
758 * 758 *
759 * Not only UDP, other transport protocols - e.g. raw sockets - can use 759 * Not only UDP, other transport protocols - e.g. raw sockets - can use
760 * this interface potentially. 760 * this interface potentially.
761 * 761 *
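
The comment above describes the corking model: pieces accumulate on the socket until ip_push_pending_frames() emits them as one datagram. From user space the same path is reachable with MSG_MORE (or UDP_CORK); for example, two sends that end up as a single UDP datagram:

#include <sys/socket.h>
#include <netinet/in.h>

/* Send "hello " and "world" as one UDP datagram (Linux-specific MSG_MORE). */
static int send_in_two_pieces(int fd, const struct sockaddr_in *dst)
{
	if (sendto(fd, "hello ", 6, MSG_MORE,
		   (const struct sockaddr *)dst, sizeof(*dst)) < 0)
		return -1;
	/* no MSG_MORE here: this call completes and transmits the pending datagram */
	if (sendto(fd, "world", 5, 0,
		   (const struct sockaddr *)dst, sizeof(*dst)) < 0)
		return -1;
	return 0;
}
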
@@ -888,7 +888,7 @@ alloc_new_skb:
888 datalen = maxfraglen - fragheaderlen; 888 datalen = maxfraglen - fragheaderlen;
889 fraglen = datalen + fragheaderlen; 889 fraglen = datalen + fragheaderlen;
890 890
891 if ((flags & MSG_MORE) && 891 if ((flags & MSG_MORE) &&
892 !(rt->u.dst.dev->features&NETIF_F_SG)) 892 !(rt->u.dst.dev->features&NETIF_F_SG))
893 alloclen = mtu; 893 alloclen = mtu;
894 else 894 else
@@ -903,14 +903,14 @@ alloc_new_skb:
903 alloclen += rt->u.dst.trailer_len; 903 alloclen += rt->u.dst.trailer_len;
904 904
905 if (transhdrlen) { 905 if (transhdrlen) {
906 skb = sock_alloc_send_skb(sk, 906 skb = sock_alloc_send_skb(sk,
907 alloclen + hh_len + 15, 907 alloclen + hh_len + 15,
908 (flags & MSG_DONTWAIT), &err); 908 (flags & MSG_DONTWAIT), &err);
909 } else { 909 } else {
910 skb = NULL; 910 skb = NULL;
911 if (atomic_read(&sk->sk_wmem_alloc) <= 911 if (atomic_read(&sk->sk_wmem_alloc) <=
912 2 * sk->sk_sndbuf) 912 2 * sk->sk_sndbuf)
913 skb = sock_wmalloc(sk, 913 skb = sock_wmalloc(sk,
914 alloclen + hh_len + 15, 1, 914 alloclen + hh_len + 15, 1,
915 sk->sk_allocation); 915 sk->sk_allocation);
916 if (unlikely(skb == NULL)) 916 if (unlikely(skb == NULL))
@@ -971,7 +971,7 @@ alloc_new_skb:
971 unsigned int off; 971 unsigned int off;
972 972
973 off = skb->len; 973 off = skb->len;
974 if (getfrag(from, skb_put(skb, copy), 974 if (getfrag(from, skb_put(skb, copy),
975 offset, copy, off, skb) < 0) { 975 offset, copy, off, skb) < 0) {
976 __skb_trim(skb, off); 976 __skb_trim(skb, off);
977 err = -EFAULT; 977 err = -EFAULT;
@@ -993,7 +993,7 @@ alloc_new_skb:
993 goto error; 993 goto error;
994 } 994 }
995 get_page(page); 995 get_page(page);
996 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0); 996 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
997 frag = &skb_shinfo(skb)->frags[i]; 997 frag = &skb_shinfo(skb)->frags[i];
998 } 998 }
999 } else if (i < MAX_SKB_FRAGS) { 999 } else if (i < MAX_SKB_FRAGS) {
@@ -1033,7 +1033,7 @@ alloc_new_skb:
1033error: 1033error:
1034 inet->cork.length -= length; 1034 inet->cork.length -= length;
1035 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); 1035 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1036 return err; 1036 return err;
1037} 1037}
1038 1038
1039ssize_t ip_append_page(struct sock *sk, struct page *page, 1039ssize_t ip_append_page(struct sock *sk, struct page *page,
@@ -1257,7 +1257,7 @@ int ip_push_pending_frames(struct sock *sk)
1257 skb->dst = dst_clone(&rt->u.dst); 1257 skb->dst = dst_clone(&rt->u.dst);
1258 1258
1259 /* Netfilter gets whole the not fragmented skb. */ 1259 /* Netfilter gets whole the not fragmented skb. */
1260 err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, 1260 err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
1261 skb->dst->dev, dst_output); 1261 skb->dst->dev, dst_output);
1262 if (err) { 1262 if (err) {
1263 if (err > 0) 1263 if (err > 0)
@@ -1305,21 +1305,21 @@ void ip_flush_pending_frames(struct sock *sk)
1305/* 1305/*
1306 * Fetch data from kernel space and fill in checksum if needed. 1306 * Fetch data from kernel space and fill in checksum if needed.
1307 */ 1307 */
1308static int ip_reply_glue_bits(void *dptr, char *to, int offset, 1308static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1309 int len, int odd, struct sk_buff *skb) 1309 int len, int odd, struct sk_buff *skb)
1310{ 1310{
1311 __wsum csum; 1311 __wsum csum;
1312 1312
1313 csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0); 1313 csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1314 skb->csum = csum_block_add(skb->csum, csum, odd); 1314 skb->csum = csum_block_add(skb->csum, csum, odd);
1315 return 0; 1315 return 0;
1316} 1316}
1317 1317
1318/* 1318/*
1319 * Generic function to send a packet as reply to another packet. 1319 * Generic function to send a packet as reply to another packet.
1320 * Used to send TCP resets so far. ICMP should use this function too. 1320 * Used to send TCP resets so far. ICMP should use this function too.
1321 * 1321 *
1322 * Should run single threaded per socket because it uses the sock 1322 * Should run single threaded per socket because it uses the sock
1323 * structure to pass arguments. 1323 * structure to pass arguments.
1324 * 1324 *
1325 * LATER: switch from ip_build_xmit to ip_append_* 1325 * LATER: switch from ip_build_xmit to ip_append_*
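
ip_reply_glue_bits above copies each chunk and folds its checksum into the skb with csum_block_add, whose odd argument records whether the chunk started on an odd byte. A self-contained version of that incremental 16-bit ones'-complement sum over several pieces:

#include <stdint.h>
#include <stddef.h>

/* Sum one block as 16-bit big-endian words; a trailing odd byte is padded. */
static uint32_t sum_block(const uint8_t *p, size_t len)
{
	uint32_t s = 0;

	while (len > 1) {
		s += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		s += (uint32_t)p[0] << 8;
	return s;
}

static uint32_t fold16(uint32_t s)
{
	while (s >> 16)
		s = (s & 0xFFFF) + (s >> 16);
	return s;
}

/* Fold a block starting at absolute offset 'off' into a running sum.
 * A block starting on an odd byte contributes with its bytes swapped,
 * which is what the 'odd' argument of csum_block_add accounts for. */
static uint32_t csum_add_block(uint32_t sum, const uint8_t *p, size_t len, size_t off)
{
	uint32_t s = fold16(sum_block(p, len));

	if (off & 1)
		s = ((s & 0xFF) << 8) | (s >> 8);
	return fold16(sum + s);
}

/* Final 16-bit Internet checksum from the running sum. */
static uint16_t csum_finish(uint32_t sum)
{
	return (uint16_t)~fold16(sum);
}
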
@@ -1357,7 +1357,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1357 /* Not quite clean, but right. */ 1357 /* Not quite clean, but right. */
1358 .uli_u = { .ports = 1358 .uli_u = { .ports =
1359 { .sport = skb->h.th->dest, 1359 { .sport = skb->h.th->dest,
1360 .dport = skb->h.th->source } }, 1360 .dport = skb->h.th->source } },
1361 .proto = sk->sk_protocol }; 1361 .proto = sk->sk_protocol };
1362 security_skb_classify_flow(skb, &fl); 1362 security_skb_classify_flow(skb, &fl);
1363 if (ip_route_output_key(&rt, &fl)) 1363 if (ip_route_output_key(&rt, &fl))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 57d4bae6f080..e120686c3cb8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -4,7 +4,7 @@
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * The IP to API glue. 6 * The IP to API glue.
7 * 7 *
8 * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $ 8 * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Authors: see ip.c 10 * Authors: see ip.c
@@ -12,7 +12,7 @@
12 * Fixes: 12 * Fixes:
13 * Many : Split from ip.c , see ip.c for history. 13 * Many : Split from ip.c , see ip.c for history.
14 * Martin Mares : TOS setting fixed. 14 * Martin Mares : TOS setting fixed.
15 * Alan Cox : Fixed a couple of oopses in Martin's 15 * Alan Cox : Fixed a couple of oopses in Martin's
16 * TOS tweaks. 16 * TOS tweaks.
17 * Mike McLagan : Routing by source 17 * Mike McLagan : Routing by source
18 */ 18 */
@@ -253,7 +253,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
253 return 0; 253 return 0;
254} 254}
255 255
256void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 256void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
257 __be16 port, u32 info, u8 *payload) 257 __be16 port, u32 info, u8 *payload)
258{ 258{
259 struct inet_sock *inet = inet_sk(sk); 259 struct inet_sock *inet = inet_sk(sk);
@@ -266,10 +266,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
266 if (!skb) 266 if (!skb)
267 return; 267 return;
268 268
269 serr = SKB_EXT_ERR(skb); 269 serr = SKB_EXT_ERR(skb);
270 serr->ee.ee_errno = err; 270 serr->ee.ee_errno = err;
271 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP; 271 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
272 serr->ee.ee_type = skb->h.icmph->type; 272 serr->ee.ee_type = skb->h.icmph->type;
273 serr->ee.ee_code = skb->h.icmph->code; 273 serr->ee.ee_code = skb->h.icmph->code;
274 serr->ee.ee_pad = 0; 274 serr->ee.ee_pad = 0;
275 serr->ee.ee_info = info; 275 serr->ee.ee_info = info;
@@ -301,10 +301,10 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
301 skb->nh.iph = iph; 301 skb->nh.iph = iph;
302 iph->daddr = daddr; 302 iph->daddr = daddr;
303 303
304 serr = SKB_EXT_ERR(skb); 304 serr = SKB_EXT_ERR(skb);
305 serr->ee.ee_errno = err; 305 serr->ee.ee_errno = err;
306 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL; 306 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
307 serr->ee.ee_type = 0; 307 serr->ee.ee_type = 0;
308 serr->ee.ee_code = 0; 308 serr->ee.ee_code = 0;
309 serr->ee.ee_pad = 0; 309 serr->ee.ee_pad = 0;
310 serr->ee.ee_info = info; 310 serr->ee.ee_info = info;
@@ -319,7 +319,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
319 kfree_skb(skb); 319 kfree_skb(skb);
320} 320}
321 321
322/* 322/*
323 * Handle MSG_ERRQUEUE 323 * Handle MSG_ERRQUEUE
324 */ 324 */
325int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) 325int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
@@ -391,7 +391,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
391 } else 391 } else
392 spin_unlock_bh(&sk->sk_error_queue.lock); 392 spin_unlock_bh(&sk->sk_error_queue.lock);
393 393
394out_free_skb: 394out_free_skb:
395 kfree_skb(skb); 395 kfree_skb(skb);
396out: 396out:
397 return err; 397 return err;
@@ -409,15 +409,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
409 struct inet_sock *inet = inet_sk(sk); 409 struct inet_sock *inet = inet_sk(sk);
410 int val=0,err; 410 int val=0,err;
411 411
412 if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | 412 if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
413 (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | 413 (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
414 (1<<IP_RETOPTS) | (1<<IP_TOS) | 414 (1<<IP_RETOPTS) | (1<<IP_TOS) |
415 (1<<IP_TTL) | (1<<IP_HDRINCL) | 415 (1<<IP_TTL) | (1<<IP_HDRINCL) |
416 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 416 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
417 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 417 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
418 (1<<IP_PASSSEC))) || 418 (1<<IP_PASSSEC))) ||
419 optname == IP_MULTICAST_TTL || 419 optname == IP_MULTICAST_TTL ||
420 optname == IP_MULTICAST_LOOP) { 420 optname == IP_MULTICAST_LOOP) {
421 if (optlen >= sizeof(int)) { 421 if (optlen >= sizeof(int)) {
422 if (get_user(val, (int __user *) optval)) 422 if (get_user(val, (int __user *) optval))
423 return -EFAULT; 423 return -EFAULT;
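
The bitmask above only decides which options take a plain int from the caller; setting them from user space is an ordinary setsockopt() at the IPPROTO_IP level, for example:

#include <sys/socket.h>
#include <netinet/in.h>

/* Ask for a low-delay TOS and a multicast TTL of 1 on an IPv4 socket. */
static int tune_ipv4_socket(int fd)
{
	int tos = 0x10;          /* IPTOS_LOWDELAY */
	int mc_ttl = 1;

	if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &mc_ttl, sizeof(mc_ttl)) < 0)
		return -1;
	return 0;
}
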
@@ -511,7 +511,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
511 val &= ~3; 511 val &= ~3;
512 val |= inet->tos & 3; 512 val |= inet->tos & 3;
513 } 513 }
514 if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP && 514 if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
515 !capable(CAP_NET_ADMIN)) { 515 !capable(CAP_NET_ADMIN)) {
516 err = -EPERM; 516 err = -EPERM;
517 break; 517 break;
@@ -519,7 +519,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
519 if (inet->tos != val) { 519 if (inet->tos != val) {
520 inet->tos = val; 520 inet->tos = val;
521 sk->sk_priority = rt_tos2priority(val); 521 sk->sk_priority = rt_tos2priority(val);
522 sk_dst_reset(sk); 522 sk_dst_reset(sk);
523 } 523 }
524 break; 524 break;
525 case IP_TTL: 525 case IP_TTL:
@@ -556,13 +556,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
556 if (val < 0 || val > 255) 556 if (val < 0 || val > 255)
557 goto e_inval; 557 goto e_inval;
558 inet->mc_ttl = val; 558 inet->mc_ttl = val;
559 break; 559 break;
560 case IP_MULTICAST_LOOP: 560 case IP_MULTICAST_LOOP:
561 if (optlen<1) 561 if (optlen<1)
562 goto e_inval; 562 goto e_inval;
563 inet->mc_loop = !!val; 563 inet->mc_loop = !!val;
564 break; 564 break;
565 case IP_MULTICAST_IF: 565 case IP_MULTICAST_IF:
566 { 566 {
567 struct ip_mreqn mreq; 567 struct ip_mreqn mreq;
568 struct net_device *dev = NULL; 568 struct net_device *dev = NULL;
@@ -616,7 +616,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
616 } 616 }
617 617
618 case IP_ADD_MEMBERSHIP: 618 case IP_ADD_MEMBERSHIP:
619 case IP_DROP_MEMBERSHIP: 619 case IP_DROP_MEMBERSHIP:
620 { 620 {
621 struct ip_mreqn mreq; 621 struct ip_mreqn mreq;
622 622
@@ -629,7 +629,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
629 } else { 629 } else {
630 memset(&mreq, 0, sizeof(mreq)); 630 memset(&mreq, 0, sizeof(mreq));
631 if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq))) 631 if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq)))
632 break; 632 break;
633 } 633 }
634 634
635 if (optname == IP_ADD_MEMBERSHIP) 635 if (optname == IP_ADD_MEMBERSHIP)
@@ -714,7 +714,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
714 break; 714 break;
715 } 715 }
716 case MCAST_JOIN_GROUP: 716 case MCAST_JOIN_GROUP:
717 case MCAST_LEAVE_GROUP: 717 case MCAST_LEAVE_GROUP:
718 { 718 {
719 struct group_req greq; 719 struct group_req greq;
720 struct sockaddr_in *psin; 720 struct sockaddr_in *psin;
@@ -858,16 +858,16 @@ mc_msf_out:
858 kfree(gsf); 858 kfree(gsf);
859 break; 859 break;
860 } 860 }
861 case IP_ROUTER_ALERT: 861 case IP_ROUTER_ALERT:
862 err = ip_ra_control(sk, val ? 1 : 0, NULL); 862 err = ip_ra_control(sk, val ? 1 : 0, NULL);
863 break; 863 break;
864 864
865 case IP_FREEBIND: 865 case IP_FREEBIND:
866 if (optlen<1) 866 if (optlen<1)
867 goto e_inval; 867 goto e_inval;
868 inet->freebind = !!val; 868 inet->freebind = !!val;
869 break; 869 break;
870 870
871 case IP_IPSEC_POLICY: 871 case IP_IPSEC_POLICY:
872 case IP_XFRM_POLICY: 872 case IP_XFRM_POLICY:
873 err = -EPERM; 873 err = -EPERM;
@@ -954,7 +954,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
954 struct inet_sock *inet = inet_sk(sk); 954 struct inet_sock *inet = inet_sk(sk);
955 int val; 955 int val;
956 int len; 956 int len;
957 957
958 if(level!=SOL_IP) 958 if(level!=SOL_IP)
959 return -EOPNOTSUPP; 959 return -EOPNOTSUPP;
960 960
@@ -969,7 +969,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
969 return -EFAULT; 969 return -EFAULT;
970 if(len < 0) 970 if(len < 0)
971 return -EINVAL; 971 return -EINVAL;
972 972
973 lock_sock(sk); 973 lock_sock(sk);
974 974
975 switch(optname) { 975 switch(optname) {
@@ -984,7 +984,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
984 inet->opt->optlen); 984 inet->opt->optlen);
985 release_sock(sk); 985 release_sock(sk);
986 986
987 if (opt->optlen == 0) 987 if (opt->optlen == 0)
988 return put_user(0, optlen); 988 return put_user(0, optlen);
989 989
990 ip_options_undo(opt); 990 ip_options_undo(opt);
@@ -1059,8 +1059,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1059 addr.s_addr = inet->mc_addr; 1059 addr.s_addr = inet->mc_addr;
1060 release_sock(sk); 1060 release_sock(sk);
1061 1061
1062 if(put_user(len, optlen)) 1062 if(put_user(len, optlen))
1063 return -EFAULT; 1063 return -EFAULT;
1064 if(copy_to_user(optval, &addr, len)) 1064 if(copy_to_user(optval, &addr, len))
1065 return -EFAULT; 1065 return -EFAULT;
1066 return 0; 1066 return 0;
@@ -1101,7 +1101,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1101 release_sock(sk); 1101 release_sock(sk);
1102 return err; 1102 return err;
1103 } 1103 }
1104 case IP_PKTOPTIONS: 1104 case IP_PKTOPTIONS:
1105 { 1105 {
1106 struct msghdr msg; 1106 struct msghdr msg;
1107 1107
@@ -1129,15 +1129,15 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1129 len -= msg.msg_controllen; 1129 len -= msg.msg_controllen;
1130 return put_user(len, optlen); 1130 return put_user(len, optlen);
1131 } 1131 }
1132 case IP_FREEBIND: 1132 case IP_FREEBIND:
1133 val = inet->freebind; 1133 val = inet->freebind;
1134 break; 1134 break;
1135 default: 1135 default:
1136 release_sock(sk); 1136 release_sock(sk);
1137 return -ENOPROTOOPT; 1137 return -ENOPROTOOPT;
1138 } 1138 }
1139 release_sock(sk); 1139 release_sock(sk);
1140 1140
1141 if (len < sizeof(int) && len > 0 && val>=0 && val<255) { 1141 if (len < sizeof(int) && len > 0 && val>=0 && val<255) {
1142 unsigned char ucval = (unsigned char)val; 1142 unsigned char ucval = (unsigned char)val;
1143 len = 1; 1143 len = 1;
@@ -1168,7 +1168,7 @@ int ip_getsockopt(struct sock *sk, int level,
1168 && (optname < MRT_BASE || optname > MRT_BASE+10) 1168 && (optname < MRT_BASE || optname > MRT_BASE+10)
1169#endif 1169#endif
1170 ) { 1170 ) {
1171 int len; 1171 int len;
1172 1172
1173 if(get_user(len,optlen)) 1173 if(get_user(len,optlen))
1174 return -EFAULT; 1174 return -EFAULT;
@@ -1197,7 +1197,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1197 && (optname < MRT_BASE || optname > MRT_BASE+10) 1197 && (optname < MRT_BASE || optname > MRT_BASE+10)
1198#endif 1198#endif
1199 ) { 1199 ) {
1200 int len; 1200 int len;
1201 1201
1202 if (get_user(len, optlen)) 1202 if (get_user(len, optlen))
1203 return -EFAULT; 1203 return -EFAULT;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 3839b706142e..aa704b88f014 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free 7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option) 8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version. 9 * any later version.
10 * 10 *
11 * Todo: 11 * Todo:
@@ -48,7 +48,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
48 u8 *start, *scratch; 48 u8 *start, *scratch;
49 struct crypto_comp *tfm; 49 struct crypto_comp *tfm;
50 int cpu; 50 int cpu;
51 51
52 plen = skb->len; 52 plen = skb->len;
53 dlen = IPCOMP_SCRATCH_SIZE; 53 dlen = IPCOMP_SCRATCH_SIZE;
54 start = skb->data; 54 start = skb->data;
@@ -69,11 +69,11 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
69 err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); 69 err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
70 if (err) 70 if (err)
71 goto out; 71 goto out;
72 72
73 skb->truesize += dlen - plen; 73 skb->truesize += dlen - plen;
74 __skb_put(skb, dlen - plen); 74 __skb_put(skb, dlen - plen);
75 memcpy(skb->data, scratch, dlen); 75 memcpy(skb->data, scratch, dlen);
76out: 76out:
77 put_cpu(); 77 put_cpu();
78 return err; 78 return err;
79} 79}
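
ipcomp_decompress above inflates into a bounded scratch buffer, bails out if the result would not fit, and only then grows the skb and copies the data in. A user-space sketch of the same pattern, with zlib's uncompress() standing in for the crypto_comp transform (IPComp itself carries raw deflate data, so this illustrates the buffer handling, not a wire-compatible decoder):

#include <stdlib.h>
#include <string.h>
#include <zlib.h>

#define SCRATCH_SIZE 65536          /* mirrors the fixed scratch buffer idea */

/* Decompress 'src' into a freshly allocated buffer via a bounded scratch area.
 * Returns the new buffer (caller frees) and its length, or NULL on failure. */
static unsigned char *decompress_payload(const unsigned char *src, size_t slen,
					 size_t *out_len)
{
	static unsigned char scratch[SCRATCH_SIZE];
	uLongf dlen = SCRATCH_SIZE;
	unsigned char *out;

	if (uncompress(scratch, &dlen, src, slen) != Z_OK)
		return NULL;            /* bad data, or it did not fit the scratch area */

	out = malloc(dlen);
	if (!out)
		return NULL;
	memcpy(out, scratch, dlen);     /* grow-and-copy, as the skb path does */
	*out_len = dlen;
	return out;
}
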
@@ -85,11 +85,11 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
85 struct ip_comp_hdr *ipch; 85 struct ip_comp_hdr *ipch;
86 86
87 if (skb_linearize_cow(skb)) 87 if (skb_linearize_cow(skb))
88 goto out; 88 goto out;
89 89
90 skb->ip_summed = CHECKSUM_NONE; 90 skb->ip_summed = CHECKSUM_NONE;
91 91
92 /* Remove ipcomp header and decompress original payload */ 92 /* Remove ipcomp header and decompress original payload */
93 iph = skb->nh.iph; 93 iph = skb->nh.iph;
94 ipch = (void *)skb->data; 94 ipch = (void *)skb->data;
95 iph->protocol = ipch->nexthdr; 95 iph->protocol = ipch->nexthdr;
@@ -97,7 +97,7 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
97 __skb_pull(skb, sizeof(*ipch)); 97 __skb_pull(skb, sizeof(*ipch));
98 err = ipcomp_decompress(x, skb); 98 err = ipcomp_decompress(x, skb);
99 99
100out: 100out:
101 return err; 101 return err;
102} 102}
103 103
@@ -109,7 +109,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
109 u8 *start, *scratch; 109 u8 *start, *scratch;
110 struct crypto_comp *tfm; 110 struct crypto_comp *tfm;
111 int cpu; 111 int cpu;
112 112
113 ihlen = iph->ihl * 4; 113 ihlen = iph->ihl * 4;
114 plen = skb->len - ihlen; 114 plen = skb->len - ihlen;
115 dlen = IPCOMP_SCRATCH_SIZE; 115 dlen = IPCOMP_SCRATCH_SIZE;
@@ -127,14 +127,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
127 err = -EMSGSIZE; 127 err = -EMSGSIZE;
128 goto out; 128 goto out;
129 } 129 }
130 130
131 memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); 131 memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
132 put_cpu(); 132 put_cpu();
133 133
134 pskb_trim(skb, ihlen + dlen + sizeof(struct ip_comp_hdr)); 134 pskb_trim(skb, ihlen + dlen + sizeof(struct ip_comp_hdr));
135 return 0; 135 return 0;
136 136
137out: 137out:
138 put_cpu(); 138 put_cpu();
139 return err; 139 return err;
140} 140}
@@ -157,7 +157,7 @@ static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
157 157
158 if (skb_linearize_cow(skb)) 158 if (skb_linearize_cow(skb))
159 goto out_ok; 159 goto out_ok;
160 160
161 err = ipcomp_compress(x, skb); 161 err = ipcomp_compress(x, skb);
162 iph = skb->nh.iph; 162 iph = skb->nh.iph;
163 163
@@ -194,7 +194,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
194 194
195 spi = htonl(ntohs(ipch->cpi)); 195 spi = htonl(ntohs(ipch->cpi));
196 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, 196 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
197 spi, IPPROTO_COMP, AF_INET); 197 spi, IPPROTO_COMP, AF_INET);
198 if (!x) 198 if (!x)
199 return; 199 return;
200 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n", 200 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
@@ -202,12 +202,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
202 xfrm_state_put(x); 202 xfrm_state_put(x);
203} 203}
204 204
205/* We always hold one tunnel user reference to indicate a tunnel */ 205/* We always hold one tunnel user reference to indicate a tunnel */
206static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) 206static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
207{ 207{
208 struct xfrm_state *t; 208 struct xfrm_state *t;
209 u8 mode = XFRM_MODE_TUNNEL; 209 u8 mode = XFRM_MODE_TUNNEL;
210 210
211 t = xfrm_state_alloc(); 211 t = xfrm_state_alloc();
212 if (t == NULL) 212 if (t == NULL)
213 goto out; 213 goto out;
@@ -247,7 +247,7 @@ static int ipcomp_tunnel_attach(struct xfrm_state *x)
247 struct xfrm_state *t; 247 struct xfrm_state *t;
248 248
249 t = xfrm_state_lookup((xfrm_address_t *)&x->id.daddr.a4, 249 t = xfrm_state_lookup((xfrm_address_t *)&x->id.daddr.a4,
250 x->props.saddr.a4, IPPROTO_IPIP, AF_INET); 250 x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
251 if (!t) { 251 if (!t) {
252 t = ipcomp_tunnel_create(x); 252 t = ipcomp_tunnel_create(x);
253 if (!t) { 253 if (!t) {
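The ipcomp4_err() hunk above recovers the SA by folding the 16-bit IPComp CPI into the 32-bit SPI slot used for the xfrm lookup (spi = htonl(ntohs(ipch->cpi))). Below is a minimal user-space sketch of that conversion and of the on-wire header layout it assumes (RFC 3173 order: next header, flags, 16-bit CPI); the struct and names are local to this illustration, not the kernel's ip_comp_hdr definition.

    /*
     * Illustration only: fold a 16-bit IPComp CPI into the 32-bit SPI
     * value used for the xfrm state lookup, as in ipcomp4_err() above.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    struct ipcomp_hdr_example {
        uint8_t  nexthdr;   /* protocol of the compressed payload */
        uint8_t  flags;     /* zero per RFC 3173 */
        uint16_t cpi;       /* Compression Parameter Index, network order */
    };

    /* Same folding as "spi = htonl(ntohs(ipch->cpi))" in the diff above. */
    static uint32_t cpi_to_spi(uint16_t cpi_net_order)
    {
        return htonl((uint32_t)ntohs(cpi_net_order));
    }

    int main(void)
    {
        struct ipcomp_hdr_example h = { .nexthdr = 4, .flags = 0,
                                        .cpi = htons(2) /* example CPI */ };
        printf("spi (network order) = 0x%08x\n",
               (unsigned int)cpi_to_spi(h.cpi));
        return 0;
    }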
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index afa60b9a003f..ba882bec317a 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -12,7 +12,7 @@
12 * BOOTP rewritten to construct and analyse packets itself instead 12 * BOOTP rewritten to construct and analyse packets itself instead
13 * of misusing the IP layer. num_bugs_causing_wrong_arp_replies--; 13 * of misusing the IP layer. num_bugs_causing_wrong_arp_replies--;
14 * -- MJ, December 1998 14 * -- MJ, December 1998
15 * 15 *
16 * Fixed ip_auto_config_setup calling at startup in the new "Linker Magic" 16 * Fixed ip_auto_config_setup calling at startup in the new "Linker Magic"
17 * initialization scheme. 17 * initialization scheme.
18 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 08/11/1999 18 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 08/11/1999
@@ -98,8 +98,8 @@
98#define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */ 98#define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */
99#define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */ 99#define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */
100#define CONF_TIMEOUT_MAX (HZ*30) /* Maximum allowed timeout */ 100#define CONF_TIMEOUT_MAX (HZ*30) /* Maximum allowed timeout */
101#define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers 101#define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers
102 - '3' from resolv.h */ 102 - '3' from resolv.h */
103 103
104#define NONE __constant_htonl(INADDR_NONE) 104#define NONE __constant_htonl(INADDR_NONE)
105 105
@@ -365,7 +365,7 @@ static int __init ic_defaults(void)
365 * At this point we have no userspace running so need not 365 * At this point we have no userspace running so need not
366 * claim locks on system_utsname 366 * claim locks on system_utsname
367 */ 367 */
368 368
369 if (!ic_host_name_set) 369 if (!ic_host_name_set)
370 sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr)); 370 sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr));
371 371
@@ -650,9 +650,9 @@ static void __init ic_bootp_init_ext(u8 *e)
650 *e++ = 40; 650 *e++ = 40;
651 e += 40; 651 e += 40;
652 652
653 *e++ = 57; /* set extension buffer size for reply */ 653 *e++ = 57; /* set extension buffer size for reply */
654 *e++ = 2; 654 *e++ = 2;
655 *e++ = 1; /* 128+236+8+20+14, see dhcpd sources */ 655 *e++ = 1; /* 128+236+8+20+14, see dhcpd sources */
656 *e++ = 150; 656 *e++ = 150;
657 657
658 *e++ = 255; /* End of the list */ 658 *e++ = 255; /* End of the list */
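The extension-buffer hunk above emits option 57 (maximum reply size) as a two-byte value, 1 and 150, i.e. 0x0196 = 406, which is exactly the 128+236+8+20+14 arithmetic quoted in the comment. A small sketch of that byte layout, offered as an illustration rather than the kernel helper itself:

    /*
     * Illustration: encode BOOTP/DHCP option 57 ("maximum message size")
     * the way ic_bootp_init_ext() does above: tag, length 2, then the
     * 16-bit value 406 in big-endian order (bytes 1 and 150).
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t *put_max_msg_size(uint8_t *e, uint16_t size)
    {
        *e++ = 57;              /* option tag */
        *e++ = 2;               /* option length */
        *e++ = size >> 8;       /* high byte: 1 for 406 */
        *e++ = size & 0xff;     /* low byte: 150 for 406 */
        return e;
    }

    int main(void)
    {
        uint8_t buf[4];
        put_max_msg_size(buf, 128 + 236 + 8 + 20 + 14);   /* = 406 */
        printf("%u %u %u %u\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }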
@@ -913,7 +913,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
913 /* Parse extensions */ 913 /* Parse extensions */
914 if (ext_len >= 4 && 914 if (ext_len >= 4 &&
915 !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */ 915 !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
916 u8 *end = (u8 *) b + ntohs(b->iph.tot_len); 916 u8 *end = (u8 *) b + ntohs(b->iph.tot_len);
917 u8 *ext; 917 u8 *ext;
918 918
919#ifdef IPCONFIG_DHCP 919#ifdef IPCONFIG_DHCP
@@ -1020,7 +1020,7 @@ drop:
1020 kfree_skb(skb); 1020 kfree_skb(skb);
1021 1021
1022 return 0; 1022 return 0;
1023} 1023}
1024 1024
1025 1025
1026#endif 1026#endif
@@ -1080,7 +1080,7 @@ static int __init ic_dynamic(void)
1080 * seems to be a terrible waste of CPU time, but actually there is 1080 * seems to be a terrible waste of CPU time, but actually there is
1081 * only one process running at all, so we don't need to use any 1081 * only one process running at all, so we don't need to use any
1082 * scheduler functions. 1082 * scheduler functions.
1083 * [Actually we could now, but the nothing else running note still 1083 * [Actually we could now, but the nothing else running note still
1084 * applies.. - AC] 1084 * applies.. - AC]
1085 */ 1085 */
1086 printk(KERN_NOTICE "Sending %s%s%s requests .", 1086 printk(KERN_NOTICE "Sending %s%s%s requests .",
@@ -1156,7 +1156,7 @@ static int __init ic_dynamic(void)
1156 } 1156 }
1157 1157
1158 printk("IP-Config: Got %s answer from %u.%u.%u.%u, ", 1158 printk("IP-Config: Got %s answer from %u.%u.%u.%u, ",
1159 ((ic_got_reply & IC_RARP) ? "RARP" 1159 ((ic_got_reply & IC_RARP) ? "RARP"
1160 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1160 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1161 NIPQUAD(ic_servaddr)); 1161 NIPQUAD(ic_servaddr));
1162 printk("my address is %u.%u.%u.%u\n", NIPQUAD(ic_myaddr)); 1162 printk("my address is %u.%u.%u.%u\n", NIPQUAD(ic_myaddr));
@@ -1286,7 +1286,7 @@ static int __init ip_auto_config(void)
1286#endif 1286#endif
1287 ic_first_dev->next) { 1287 ic_first_dev->next) {
1288#ifdef IPCONFIG_DYNAMIC 1288#ifdef IPCONFIG_DYNAMIC
1289 1289
1290 int retries = CONF_OPEN_RETRIES; 1290 int retries = CONF_OPEN_RETRIES;
1291 1291
1292 if (ic_dynamic() < 0) { 1292 if (ic_dynamic() < 0) {
@@ -1308,14 +1308,14 @@ static int __init ip_auto_config(void)
1308 */ 1308 */
1309#ifdef CONFIG_ROOT_NFS 1309#ifdef CONFIG_ROOT_NFS
1310 if (ROOT_DEV == Root_NFS) { 1310 if (ROOT_DEV == Root_NFS) {
1311 printk(KERN_ERR 1311 printk(KERN_ERR
1312 "IP-Config: Retrying forever (NFS root)...\n"); 1312 "IP-Config: Retrying forever (NFS root)...\n");
1313 goto try_try_again; 1313 goto try_try_again;
1314 } 1314 }
1315#endif 1315#endif
1316 1316
1317 if (--retries) { 1317 if (--retries) {
1318 printk(KERN_ERR 1318 printk(KERN_ERR
1319 "IP-Config: Reopening network devices...\n"); 1319 "IP-Config: Reopening network devices...\n");
1320 goto try_try_again; 1320 goto try_try_again;
1321 } 1321 }
@@ -1443,8 +1443,8 @@ static int __init ip_auto_config_setup(char *addrs)
1443 1443
1444 ic_set_manually = 1; 1444 ic_set_manually = 1;
1445 1445
1446 ic_enable = (*addrs && 1446 ic_enable = (*addrs &&
1447 (strcmp(addrs, "off") != 0) && 1447 (strcmp(addrs, "off") != 0) &&
1448 (strcmp(addrs, "none") != 0)); 1448 (strcmp(addrs, "none") != 0));
1449 if (!ic_enable) 1449 if (!ic_enable)
1450 return 1; 1450 return 1;
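The CONF_TIMEOUT_* macros touched earlier in this file describe the retry back-off used while autoconfiguring: each round the timeout grows by 7/4 (CONF_TIMEOUT_MULT), up to HZ of random jitter (CONF_TIMEOUT_RANDOM) is added, and the total is capped at 30*HZ (CONF_TIMEOUT_MAX). The sketch below just plays that schedule forward; the starting value, tick rate and jitter source are assumptions made for the illustration, not values taken from the file.

    /*
     * Sketch of the retry-timeout growth implied by the macros above.
     * HZ and the base timeout are placeholders for this example.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define HZ                  100
    #define CONF_TIMEOUT_RANDOM (HZ)
    #define CONF_TIMEOUT_MULT   *7/4
    #define CONF_TIMEOUT_MAX    (HZ*30)

    int main(void)
    {
        unsigned long timeout = HZ * 2;      /* assumed base timeout */
        int round;

        for (round = 0; round < 8; round++) {
            unsigned long jitter = (unsigned long)rand() % CONF_TIMEOUT_RANDOM;
            printf("round %d: wait %lu ticks (+%lu jitter)\n",
                   round, timeout, jitter);
            timeout = timeout CONF_TIMEOUT_MULT;  /* macro expands to *7/4 */
            if (timeout > CONF_TIMEOUT_MAX)
                timeout = CONF_TIMEOUT_MAX;
        }
        return 0;
    }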
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index da8bbd20c7ed..475bcd1e4181 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Linux NET3: IP/IP protocol decoder. 2 * Linux NET3: IP/IP protocol decoder.
3 * 3 *
4 * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $ 4 * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $
5 * 5 *
@@ -35,14 +35,14 @@
35 Thanks for the great code! 35 Thanks for the great code!
36 36
37 -Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 37 -Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
38 38
39 Minor tweaks: 39 Minor tweaks:
40 Cleaned up the code a little and added some pre-1.3.0 tweaks. 40 Cleaned up the code a little and added some pre-1.3.0 tweaks.
41 dev->hard_header/hard_header_len changed to use no headers. 41 dev->hard_header/hard_header_len changed to use no headers.
42 Comments/bracketing tweaked. 42 Comments/bracketing tweaked.
43 Made the tunnels use dev->name not tunnel: when error reporting. 43 Made the tunnels use dev->name not tunnel: when error reporting.
44 Added tx_dropped stat 44 Added tx_dropped stat
45 45
46 -Alan Cox (Alan.Cox@linux.org) 21 March 95 46 -Alan Cox (Alan.Cox@linux.org) 21 March 95
47 47
48 Reworked: 48 Reworked:
@@ -52,7 +52,7 @@
52 Note: There is currently no firewall or ICMP handling done. 52 Note: There is currently no firewall or ICMP handling done.
53 53
54 -Sam Lantinga (slouken@cs.ucdavis.edu) 02/13/96 54 -Sam Lantinga (slouken@cs.ucdavis.edu) 02/13/96
55 55
56*/ 56*/
57 57
58/* Things I wish I had known when writing the tunnel driver: 58/* Things I wish I had known when writing the tunnel driver:
@@ -75,7 +75,7 @@
75 "allocated" with skb_put(). You can then write up to skb->len 75 "allocated" with skb_put(). You can then write up to skb->len
76 bytes to that buffer. If you need more, you can call skb_put() 76 bytes to that buffer. If you need more, you can call skb_put()
77 again with the additional amount of space you need. You can 77 again with the additional amount of space you need. You can
78 find out how much more space you can allocate by calling 78 find out how much more space you can allocate by calling
79 "skb_tailroom(skb)". 79 "skb_tailroom(skb)".
80 Now, to add header space, call "skb_push(skb, header_len)". 80 Now, to add header space, call "skb_push(skb, header_len)".
81 This creates space at the beginning of the buffer and returns 81 This creates space at the beginning of the buffer and returns
@@ -92,7 +92,7 @@
92 For comments look at net/ipv4/ip_gre.c --ANK 92 For comments look at net/ipv4/ip_gre.c --ANK
93 */ 93 */
94 94
95 95
96#include <linux/capability.h> 96#include <linux/capability.h>
97#include <linux/module.h> 97#include <linux/module.h>
98#include <linux/types.h> 98#include <linux/types.h>
@@ -607,7 +607,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
607 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 607 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
608 if (!new_skb) { 608 if (!new_skb) {
609 ip_rt_put(rt); 609 ip_rt_put(rt);
610 stats->tx_dropped++; 610 stats->tx_dropped++;
611 dev_kfree_skb(skb); 611 dev_kfree_skb(skb);
612 tunnel->recursion--; 612 tunnel->recursion--;
613 return 0; 613 return 0;
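The header comment kept in the ipip.c hunks spells out the sk_buff contract: skb_put() appends at the tail, skb_push() claims headroom in front of the data, and skb_tailroom() reports how much tail space remains. The toy model below only illustrates that head/tail bookkeeping; it is a self-contained stand-in with invented names, not the kernel's sk_buff implementation.

    /*
     * Toy model of the data/tail bookkeeping described in the ipip.c
     * header comment: put() appends at the tail, push() claims headroom
     * in front of the data, tailroom() reports remaining tail space.
     */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_skb {
        unsigned char buf[256];
        unsigned char *data;    /* start of payload */
        unsigned char *tail;    /* one past the end of payload */
        unsigned char *end;     /* end of the allocation */
    };

    static void toy_init(struct toy_skb *skb, size_t headroom)
    {
        skb->data = skb->buf + headroom;
        skb->tail = skb->data;
        skb->end  = skb->buf + sizeof(skb->buf);
    }

    static size_t toy_tailroom(const struct toy_skb *skb)
    {
        return (size_t)(skb->end - skb->tail);   /* like skb_tailroom() */
    }

    static unsigned char *toy_put(struct toy_skb *skb, size_t len)
    {
        unsigned char *old_tail = skb->tail;     /* like skb_put() */
        assert(len <= toy_tailroom(skb));
        skb->tail += len;
        return old_tail;
    }

    static unsigned char *toy_push(struct toy_skb *skb, size_t len)
    {
        assert((size_t)(skb->data - skb->buf) >= len);
        skb->data -= len;                        /* like skb_push() */
        return skb->data;
    }

    int main(void)
    {
        struct toy_skb skb;

        toy_init(&skb, 32);                      /* reserve 32 bytes headroom */
        memcpy(toy_put(&skb, 5), "hello", 5);    /* payload written at the tail */
        toy_push(&skb, 20);                      /* claim room for an outer header */
        printf("payload %zu bytes, tailroom %zu bytes\n",
               (size_t)(skb.tail - skb.data), toy_tailroom(&skb));
        return 0;
    }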
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index d7e1e60f51d5..604f5b585104 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -241,7 +241,7 @@ failure:
241/* 241/*
242 * Delete a VIF entry 242 * Delete a VIF entry
243 */ 243 */
244 244
245static int vif_delete(int vifi) 245static int vif_delete(int vifi)
246{ 246{
247 struct vif_device *v; 247 struct vif_device *v;
@@ -409,7 +409,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
409 return -ENOBUFS; 409 return -ENOBUFS;
410 break; 410 break;
411#endif 411#endif
412 case VIFF_TUNNEL: 412 case VIFF_TUNNEL:
413 dev = ipmr_new_tunnel(vifc); 413 dev = ipmr_new_tunnel(vifc);
414 if (!dev) 414 if (!dev)
415 return -ENOBUFS; 415 return -ENOBUFS;
@@ -499,7 +499,7 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void)
499/* 499/*
500 * A cache entry has gone into a resolved state from queued 500 * A cache entry has gone into a resolved state from queued
501 */ 501 */
502 502
503static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) 503static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
504{ 504{
505 struct sk_buff *skb; 505 struct sk_buff *skb;
@@ -536,7 +536,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
536 * 536 *
537 * Called under mrt_lock. 537 * Called under mrt_lock.
538 */ 538 */
539 539
540static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) 540static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
541{ 541{
542 struct sk_buff *skb; 542 struct sk_buff *skb;
@@ -567,13 +567,13 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
567 memcpy(msg, pkt->nh.raw, sizeof(struct iphdr)); 567 memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
568 msg->im_msgtype = IGMPMSG_WHOLEPKT; 568 msg->im_msgtype = IGMPMSG_WHOLEPKT;
569 msg->im_mbz = 0; 569 msg->im_mbz = 0;
570 msg->im_vif = reg_vif_num; 570 msg->im_vif = reg_vif_num;
571 skb->nh.iph->ihl = sizeof(struct iphdr) >> 2; 571 skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
572 skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr)); 572 skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
573 } else 573 } else
574#endif 574#endif
575 { 575 {
576 576
577 /* 577 /*
578 * Copy the IP header 578 * Copy the IP header
579 */ 579 */
@@ -595,7 +595,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
595 igmp->code = 0; 595 igmp->code = 0;
596 skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */ 596 skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */
597 skb->h.raw = skb->nh.raw; 597 skb->h.raw = skb->nh.raw;
598 } 598 }
599 599
600 if (mroute_socket == NULL) { 600 if (mroute_socket == NULL) {
601 kfree_skb(skb); 601 kfree_skb(skb);
@@ -617,7 +617,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
617/* 617/*
618 * Queue a packet for resolution. It gets locked cache entry! 618 * Queue a packet for resolution. It gets locked cache entry!
619 */ 619 */
620 620
621static int 621static int
622ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) 622ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
623{ 623{
@@ -655,7 +655,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
655 * Reflect first query at mrouted. 655 * Reflect first query at mrouted.
656 */ 656 */
657 if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) { 657 if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
658 /* If the report failed throw the cache entry 658 /* If the report failed throw the cache entry
659 out - Brad Parker 659 out - Brad Parker
660 */ 660 */
661 spin_unlock_bh(&mfc_unres_lock); 661 spin_unlock_bh(&mfc_unres_lock);
@@ -781,11 +781,11 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
781/* 781/*
782 * Close the multicast socket, and clear the vif tables etc 782 * Close the multicast socket, and clear the vif tables etc
783 */ 783 */
784 784
785static void mroute_clean_tables(struct sock *sk) 785static void mroute_clean_tables(struct sock *sk)
786{ 786{
787 int i; 787 int i;
788 788
789 /* 789 /*
790 * Shut down all active vif entries 790 * Shut down all active vif entries
791 */ 791 */
@@ -852,13 +852,13 @@ static void mrtsock_destruct(struct sock *sk)
852 * that's how BSD mrouted happens to think. Maybe one day with a proper 852 * that's how BSD mrouted happens to think. Maybe one day with a proper
853 * MOSPF/PIM router set up we can clean this up. 853 * MOSPF/PIM router set up we can clean this up.
854 */ 854 */
855 855
856int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen) 856int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
857{ 857{
858 int ret; 858 int ret;
859 struct vifctl vif; 859 struct vifctl vif;
860 struct mfcctl mfc; 860 struct mfcctl mfc;
861 861
862 if(optname!=MRT_INIT) 862 if(optname!=MRT_INIT)
863 { 863 {
864 if(sk!=mroute_socket && !capable(CAP_NET_ADMIN)) 864 if(sk!=mroute_socket && !capable(CAP_NET_ADMIN))
@@ -899,7 +899,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
899 if(optlen!=sizeof(vif)) 899 if(optlen!=sizeof(vif))
900 return -EINVAL; 900 return -EINVAL;
901 if (copy_from_user(&vif,optval,sizeof(vif))) 901 if (copy_from_user(&vif,optval,sizeof(vif)))
902 return -EFAULT; 902 return -EFAULT;
903 if(vif.vifc_vifi >= MAXVIFS) 903 if(vif.vifc_vifi >= MAXVIFS)
904 return -ENFILE; 904 return -ENFILE;
905 rtnl_lock(); 905 rtnl_lock();
@@ -978,13 +978,13 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
978/* 978/*
979 * Getsock opt support for the multicast routing system. 979 * Getsock opt support for the multicast routing system.
980 */ 980 */
981 981
982int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen) 982int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
983{ 983{
984 int olr; 984 int olr;
985 int val; 985 int val;
986 986
987 if(optname!=MRT_VERSION && 987 if(optname!=MRT_VERSION &&
988#ifdef CONFIG_IP_PIMSM 988#ifdef CONFIG_IP_PIMSM
989 optname!=MRT_PIM && 989 optname!=MRT_PIM &&
990#endif 990#endif
@@ -997,7 +997,7 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
997 olr = min_t(unsigned int, olr, sizeof(int)); 997 olr = min_t(unsigned int, olr, sizeof(int));
998 if (olr < 0) 998 if (olr < 0)
999 return -EINVAL; 999 return -EINVAL;
1000 1000
1001 if(put_user(olr,optlen)) 1001 if(put_user(olr,optlen))
1002 return -EFAULT; 1002 return -EFAULT;
1003 if(optname==MRT_VERSION) 1003 if(optname==MRT_VERSION)
@@ -1016,19 +1016,19 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u
1016/* 1016/*
1017 * The IP multicast ioctl support routines. 1017 * The IP multicast ioctl support routines.
1018 */ 1018 */
1019 1019
1020int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) 1020int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1021{ 1021{
1022 struct sioc_sg_req sr; 1022 struct sioc_sg_req sr;
1023 struct sioc_vif_req vr; 1023 struct sioc_vif_req vr;
1024 struct vif_device *vif; 1024 struct vif_device *vif;
1025 struct mfc_cache *c; 1025 struct mfc_cache *c;
1026 1026
1027 switch(cmd) 1027 switch(cmd)
1028 { 1028 {
1029 case SIOCGETVIFCNT: 1029 case SIOCGETVIFCNT:
1030 if (copy_from_user(&vr,arg,sizeof(vr))) 1030 if (copy_from_user(&vr,arg,sizeof(vr)))
1031 return -EFAULT; 1031 return -EFAULT;
1032 if(vr.vifi>=maxvif) 1032 if(vr.vifi>=maxvif)
1033 return -EINVAL; 1033 return -EINVAL;
1034 read_lock(&mrt_lock); 1034 read_lock(&mrt_lock);
@@ -1094,7 +1094,7 @@ static struct notifier_block ip_mr_notifier={
1094 * This avoids tunnel drivers and other mess and gives us the speed so 1094 * This avoids tunnel drivers and other mess and gives us the speed so
1095 * important for multicast video. 1095 * important for multicast video.
1096 */ 1096 */
1097 1097
1098static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1098static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1099{ 1099{
1100 struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr)); 1100 struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
@@ -1192,7 +1192,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1192 encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len; 1192 encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
1193 1193
1194 if (skb_cow(skb, encap)) { 1194 if (skb_cow(skb, encap)) {
1195 ip_rt_put(rt); 1195 ip_rt_put(rt);
1196 goto out_free; 1196 goto out_free;
1197 } 1197 }
1198 1198
@@ -1226,7 +1226,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1226 * not mrouter) cannot join to more than one interface - it will 1226 * not mrouter) cannot join to more than one interface - it will
1227 * result in receiving multiple packets. 1227 * result in receiving multiple packets.
1228 */ 1228 */
1229 NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev, 1229 NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
1230 ipmr_forward_finish); 1230 ipmr_forward_finish);
1231 return; 1231 return;
1232 1232
@@ -1287,7 +1287,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
1287 large chunk of pimd to kernel. Ough... --ANK 1287 large chunk of pimd to kernel. Ough... --ANK
1288 */ 1288 */
1289 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && 1289 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
1290 time_after(jiffies, 1290 time_after(jiffies,
1291 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { 1291 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1292 cache->mfc_un.res.last_assert = jiffies; 1292 cache->mfc_un.res.last_assert = jiffies;
1293 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF); 1293 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
@@ -1424,14 +1424,14 @@ int pim_rcv_v1(struct sk_buff * skb)
1424 struct iphdr *encap; 1424 struct iphdr *encap;
1425 struct net_device *reg_dev = NULL; 1425 struct net_device *reg_dev = NULL;
1426 1426
1427 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) 1427 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1428 goto drop; 1428 goto drop;
1429 1429
1430 pim = (struct igmphdr*)skb->h.raw; 1430 pim = (struct igmphdr*)skb->h.raw;
1431 1431
1432 if (!mroute_do_pim || 1432 if (!mroute_do_pim ||
1433 skb->len < sizeof(*pim) + sizeof(*encap) || 1433 skb->len < sizeof(*pim) + sizeof(*encap) ||
1434 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 1434 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1435 goto drop; 1435 goto drop;
1436 1436
1437 encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr)); 1437 encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
@@ -1443,7 +1443,7 @@ int pim_rcv_v1(struct sk_buff * skb)
1443 */ 1443 */
1444 if (!MULTICAST(encap->daddr) || 1444 if (!MULTICAST(encap->daddr) ||
1445 encap->tot_len == 0 || 1445 encap->tot_len == 0 ||
1446 ntohs(encap->tot_len) + sizeof(*pim) > skb->len) 1446 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
1447 goto drop; 1447 goto drop;
1448 1448
1449 read_lock(&mrt_lock); 1449 read_lock(&mrt_lock);
@@ -1453,7 +1453,7 @@ int pim_rcv_v1(struct sk_buff * skb)
1453 dev_hold(reg_dev); 1453 dev_hold(reg_dev);
1454 read_unlock(&mrt_lock); 1454 read_unlock(&mrt_lock);
1455 1455
1456 if (reg_dev == NULL) 1456 if (reg_dev == NULL)
1457 goto drop; 1457 goto drop;
1458 1458
1459 skb->mac.raw = skb->nh.raw; 1459 skb->mac.raw = skb->nh.raw;
@@ -1484,13 +1484,13 @@ static int pim_rcv(struct sk_buff * skb)
1484 struct iphdr *encap; 1484 struct iphdr *encap;
1485 struct net_device *reg_dev = NULL; 1485 struct net_device *reg_dev = NULL;
1486 1486
1487 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) 1487 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1488 goto drop; 1488 goto drop;
1489 1489
1490 pim = (struct pimreghdr*)skb->h.raw; 1490 pim = (struct pimreghdr*)skb->h.raw;
1491 if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) || 1491 if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
1492 (pim->flags&PIM_NULL_REGISTER) || 1492 (pim->flags&PIM_NULL_REGISTER) ||
1493 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && 1493 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
1494 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 1494 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1495 goto drop; 1495 goto drop;
1496 1496
@@ -1498,7 +1498,7 @@ static int pim_rcv(struct sk_buff * skb)
1498 encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr)); 1498 encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
1499 if (!MULTICAST(encap->daddr) || 1499 if (!MULTICAST(encap->daddr) ||
1500 encap->tot_len == 0 || 1500 encap->tot_len == 0 ||
1501 ntohs(encap->tot_len) + sizeof(*pim) > skb->len) 1501 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
1502 goto drop; 1502 goto drop;
1503 1503
1504 read_lock(&mrt_lock); 1504 read_lock(&mrt_lock);
@@ -1508,7 +1508,7 @@ static int pim_rcv(struct sk_buff * skb)
1508 dev_hold(reg_dev); 1508 dev_hold(reg_dev);
1509 read_unlock(&mrt_lock); 1509 read_unlock(&mrt_lock);
1510 1510
1511 if (reg_dev == NULL) 1511 if (reg_dev == NULL)
1512 goto drop; 1512 goto drop;
1513 1513
1514 skb->mac.raw = skb->nh.raw; 1514 skb->mac.raw = skb->nh.raw;
@@ -1614,7 +1614,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1614 return err; 1614 return err;
1615} 1615}
1616 1616
1617#ifdef CONFIG_PROC_FS 1617#ifdef CONFIG_PROC_FS
1618/* 1618/*
1619 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif 1619 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
1620 */ 1620 */
@@ -1628,7 +1628,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1628 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) { 1628 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
1629 if(!VIF_EXISTS(iter->ct)) 1629 if(!VIF_EXISTS(iter->ct))
1630 continue; 1630 continue;
1631 if (pos-- == 0) 1631 if (pos-- == 0)
1632 return &vif_table[iter->ct]; 1632 return &vif_table[iter->ct];
1633 } 1633 }
1634 return NULL; 1634 return NULL;
@@ -1637,7 +1637,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1637static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) 1637static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
1638{ 1638{
1639 read_lock(&mrt_lock); 1639 read_lock(&mrt_lock);
1640 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1) 1640 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
1641 : SEQ_START_TOKEN; 1641 : SEQ_START_TOKEN;
1642} 1642}
1643 1643
@@ -1648,7 +1648,7 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1648 ++*pos; 1648 ++*pos;
1649 if (v == SEQ_START_TOKEN) 1649 if (v == SEQ_START_TOKEN)
1650 return ipmr_vif_seq_idx(iter, 0); 1650 return ipmr_vif_seq_idx(iter, 0);
1651 1651
1652 while (++iter->ct < maxvif) { 1652 while (++iter->ct < maxvif) {
1653 if(!VIF_EXISTS(iter->ct)) 1653 if(!VIF_EXISTS(iter->ct))
1654 continue; 1654 continue;
@@ -1665,7 +1665,7 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
1665static int ipmr_vif_seq_show(struct seq_file *seq, void *v) 1665static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1666{ 1666{
1667 if (v == SEQ_START_TOKEN) { 1667 if (v == SEQ_START_TOKEN) {
1668 seq_puts(seq, 1668 seq_puts(seq,
1669 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); 1669 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1670 } else { 1670 } else {
1671 const struct vif_device *vif = v; 1671 const struct vif_device *vif = v;
@@ -1674,7 +1674,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1674 seq_printf(seq, 1674 seq_printf(seq,
1675 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", 1675 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1676 vif - vif_table, 1676 vif - vif_table,
1677 name, vif->bytes_in, vif->pkt_in, 1677 name, vif->bytes_in, vif->pkt_in,
1678 vif->bytes_out, vif->pkt_out, 1678 vif->bytes_out, vif->pkt_out,
1679 vif->flags, vif->local, vif->remote); 1679 vif->flags, vif->local, vif->remote);
1680 } 1680 }
@@ -1693,7 +1693,7 @@ static int ipmr_vif_open(struct inode *inode, struct file *file)
1693 struct seq_file *seq; 1693 struct seq_file *seq;
1694 int rc = -ENOMEM; 1694 int rc = -ENOMEM;
1695 struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); 1695 struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
1696 1696
1697 if (!s) 1697 if (!s)
1698 goto out; 1698 goto out;
1699 1699
@@ -1732,15 +1732,15 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1732 1732
1733 it->cache = mfc_cache_array; 1733 it->cache = mfc_cache_array;
1734 read_lock(&mrt_lock); 1734 read_lock(&mrt_lock);
1735 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) 1735 for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
1736 for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next) 1736 for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
1737 if (pos-- == 0) 1737 if (pos-- == 0)
1738 return mfc; 1738 return mfc;
1739 read_unlock(&mrt_lock); 1739 read_unlock(&mrt_lock);
1740 1740
1741 it->cache = &mfc_unres_queue; 1741 it->cache = &mfc_unres_queue;
1742 spin_lock_bh(&mfc_unres_lock); 1742 spin_lock_bh(&mfc_unres_lock);
1743 for(mfc = mfc_unres_queue; mfc; mfc = mfc->next) 1743 for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
1744 if (pos-- == 0) 1744 if (pos-- == 0)
1745 return mfc; 1745 return mfc;
1746 spin_unlock_bh(&mfc_unres_lock); 1746 spin_unlock_bh(&mfc_unres_lock);
@@ -1755,7 +1755,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1755 struct ipmr_mfc_iter *it = seq->private; 1755 struct ipmr_mfc_iter *it = seq->private;
1756 it->cache = NULL; 1756 it->cache = NULL;
1757 it->ct = 0; 1757 it->ct = 0;
1758 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1) 1758 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
1759 : SEQ_START_TOKEN; 1759 : SEQ_START_TOKEN;
1760} 1760}
1761 1761
@@ -1771,8 +1771,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1771 1771
1772 if (mfc->next) 1772 if (mfc->next)
1773 return mfc->next; 1773 return mfc->next;
1774 1774
1775 if (it->cache == &mfc_unres_queue) 1775 if (it->cache == &mfc_unres_queue)
1776 goto end_of_list; 1776 goto end_of_list;
1777 1777
1778 BUG_ON(it->cache != mfc_cache_array); 1778 BUG_ON(it->cache != mfc_cache_array);
@@ -1787,10 +1787,10 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1787 read_unlock(&mrt_lock); 1787 read_unlock(&mrt_lock);
1788 it->cache = &mfc_unres_queue; 1788 it->cache = &mfc_unres_queue;
1789 it->ct = 0; 1789 it->ct = 0;
1790 1790
1791 spin_lock_bh(&mfc_unres_lock); 1791 spin_lock_bh(&mfc_unres_lock);
1792 mfc = mfc_unres_queue; 1792 mfc = mfc_unres_queue;
1793 if (mfc) 1793 if (mfc)
1794 return mfc; 1794 return mfc;
1795 1795
1796 end_of_list: 1796 end_of_list:
@@ -1815,12 +1815,12 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1815 int n; 1815 int n;
1816 1816
1817 if (v == SEQ_START_TOKEN) { 1817 if (v == SEQ_START_TOKEN) {
1818 seq_puts(seq, 1818 seq_puts(seq,
1819 "Group Origin Iif Pkts Bytes Wrong Oifs\n"); 1819 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
1820 } else { 1820 } else {
1821 const struct mfc_cache *mfc = v; 1821 const struct mfc_cache *mfc = v;
1822 const struct ipmr_mfc_iter *it = seq->private; 1822 const struct ipmr_mfc_iter *it = seq->private;
1823 1823
1824 seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld", 1824 seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
1825 (unsigned long) mfc->mfc_mcastgrp, 1825 (unsigned long) mfc->mfc_mcastgrp,
1826 (unsigned long) mfc->mfc_origin, 1826 (unsigned long) mfc->mfc_origin,
@@ -1830,12 +1830,12 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1830 mfc->mfc_un.res.wrong_if); 1830 mfc->mfc_un.res.wrong_if);
1831 1831
1832 if (it->cache != &mfc_unres_queue) { 1832 if (it->cache != &mfc_unres_queue) {
1833 for(n = mfc->mfc_un.res.minvif; 1833 for(n = mfc->mfc_un.res.minvif;
1834 n < mfc->mfc_un.res.maxvif; n++ ) { 1834 n < mfc->mfc_un.res.maxvif; n++ ) {
1835 if(VIF_EXISTS(n) 1835 if(VIF_EXISTS(n)
1836 && mfc->mfc_un.res.ttls[n] < 255) 1836 && mfc->mfc_un.res.ttls[n] < 255)
1837 seq_printf(seq, 1837 seq_printf(seq,
1838 " %2d:%-3d", 1838 " %2d:%-3d",
1839 n, mfc->mfc_un.res.ttls[n]); 1839 n, mfc->mfc_un.res.ttls[n]);
1840 } 1840 }
1841 } 1841 }
@@ -1856,7 +1856,7 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
1856 struct seq_file *seq; 1856 struct seq_file *seq;
1857 int rc = -ENOMEM; 1857 int rc = -ENOMEM;
1858 struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); 1858 struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
1859 1859
1860 if (!s) 1860 if (!s)
1861 goto out; 1861 goto out;
1862 1862
@@ -1881,7 +1881,7 @@ static struct file_operations ipmr_mfc_fops = {
1881 .llseek = seq_lseek, 1881 .llseek = seq_lseek,
1882 .release = seq_release_private, 1882 .release = seq_release_private,
1883}; 1883};
1884#endif 1884#endif
1885 1885
1886#ifdef CONFIG_IP_PIMSM_V2 1886#ifdef CONFIG_IP_PIMSM_V2
1887static struct net_protocol pim_protocol = { 1887static struct net_protocol pim_protocol = {
@@ -1893,7 +1893,7 @@ static struct net_protocol pim_protocol = {
1893/* 1893/*
1894 * Setup for IP multicast routing 1894 * Setup for IP multicast routing
1895 */ 1895 */
1896 1896
1897void __init ip_mr_init(void) 1897void __init ip_mr_init(void)
1898{ 1898{
1899 mrt_cachep = kmem_cache_create("ip_mrt_cache", 1899 mrt_cachep = kmem_cache_create("ip_mrt_cache",
@@ -1903,8 +1903,8 @@ void __init ip_mr_init(void)
1903 init_timer(&ipmr_expire_timer); 1903 init_timer(&ipmr_expire_timer);
1904 ipmr_expire_timer.function=ipmr_expire_process; 1904 ipmr_expire_timer.function=ipmr_expire_process;
1905 register_netdevice_notifier(&ip_mr_notifier); 1905 register_netdevice_notifier(&ip_mr_notifier);
1906#ifdef CONFIG_PROC_FS 1906#ifdef CONFIG_PROC_FS
1907 proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops); 1907 proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
1908 proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops); 1908 proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);
1909#endif 1909#endif
1910} 1910}
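The seq_file hunks above fix the layout of /proc/net/ip_mr_vif: one header row, then one line per VIF printed with "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X" (index, name, bytes/packets in and out, flags, local and remote addresses), with the file itself registered by proc_net_fops_create() in ip_mr_init(). A small reader that consumes that column layout, offered as a sketch with minimal error handling:

    /*
     * Sketch: parse /proc/net/ip_mr_vif using the column layout printed
     * by ipmr_vif_seq_show() above.  Illustration only.
     */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/net/ip_mr_vif", "r");
        char line[256];
        char name[32];
        int idx;
        unsigned long bytes_in, pkt_in, bytes_out, pkt_out;
        unsigned int flags, local, remote;

        if (!f) {
            perror("ip_mr_vif");
            return 1;
        }
        fgets(line, sizeof(line), f);            /* skip the header row */
        while (fgets(line, sizeof(line), f)) {
            if (sscanf(line, "%d %31s %lu %lu %lu %lu %x %x %x",
                       &idx, name, &bytes_in, &pkt_in, &bytes_out,
                       &pkt_out, &flags, &local, &remote) == 9)
                printf("vif %d (%s): %lu pkts in, %lu pkts out\n",
                       idx, name, pkt_in, pkt_out);
        }
        fclose(f);
        return 0;
    }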
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 3aec4ac66e3c..0b5e03476ce4 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -494,8 +494,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
494 * Checking the dest server status. 494 * Checking the dest server status.
495 */ 495 */
496 if ((dest == NULL) || 496 if ((dest == NULL) ||
497 !(dest->flags & IP_VS_DEST_F_AVAILABLE) || 497 !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
498 (sysctl_ip_vs_expire_quiescent_template && 498 (sysctl_ip_vs_expire_quiescent_template &&
499 (atomic_read(&dest->weight) == 0))) { 499 (atomic_read(&dest->weight) == 0))) {
500 IP_VS_DBG(9, "check_template: dest not available for " 500 IP_VS_DBG(9, "check_template: dest not available for "
501 "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " 501 "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
@@ -666,7 +666,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
666{ 666{
667 int idx; 667 int idx;
668 struct ip_vs_conn *cp; 668 struct ip_vs_conn *cp;
669 669
670 for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { 670 for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
671 ct_read_lock_bh(idx); 671 ct_read_lock_bh(idx);
672 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 672 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
@@ -694,7 +694,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
694 int idx; 694 int idx;
695 695
696 ++*pos; 696 ++*pos;
697 if (v == SEQ_START_TOKEN) 697 if (v == SEQ_START_TOKEN)
698 return ip_vs_conn_array(seq, 0); 698 return ip_vs_conn_array(seq, 0);
699 699
700 /* more on same hash chain? */ 700 /* more on same hash chain? */
@@ -709,7 +709,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
709 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 709 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
710 seq->private = &ip_vs_conn_tab[idx]; 710 seq->private = &ip_vs_conn_tab[idx];
711 return cp; 711 return cp;
712 } 712 }
713 ct_read_unlock_bh(idx); 713 ct_read_unlock_bh(idx);
714 } 714 }
715 seq->private = NULL; 715 seq->private = NULL;
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 34257520a3a6..24d7b66eb6d2 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -813,14 +813,14 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
813 skb->nh.iph->saddr = cp->vaddr; 813 skb->nh.iph->saddr = cp->vaddr;
814 ip_send_check(skb->nh.iph); 814 ip_send_check(skb->nh.iph);
815 815
816 /* For policy routing, packets originating from this 816 /* For policy routing, packets originating from this
817 * machine itself may be routed differently to packets 817 * machine itself may be routed differently to packets
818 * passing through. We want this packet to be routed as 818 * passing through. We want this packet to be routed as
819 * if it came from this machine itself. So re-compute 819 * if it came from this machine itself. So re-compute
820 * the routing information. 820 * the routing information.
821 */ 821 */
822 if (ip_route_me_harder(pskb, RTN_LOCAL) != 0) 822 if (ip_route_me_harder(pskb, RTN_LOCAL) != 0)
823 goto drop; 823 goto drop;
824 skb = *pskb; 824 skb = *pskb;
825 825
826 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); 826 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
@@ -847,7 +847,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
847 * forward to the right destination host if relevant. 847 * forward to the right destination host if relevant.
848 * Currently handles error types - unreachable, quench, ttl exceeded. 848 * Currently handles error types - unreachable, quench, ttl exceeded.
849 */ 849 */
850static int 850static int
851ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) 851ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
852{ 852{
853 struct sk_buff *skb = *pskb; 853 struct sk_buff *skb = *pskb;
@@ -863,7 +863,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum)
863 /* reassemble IP fragments */ 863 /* reassemble IP fragments */
864 if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) { 864 if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
865 skb = ip_vs_gather_frags(skb, 865 skb = ip_vs_gather_frags(skb,
866 hooknum == NF_IP_LOCAL_IN ? 866 hooknum == NF_IP_LOCAL_IN ?
867 IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD); 867 IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD);
868 if (!skb) 868 if (!skb)
869 return NF_STOLEN; 869 return NF_STOLEN;
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index 687c1de1146f..847c47af040c 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -370,7 +370,7 @@ static int __init ip_vs_ftp_init(void)
370 if (ret) 370 if (ret)
371 break; 371 break;
372 IP_VS_INFO("%s: loaded support on port[%d] = %d\n", 372 IP_VS_INFO("%s: loaded support on port[%d] = %d\n",
373 app->name, i, ports[i]); 373 app->name, i, ports[i]);
374 } 374 }
375 375
376 if (ret) 376 if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index a4385a2180ee..76fd1fb91878 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -118,7 +118,7 @@ static ctl_table vs_vars_table[] = {
118 .procname = "lblc_expiration", 118 .procname = "lblc_expiration",
119 .data = &sysctl_ip_vs_lblc_expiration, 119 .data = &sysctl_ip_vs_lblc_expiration,
120 .maxlen = sizeof(int), 120 .maxlen = sizeof(int),
121 .mode = 0644, 121 .mode = 0644,
122 .proc_handler = &proc_dointvec_jiffies, 122 .proc_handler = &proc_dointvec_jiffies,
123 }, 123 },
124 { .ctl_name = 0 } 124 { .ctl_name = 0 }
@@ -128,7 +128,7 @@ static ctl_table vs_table[] = {
128 { 128 {
129 .ctl_name = NET_IPV4_VS, 129 .ctl_name = NET_IPV4_VS,
130 .procname = "vs", 130 .procname = "vs",
131 .mode = 0555, 131 .mode = 0555,
132 .child = vs_vars_table 132 .child = vs_vars_table
133 }, 133 },
134 { .ctl_name = 0 } 134 { .ctl_name = 0 }
@@ -137,7 +137,7 @@ static ctl_table vs_table[] = {
137static ctl_table ipvs_ipv4_table[] = { 137static ctl_table ipvs_ipv4_table[] = {
138 { 138 {
139 .ctl_name = NET_IPV4, 139 .ctl_name = NET_IPV4,
140 .procname = "ipv4", 140 .procname = "ipv4",
141 .mode = 0555, 141 .mode = 0555,
142 .child = vs_table 142 .child = vs_table
143 }, 143 },
@@ -147,8 +147,8 @@ static ctl_table ipvs_ipv4_table[] = {
147static ctl_table lblc_root_table[] = { 147static ctl_table lblc_root_table[] = {
148 { 148 {
149 .ctl_name = CTL_NET, 149 .ctl_name = CTL_NET,
150 .procname = "net", 150 .procname = "net",
151 .mode = 0555, 151 .mode = 0555,
152 .child = ipvs_ipv4_table 152 .child = ipvs_ipv4_table
153 }, 153 },
154 { .ctl_name = 0 } 154 { .ctl_name = 0 }
@@ -288,7 +288,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
288 288
289 write_lock(&tbl->lock); 289 write_lock(&tbl->lock);
290 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 290 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
291 if (time_before(now, 291 if (time_before(now,
292 en->lastuse + sysctl_ip_vs_lblc_expiration)) 292 en->lastuse + sysctl_ip_vs_lblc_expiration))
293 continue; 293 continue;
294 294
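The nested ctl_table entries in this file chain net -> ipv4 -> vs -> lblc_expiration with proc_dointvec_jiffies as the handler, so the tunable is stored in jiffies internally but read and written in seconds through procfs. A minimal reader for it, assuming the /proc/sys path derived from those table names (it only exists when IPVS and sysctl support are present):

    /*
     * Sketch: read the LBLC entry expiration sysctl through procfs.
     * Path assumed from the ctl_table names registered above.
     */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/ipv4/vs/lblc_expiration", "r");
        int seconds;

        if (!f) {
            perror("lblc_expiration");
            return 1;
        }
        if (fscanf(f, "%d", &seconds) == 1)
            printf("LBLC entries expire after %d seconds\n", seconds);
        fclose(f);
        return 0;
    }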
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index fe1af5d079af..bf1e7f272b84 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -307,7 +307,7 @@ static ctl_table vs_vars_table[] = {
307 .procname = "lblcr_expiration", 307 .procname = "lblcr_expiration",
308 .data = &sysctl_ip_vs_lblcr_expiration, 308 .data = &sysctl_ip_vs_lblcr_expiration,
309 .maxlen = sizeof(int), 309 .maxlen = sizeof(int),
310 .mode = 0644, 310 .mode = 0644,
311 .proc_handler = &proc_dointvec_jiffies, 311 .proc_handler = &proc_dointvec_jiffies,
312 }, 312 },
313 { .ctl_name = 0 } 313 { .ctl_name = 0 }
@@ -326,7 +326,7 @@ static ctl_table vs_table[] = {
326static ctl_table ipvs_ipv4_table[] = { 326static ctl_table ipvs_ipv4_table[] = {
327 { 327 {
328 .ctl_name = NET_IPV4, 328 .ctl_name = NET_IPV4,
329 .procname = "ipv4", 329 .procname = "ipv4",
330 .mode = 0555, 330 .mode = 0555,
331 .child = vs_table 331 .child = vs_table
332 }, 332 },
@@ -336,8 +336,8 @@ static ctl_table ipvs_ipv4_table[] = {
336static ctl_table lblcr_root_table[] = { 336static ctl_table lblcr_root_table[] = {
337 { 337 {
338 .ctl_name = CTL_NET, 338 .ctl_name = CTL_NET,
339 .procname = "net", 339 .procname = "net",
340 .mode = 0555, 340 .mode = 0555,
341 .child = ipvs_ipv4_table 341 .child = ipvs_ipv4_table
342 }, 342 },
343 { .ctl_name = 0 } 343 { .ctl_name = 0 }
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index b23bab231cab..433f8a947924 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -68,7 +68,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
68 q = q->next; 68 q = q->next;
69 continue; 69 continue;
70 } 70 }
71 71
72 dest = list_entry(q, struct ip_vs_dest, n_list); 72 dest = list_entry(q, struct ip_vs_dest, n_list);
73 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && 73 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
74 atomic_read(&dest->weight) > 0) 74 atomic_read(&dest->weight) > 0)
diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c
index 252e837b17a5..cb8fce467349 100644
--- a/net/ipv4/multipath_drr.c
+++ b/net/ipv4/multipath_drr.c
@@ -134,7 +134,7 @@ static void drr_select_route(const struct flowi *flp,
134 struct rtable *first, struct rtable **rp) 134 struct rtable *first, struct rtable **rp)
135{ 135{
136 struct rtable *nh, *result, *cur_min; 136 struct rtable *nh, *result, *cur_min;
137 int min_usecount = -1; 137 int min_usecount = -1;
138 int devidx = -1; 138 int devidx = -1;
139 int cur_min_devidx = -1; 139 int cur_min_devidx = -1;
140 140
@@ -143,7 +143,7 @@ static void drr_select_route(const struct flowi *flp,
143 result = NULL; 143 result = NULL;
144 cur_min = NULL; 144 cur_min = NULL;
145 for (nh = rcu_dereference(first); nh; 145 for (nh = rcu_dereference(first); nh;
146 nh = rcu_dereference(nh->u.rt_next)) { 146 nh = rcu_dereference(nh->u.dst.rt_next)) {
147 if ((nh->u.dst.flags & DST_BALANCED) != 0 && 147 if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
148 multipath_comparekeys(&nh->fl, flp)) { 148 multipath_comparekeys(&nh->fl, flp)) {
149 int nh_ifidx = nh->u.dst.dev->ifindex; 149 int nh_ifidx = nh->u.dst.dev->ifindex;
@@ -161,7 +161,7 @@ static void drr_select_route(const struct flowi *flp,
161 */ 161 */
162 devidx = __multipath_finddev(nh_ifidx); 162 devidx = __multipath_finddev(nh_ifidx);
163 if (devidx == -1) { 163 if (devidx == -1) {
164 /* add the interface to the array 164 /* add the interface to the array
165 * SMP safe 165 * SMP safe
166 */ 166 */
167 spin_lock_bh(&state_lock); 167 spin_lock_bh(&state_lock);
diff --git a/net/ipv4/multipath_random.c b/net/ipv4/multipath_random.c
index b8c289f247cb..047e861f06bd 100644
--- a/net/ipv4/multipath_random.c
+++ b/net/ipv4/multipath_random.c
@@ -74,7 +74,7 @@ static void random_select_route(const struct flowi *flp,
74 74
75 /* count all candidate */ 75 /* count all candidate */
76 for (rt = rcu_dereference(first); rt; 76 for (rt = rcu_dereference(first); rt;
77 rt = rcu_dereference(rt->u.rt_next)) { 77 rt = rcu_dereference(rt->u.dst.rt_next)) {
78 if ((rt->u.dst.flags & DST_BALANCED) != 0 && 78 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
79 multipath_comparekeys(&rt->fl, flp)) 79 multipath_comparekeys(&rt->fl, flp))
80 ++candidate_count; 80 ++candidate_count;
@@ -90,7 +90,7 @@ static void random_select_route(const struct flowi *flp,
90 /* find chosen candidate and adjust GC data for all candidates 90 /* find chosen candidate and adjust GC data for all candidates
91 * to ensure they stay in cache 91 * to ensure they stay in cache
92 */ 92 */
93 for (rt = first; rt; rt = rt->u.rt_next) { 93 for (rt = first; rt; rt = rt->u.dst.rt_next) {
94 if ((rt->u.dst.flags & DST_BALANCED) != 0 && 94 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
95 multipath_comparekeys(&rt->fl, flp)) { 95 multipath_comparekeys(&rt->fl, flp)) {
96 rt->u.dst.lastuse = jiffies; 96 rt->u.dst.lastuse = jiffies;
diff --git a/net/ipv4/multipath_rr.c b/net/ipv4/multipath_rr.c
index bba5abe5542d..896246d8040e 100644
--- a/net/ipv4/multipath_rr.c
+++ b/net/ipv4/multipath_rr.c
@@ -58,7 +58,7 @@ static void rr_select_route(const struct flowi *flp,
58 */ 58 */
59 result = NULL; 59 result = NULL;
60 for (nh = rcu_dereference(first); nh; 60 for (nh = rcu_dereference(first); nh;
61 nh = rcu_dereference(nh->u.rt_next)) { 61 nh = rcu_dereference(nh->u.dst.rt_next)) {
62 if ((nh->u.dst.flags & DST_BALANCED) != 0 && 62 if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
63 multipath_comparekeys(&nh->fl, flp)) { 63 multipath_comparekeys(&nh->fl, flp)) {
64 nh->u.dst.lastuse = jiffies; 64 nh->u.dst.lastuse = jiffies;
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
index 92b04823e034..7e22f15d13df 100644
--- a/net/ipv4/multipath_wrandom.c
+++ b/net/ipv4/multipath_wrandom.c
@@ -142,7 +142,7 @@ out:
142 return weight; 142 return weight;
143} 143}
144 144
145static void wrandom_init_state(void) 145static void wrandom_init_state(void)
146{ 146{
147 int i; 147 int i;
148 148
@@ -167,7 +167,7 @@ static void wrandom_select_route(const struct flowi *flp,
167 167
168 /* collect all candidates and identify their weights */ 168 /* collect all candidates and identify their weights */
169 for (rt = rcu_dereference(first); rt; 169 for (rt = rcu_dereference(first); rt;
170 rt = rcu_dereference(rt->u.rt_next)) { 170 rt = rcu_dereference(rt->u.dst.rt_next)) {
171 if ((rt->u.dst.flags & DST_BALANCED) != 0 && 171 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
172 multipath_comparekeys(&rt->fl, flp)) { 172 multipath_comparekeys(&rt->fl, flp)) {
173 struct multipath_candidate* mpc = 173 struct multipath_candidate* mpc =
@@ -287,7 +287,7 @@ static void __multipath_free(struct rcu_head *head)
287 287
288static void __multipath_free_dst(struct rcu_head *head) 288static void __multipath_free_dst(struct rcu_head *head)
289{ 289{
290 struct multipath_dest *dst = container_of(head, 290 struct multipath_dest *dst = container_of(head,
291 struct multipath_dest, 291 struct multipath_dest,
292 rcu); 292 rcu);
293 kfree(dst); 293 kfree(dst);
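The multipath hunks above all make the same mechanical change: the per-route chain pointer is now reached through the embedded dst member (rt->u.dst.rt_next) instead of directly through the union (rt->u.rt_next), while the traversal loops themselves are untouched. The toy program below illustrates that kind of member relocation with invented struct names; it is not the kernel's rtable/dst_entry layout.

    /*
     * Toy illustration of the accessor change in the multipath hunks:
     * the chain pointer lives inside an embedded structure, so the walk
     * uses r->u.dst.rt_next.  Names are invented for this example.
     */
    #include <stdio.h>

    struct toy_dst {
        struct toy_rtable *rt_next;   /* chain pointer now lives here */
        int metric;
    };

    struct toy_rtable {
        union {
            struct toy_dst dst;       /* embedded dst entry */
        } u;
        int id;
    };

    int main(void)
    {
        struct toy_rtable c = { .u.dst.rt_next = NULL, .id = 3 };
        struct toy_rtable b = { .u.dst.rt_next = &c,   .id = 2 };
        struct toy_rtable a = { .u.dst.rt_next = &b,   .id = 1 };
        struct toy_rtable *r;

        /* same loop shape as the *_select_route() functions above */
        for (r = &a; r; r = r->u.dst.rt_next)
            printf("route %d\n", r->id);
        return 0;
    }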
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c47ce7076bd5..6069a11514f6 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -53,7 +53,7 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
53 dst_release(&rt->u.dst); 53 dst_release(&rt->u.dst);
54 dst_release(odst); 54 dst_release(odst);
55 } 55 }
56 56
57 if ((*pskb)->dst->error) 57 if ((*pskb)->dst->error)
58 return -1; 58 return -1;
59 59
@@ -70,7 +70,7 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
70 struct sk_buff *nskb; 70 struct sk_buff *nskb;
71 71
72 nskb = skb_realloc_headroom(*pskb, hh_len); 72 nskb = skb_realloc_headroom(*pskb, hh_len);
73 if (!nskb) 73 if (!nskb)
74 return -1; 74 return -1;
75 if ((*pskb)->sk) 75 if ((*pskb)->sk)
76 skb_set_owner_w(nskb, (*pskb)->sk); 76 skb_set_owner_w(nskb, (*pskb)->sk);
@@ -177,7 +177,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
177 break; 177 break;
178 if ((protocol == 0 && !csum_fold(skb->csum)) || 178 if ((protocol == 0 && !csum_fold(skb->csum)) ||
179 !csum_tcpudp_magic(iph->saddr, iph->daddr, 179 !csum_tcpudp_magic(iph->saddr, iph->daddr,
180 skb->len - dataoff, protocol, 180 skb->len - dataoff, protocol,
181 skb->csum)) { 181 skb->csum)) {
182 skb->ip_summed = CHECKSUM_UNNECESSARY; 182 skb->ip_summed = CHECKSUM_UNNECESSARY;
183 break; 183 break;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 9aa22398b3dc..5170f5c75f9d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -544,7 +544,7 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
544 } 544 }
545 545
546 /* FIXME: underflows must be unconditional, standard verdicts 546 /* FIXME: underflows must be unconditional, standard verdicts
547 < 0 (not ARPT_RETURN). --RR */ 547 < 0 (not ARPT_RETURN). --RR */
548 548
549 /* Clear counters and comefrom */ 549 /* Clear counters and comefrom */
550 e->counters = ((struct xt_counters) { 0, 0 }); 550 e->counters = ((struct xt_counters) { 0, 0 });
@@ -869,8 +869,8 @@ static int do_replace(void __user *user, unsigned int len)
869 /* Update module usage count based on number of rules */ 869 /* Update module usage count based on number of rules */
870 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", 870 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
871 oldinfo->number, oldinfo->initial_entries, newinfo->number); 871 oldinfo->number, oldinfo->initial_entries, newinfo->number);
872 if ((oldinfo->number > oldinfo->initial_entries) || 872 if ((oldinfo->number > oldinfo->initial_entries) ||
873 (newinfo->number <= oldinfo->initial_entries)) 873 (newinfo->number <= oldinfo->initial_entries))
874 module_put(t->me); 874 module_put(t->me);
875 if ((oldinfo->number > oldinfo->initial_entries) && 875 if ((oldinfo->number > oldinfo->initial_entries) &&
876 (newinfo->number <= oldinfo->initial_entries)) 876 (newinfo->number <= oldinfo->initial_entries))
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index d12b1df252a1..709db4d3f48f 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -67,7 +67,7 @@ target(struct sk_buff **pskb,
67 67
68static int 68static int
69checkentry(const char *tablename, const void *e, const struct xt_target *target, 69checkentry(const char *tablename, const void *e, const struct xt_target *target,
70 void *targinfo, unsigned int hook_mask) 70 void *targinfo, unsigned int hook_mask)
71{ 71{
72 const struct arpt_mangle *mangle = targinfo; 72 const struct arpt_mangle *mangle = targinfo;
73 73
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
index ad246ba7790b..4f561f52c83a 100644
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ b/net/ipv4/netfilter/ip_conntrack_amanda.c
@@ -9,7 +9,7 @@
9 * 9 *
10 * Module load syntax: 10 * Module load syntax:
11 * insmod ip_conntrack_amanda.o [master_timeout=n] 11 * insmod ip_conntrack_amanda.o [master_timeout=n]
12 * 12 *
13 * Where master_timeout is the timeout (in seconds) of the master 13 * Where master_timeout is the timeout (in seconds) of the master
14 * connection (port 10080). This defaults to 5 minutes but if 14 * connection (port 10080). This defaults to 5 minutes but if
15 * your clients take longer than 5 minutes to do their work 15 * your clients take longer than 5 minutes to do their work
@@ -84,7 +84,7 @@ static struct {
84}; 84};
85 85
86static int help(struct sk_buff **pskb, 86static int help(struct sk_buff **pskb,
87 struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) 87 struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
88{ 88{
89 struct ts_state ts; 89 struct ts_state ts;
90 struct ip_conntrack_expect *exp; 90 struct ip_conntrack_expect *exp;
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 62be2eb37698..04e466d53c0b 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -2,7 +2,7 @@
2 but required by, the NAT layer; it can also be used by an iptables 2 but required by, the NAT layer; it can also be used by an iptables
3 extension. */ 3 extension. */
4 4
5/* (C) 1999-2001 Paul `Rusty' Russell 5/* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> 6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
@@ -99,7 +99,7 @@ __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
99void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) 99void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
100{ 100{
101 struct ip_conntrack_ecache *ecache; 101 struct ip_conntrack_ecache *ecache;
102 102
103 local_bh_disable(); 103 local_bh_disable();
104 ecache = &__get_cpu_var(ip_conntrack_ecache); 104 ecache = &__get_cpu_var(ip_conntrack_ecache);
105 if (ecache->ct == ct) 105 if (ecache->ct == ct)
@@ -147,9 +147,9 @@ static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
147 unsigned int size, unsigned int rnd) 147 unsigned int size, unsigned int rnd)
148{ 148{
149 return (jhash_3words((__force u32)tuple->src.ip, 149 return (jhash_3words((__force u32)tuple->src.ip,
150 ((__force u32)tuple->dst.ip ^ tuple->dst.protonum), 150 ((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
151 (tuple->src.u.all | (tuple->dst.u.all << 16)), 151 (tuple->src.u.all | (tuple->dst.u.all << 16)),
152 rnd) % size); 152 rnd) % size);
153} 153}
154 154
155static u_int32_t 155static u_int32_t
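The __hash_conntrack() hunk above packs a tuple into three 32-bit words — the source address, the destination address XORed with the protocol number, and both 16-bit port fields merged into one word — then mixes them with jhash_3words() and reduces modulo the table size. The sketch below reproduces only the word packing; jhash_3words() is a kernel helper, so the mixer here is a stand-in added to keep the example self-contained, not the real hash.

    /*
     * Sketch of the word packing used by __hash_conntrack() above.
     * toy_mix() is a placeholder for the kernel's jhash_3words().
     */
    #include <stdint.h>
    #include <stdio.h>

    struct toy_tuple {
        uint32_t src_ip, dst_ip;
        uint16_t src_port, dst_port;   /* the "u.all" fields */
        uint8_t  protonum;
    };

    static uint32_t toy_mix(uint32_t a, uint32_t b, uint32_t c, uint32_t rnd)
    {
        uint32_t h = rnd;              /* stand-in mixer, not jhash */
        h = (h ^ a) * 0x9e3779b1u;
        h = (h ^ b) * 0x9e3779b1u;
        h = (h ^ c) * 0x9e3779b1u;
        return h ^ (h >> 16);
    }

    static uint32_t toy_hash_conntrack(const struct toy_tuple *t,
                                       uint32_t size, uint32_t rnd)
    {
        return toy_mix(t->src_ip,
                       t->dst_ip ^ t->protonum,
                       (uint32_t)t->src_port | ((uint32_t)t->dst_port << 16),
                       rnd) % size;
    }

    int main(void)
    {
        struct toy_tuple t = { 0x0a000001, 0x0a000002, 1234, 80, 6 };
        printf("bucket %u\n", toy_hash_conntrack(&t, 16384, 0xdeadbeef));
        return 0;
    }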
@@ -219,7 +219,7 @@ struct ip_conntrack_expect *
219__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) 219__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
220{ 220{
221 struct ip_conntrack_expect *i; 221 struct ip_conntrack_expect *i;
222 222
223 list_for_each_entry(i, &ip_conntrack_expect_list, list) { 223 list_for_each_entry(i, &ip_conntrack_expect_list, list) {
224 if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) 224 if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
225 return i; 225 return i;
@@ -232,7 +232,7 @@ struct ip_conntrack_expect *
ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
{
	struct ip_conntrack_expect *i;

	read_lock_bh(&ip_conntrack_lock);
	i = __ip_conntrack_expect_find(tuple);
	if (i)
@@ -398,7 +398,7 @@ ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,

static void __ip_conntrack_hash_insert(struct ip_conntrack *ct,
					unsigned int hash,
					unsigned int repl_hash)
{
	ct->id = ++ip_conntrack_next_id;
	list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
@@ -446,15 +446,15 @@ __ip_conntrack_confirm(struct sk_buff **pskb)
	/* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means noone else could have
	   confirmed us. */
	IP_NF_ASSERT(!is_confirmed(ct));
	DEBUGP("Confirming conntrack %p\n", ct);

	write_lock_bh(&ip_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash. If there is, we lost race. */
	list_for_each_entry(h, &ip_conntrack_hash[hash], list)
		if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
@@ -602,7 +602,7 @@ ip_conntrack_proto_find_get(u_int8_t protocol)
		p = &ip_conntrack_generic_protocol;
	}
	preempt_enable();

	return p;
}

@@ -745,7 +745,7 @@ resolve_normal_ct(struct sk_buff *skb,

	IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);

	if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,
			     &tuple,proto))
		return NULL;

@@ -770,7 +770,7 @@ resolve_normal_ct(struct sk_buff *skb,
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
		DEBUGP("ip_conntrack_in: normal packet for %p\n",
		       ct);
		*ctinfo = IP_CT_ESTABLISHED;
	} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
		DEBUGP("ip_conntrack_in: related packet for %p\n",
		       ct);
@@ -821,7 +821,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
	if ((*pskb)->pkt_type == PACKET_BROADCAST) {
		printk("Broadcast packet!\n");
		return NF_ACCEPT;
	} else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
		   == htonl(0x000000FF)) {
		printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
		       NIPQUAD((*pskb)->nh.iph->saddr),
@@ -835,7 +835,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
	/* It may be an special packet, error, unclean...
	 * inverse of the return code tells to the netfilter
	 * core what to do with the packet. */
	if (proto->error != NULL
	    && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
		CONNTRACK_STAT_INC(error);
		CONNTRACK_STAT_INC(invalid);
@@ -875,7 +875,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
int invert_tuplepr(struct ip_conntrack_tuple *inverse,
		   const struct ip_conntrack_tuple *orig)
{
	return ip_ct_invert_tuple(inverse, orig,
				  __ip_conntrack_proto_find(orig->dst.protonum));
}

@@ -884,7 +884,7 @@ static inline int expect_clash(const struct ip_conntrack_expect *a,
			       const struct ip_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct ip_conntrack_tuple intersect_mask
		= { { a->mask.src.ip & b->mask.src.ip,
		      { a->mask.src.u.all & b->mask.src.u.all } },
@@ -922,7 +922,7 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
}

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
{
@@ -1011,7 +1011,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
	}

	/* Will be over limit? */
	if (expect->master->helper->max_expected &&
	    expect->master->expecting >= expect->master->helper->max_expected)
		evict_oldest_expect(expect->master);

@@ -1020,7 +1020,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
	ret = 0;
out:
	write_unlock_bh(&ip_conntrack_lock);
	return ret;
}

/* Alter reply tuple (maybe alter helper). This is for NAT, and is
@@ -1068,7 +1068,7 @@ static inline void unhelp(struct ip_conntrack_tuple_hash *i,
			  const struct ip_conntrack_helper *me)
{
	if (tuplehash_to_ctrack(i)->helper == me) {
		ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
		tuplehash_to_ctrack(i)->helper = NULL;
	}
}
@@ -1104,8 +1104,8 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
}

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __ip_ct_refresh_acct(struct ip_conntrack *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
@@ -1139,7 +1139,7 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct,
#ifdef CONFIG_IP_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
					ntohs(skb->nh.iph->tot_len);
		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
@@ -1193,7 +1193,7 @@ ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	skb_orphan(skb);

	local_bh_disable();
	skb = ip_defrag(skb, user);
	local_bh_enable();

@@ -1210,7 +1210,7 @@ static void ip_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = ip_conntrack_get(skb, &ctinfo);

	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
@@ -1278,7 +1278,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
	struct inet_sock *inet = inet_sk(sk);
	struct ip_conntrack_tuple_hash *h;
	struct ip_conntrack_tuple tuple;

	IP_CT_TUPLE_U_BLANK(&tuple);
	tuple.src.ip = inet->rcv_saddr;
	tuple.src.u.tcp.port = inet->sport;
@@ -1346,7 +1346,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct list_head) * size));
}

@@ -1357,8 +1357,8 @@ void ip_conntrack_cleanup(void)
	ip_ct_attach = NULL;

	/* This makes sure all current packets have passed through
	   netfilter framework. Roll on, two-stage module
	   delete... */
	synchronize_net();

	ip_ct_event_cache_flush();
@@ -1384,11 +1384,11 @@ static struct list_head *alloc_hashtable(int size, int *vmalloced)
	struct list_head *hash;
	unsigned int i;

	*vmalloced = 0;
	hash = (void*)__get_free_pages(GFP_KERNEL,
				       get_order(sizeof(struct list_head)
						 * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct list_head) * size);
@@ -1421,7 +1421,7 @@ static int set_hashsize(const char *val, struct kernel_param *kp)
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we also can
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

@@ -1459,7 +1459,7 @@ int __init ip_conntrack_init(void)

	/* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
	 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
	if (!ip_conntrack_htable_size) {
		ip_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct list_head));
@@ -1489,8 +1489,8 @@ int __init ip_conntrack_init(void)
	}

	ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
						sizeof(struct ip_conntrack), 0,
						0, NULL, NULL);
	if (!ip_conntrack_cachep) {
		printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
		goto err_free_hash;
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c
index 0410c99cacae..1faa68ab9432 100644
--- a/net/ipv4/netfilter/ip_conntrack_ftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_ftp.c
@@ -1,6 +1,6 @@
/* FTP extension for IP connection tracking. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
@@ -169,7 +169,7 @@ static int try_eprt(const char *data, size_t dlen, u_int32_t array[6],
	int length;

	/* First character is delimiter, then "1" for IPv4, then
	   delimiter again. */
	if (dlen <= 3) return 0;
	delim = data[0];
	if (isdigit(delim) || delim < 33 || delim > 126
@@ -344,14 +344,14 @@ static int help(struct sk_buff **pskb,
	if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
		/* Now if this ends in \n, update ftp info. */
		DEBUGP("ip_conntrack_ftp_help: wrong seq pos %s(%u) or %s(%u)\n",
		       ct_ftp_info->seq_aft_nl[0][dir]
		       old_seq_aft_nl_set ? "":"(UNSET) ", old_seq_aft_nl);
		ret = NF_ACCEPT;
		goto out_update_nl;
	}

	/* Initialize IP array to expected address (it's not mentioned
	   in EPSV responses) */
	array[0] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 24) & 0xFF;
	array[1] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 16) & 0xFF;
	array[2] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 8) & 0xFF;
@@ -386,7 +386,7 @@ static int help(struct sk_buff **pskb,

	DEBUGP("conntrack_ftp: match `%s' (%u bytes at %u)\n",
	       fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff);

	/* Allocate expectation which will be inserted */
	exp = ip_conntrack_expect_alloc(ct);
	if (exp == NULL) {
@@ -504,7 +504,7 @@ static int __init ip_conntrack_ftp_init(void)
		sprintf(tmpname, "ftp-%d", ports[i]);
		ftp[i].name = tmpname;

		DEBUGP("ip_ct_ftp: registering helper for port %d\n",
		       ports[i]);
		ret = ip_conntrack_helper_register(&ftp[i]);

diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index aabfe1c06905..53eb365ccc7e 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
static int callforward_filter = 1;
module_param(callforward_filter, bool, 0600);
MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
				     "if both endpoints are on different sides "
				     "(determined by routing information)");

/* Hooks for NAT */
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
index 4d19373bbf0d..2b760c5cf709 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -560,7 +560,7 @@ conntrack_pptp_help(struct sk_buff **pskb,
	tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph);
	BUG_ON(!tcph);
	nexthdr_off += tcph->doff * 4;
	datalen = tcplen - tcph->doff * 4;

	pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph);
	if (!pptph) {
@@ -624,7 +624,7 @@ static struct ip_conntrack_helper pptp = {
	.max_expected = 2,
	.timeout = 5 * 60,
	.tuple = { .src = { .ip = 0,
			    .u = { .tcp = { .port =
					    __constant_htons(PPTP_CONTROL_PORT) } }
			  },
		   .dst = { .ip = 0,
@@ -638,7 +638,7 @@ static struct ip_conntrack_helper pptp = {
		   .dst = { .ip = 0,
			    .u = { .all = 0 },
			    .protonum = 0xff
			  }
		 },
	.help = conntrack_pptp_help,
	.destroy = pptp_destroy_siblings,
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c
index 91832eca4106..053e591f407a 100644
--- a/net/ipv4/netfilter/ip_conntrack_irc.c
+++ b/net/ipv4/netfilter/ip_conntrack_irc.c
@@ -1,6 +1,6 @@
/* IRC extension for IP connection tracking, Version 1.21
 * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org>
 * based on RR's ip_conntrack_ftp.c
 *
 * ip_conntrack_irc.c,v 1.21 2002/02/05 14:49:26 laforge Exp
 *
@@ -12,12 +12,12 @@
 * Module load syntax:
 * insmod ip_conntrack_irc.o ports=port1,port2,...port<MAX_PORTS>
 *			max_dcc_channels=n dcc_timeout=secs
 *
 * please give the ports of all IRC servers You wish to connect to.
 * If You don't specify ports, the default will be port 6667.
 * With max_dcc_channels you can define the maximum number of not
 * yet answered DCC channels per IRC session (default 8).
 * With dcc_timeout you can specify how long the system waits for
 * an expected DCC channel (default 300 seconds).
 *
 */
@@ -63,7 +63,7 @@ static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "

#if 0
#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \
				       __FILE__, __FUNCTION__ , ## args)
#else
#define DEBUGP(format, args...)
#endif
@@ -71,7 +71,7 @@ static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
static int parse_dcc(char *data, char *data_end, u_int32_t *ip,
		     u_int16_t *port, char **ad_beg_p, char **ad_end_p)
/* tries to get the ip_addr and port out of a dcc command
   return value: -1 on failure, 0 on success
	data		pointer to first byte of DCC command data
	data_end	pointer to last byte of dcc command data
	ip		returns parsed ip of dcc command
@@ -90,7 +90,7 @@ static int parse_dcc(char *data, char *data_end, u_int32_t *ip,

	/* skip blanks between ip and port */
	while (*data == ' ') {
		if (data >= data_end)
			return -1;
		data++;
	}
@@ -171,7 +171,7 @@ static int help(struct sk_buff **pskb,

		DEBUGP("DCC %s detected\n", dccprotos[i]);
		data += strlen(dccprotos[i]);
		/* we have at least
		 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
		 * data left (== 14/13 bytes) */
		if (parse_dcc((char *)data, data_limit, &dcc_ip,
@@ -260,7 +260,7 @@ static int __init ip_conntrack_irc_init(void)
	irc_buffer = kmalloc(65536, GFP_KERNEL);
	if (!irc_buffer)
		return -ENOMEM;

	/* If no port given, default to standard irc port */
	if (ports_c == 0)
		ports[ports_c++] = IRC_PORT;
@@ -297,7 +297,7 @@ static int __init ip_conntrack_irc_init(void)
	return 0;
}

/* This function is intentionally _NOT_ defined as __exit, because
 * it is needed by the init function */
static void ip_conntrack_irc_fini(void)
{
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
index a1d6a89f64aa..cc6dd49c9da0 100644
--- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
+++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
@@ -42,7 +42,7 @@ module_param(timeout, uint, 0400);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");

static int help(struct sk_buff **pskb,
		struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
{
	struct ip_conntrack_expect *exp;
	struct iphdr *iph = (*pskb)->nh.iph;
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 7f70b0886b83..9228b76ccd9a 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -6,10 +6,10 @@
 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
 * (C) 2005-2006 by Pablo Neira Ayuso <pablo@eurodev.net>
 *
 * I've reworked this stuff to use attributes instead of conntrack
 * structures. 5.44 am. I need more tea. --pablo 05/07/11.
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL");
static char __initdata version[] = "0.90";

static inline int
ctnetlink_dump_tuples_proto(struct sk_buff *skb,
			    const struct ip_conntrack_tuple *tuple,
			    struct ip_conntrack_protocol *proto)
{
@@ -56,7 +56,7 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb,

	if (likely(proto->tuple_to_nfattr))
		ret = proto->tuple_to_nfattr(skb, tuple);

	NFA_NEST_END(skb, nest_parms);

	return ret;
@@ -70,7 +70,7 @@ ctnetlink_dump_tuples_ip(struct sk_buff *skb,
			 const struct ip_conntrack_tuple *tuple)
{
	struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP);

	NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(__be32), &tuple->src.ip);
	NFA_PUT(skb, CTA_IP_V4_DST, sizeof(__be32), &tuple->dst.ip);

@@ -121,7 +121,7 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct)
		timeout = 0;
	else
		timeout = htonl(timeout_l / HZ);

	NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout);
	return 0;

@@ -141,7 +141,7 @@ ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct ip_conntrack *ct)
		ip_conntrack_proto_put(proto);
		return 0;
	}

	nest_proto = NFA_NEST(skb, CTA_PROTOINFO);

	ret = proto->to_nfattr(skb, nest_proto, ct);
@@ -164,7 +164,7 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct ip_conntrack *ct)

	if (!ct->helper)
		return 0;

	nest_helper = NFA_NEST(skb, CTA_HELP);
	NFA_PUT(skb, CTA_HELP_NAME, strlen(ct->helper->name), ct->helper->name);

@@ -236,7 +236,7 @@ static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct)
{
	__be32 use = htonl(atomic_read(&ct->ct_general.use));

	NFA_PUT(skb, CTA_USE, sizeof(__be32), &use);
	return 0;

@@ -248,7 +248,7 @@ nfattr_failure:

static int
ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
		    int event, int nowait,
		    const struct ip_conntrack *ct)
{
	struct nlmsghdr *nlh;
@@ -271,7 +271,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
	if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nfattr_failure;
	NFA_NEST_END(skb, nest_parms);

	nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY);
	if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nfattr_failure;
@@ -299,7 +299,7 @@ nfattr_failure:

#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
static int ctnetlink_conntrack_event(struct notifier_block *this,
				     unsigned long events, void *ptr)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
@@ -324,7 +324,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
	} else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return NOTIFY_DONE;

	if (!nfnetlink_has_listeners(group))
@@ -349,7 +349,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
	if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nfattr_failure;
	NFA_NEST_END(skb, nest_parms);

	nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY);
	if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nfattr_failure;
@@ -368,16 +368,16 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,

	if (events & IPCT_PROTOINFO
	    && ctnetlink_dump_protoinfo(skb, ct) < 0)
		goto nfattr_failure;

	if ((events & IPCT_HELPER || ct->helper)
	    && ctnetlink_dump_helpinfo(skb, ct) < 0)
		goto nfattr_failure;

#ifdef CONFIG_IP_NF_CONNTRACK_MARK
	if ((events & IPCT_MARK || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nfattr_failure;
#endif

	if (events & IPCT_COUNTER_FILLING &&
@@ -426,7 +426,7 @@ restart:
			cb->args[1] = 0;
		}
		if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq,
					IPCTNL_MSG_CT_NEW,
					1, ct) < 0) {
			nf_conntrack_get(&ct->ct_general);
@@ -488,7 +488,7 @@ static const size_t cta_min_proto[CTA_PROTO_MAX] = {
};

static inline int
ctnetlink_parse_tuple_proto(struct nfattr *attr,
			    struct ip_conntrack_tuple *tuple)
{
	struct nfattr *tb[CTA_PROTO_MAX];
@@ -508,9 +508,9 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,

	if (likely(proto->nfattr_to_tuple))
		ret = proto->nfattr_to_tuple(tb, tuple);

	ip_conntrack_proto_put(proto);

	return ret;
}

@@ -595,7 +595,7 @@ ctnetlink_parse_nat(struct nfattr *nat,
	int err;

	memset(range, 0, sizeof(*range));

	nfattr_parse_nested(tb, CTA_NAT_MAX, nat);

	if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat))
@@ -647,7 +647,7 @@ static const size_t cta_min[CTA_MAX] = {
};

static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
{
	struct ip_conntrack_tuple_hash *h;
@@ -676,14 +676,14 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
		return -ENOENT;

	ct = tuplehash_to_ctrack(h);

	if (cda[CTA_ID-1]) {
		u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1]));
		if (ct->id != id) {
			ip_conntrack_put(ct);
			return -ENOENT;
		}
	}
	if (del_timer(&ct->timeout))
		ct->timeout.function((unsigned long)ct);

@@ -693,7 +693,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
}

static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
{
	struct ip_conntrack_tuple_hash *h;
@@ -714,8 +714,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
		return -ENOTSUPP;
#endif
		if ((*errp = netlink_dump_start(ctnl, skb, nlh,
						ctnetlink_dump_table,
						ctnetlink_done)) != 0)
			return -EINVAL;

		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
@@ -751,7 +751,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
		return -ENOMEM;
	}

	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
				  IPCTNL_MSG_CT_NEW, 1, ct);
	ip_conntrack_put(ct);
	if (err <= 0)
@@ -779,12 +779,12 @@ ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])
	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
		/* unchangeable */
		return -EINVAL;

	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
		/* SEEN_REPLY bit can only be set */
		return -EINVAL;


	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
		/* ASSURED bit can only be set */
		return -EINVAL;
@@ -857,7 +857,7 @@ ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[])
			memset(&ct->help, 0, sizeof(ct->help));
		}
	}

	ct->helper = helper;

	return 0;
@@ -867,7 +867,7 @@ static inline int
ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[])
{
	u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1]));

	if (!del_timer(&ct->timeout))
		return -ETIME;

@@ -891,7 +891,7 @@ ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[])

	if (proto->from_nfattr)
		err = proto->from_nfattr(tb, ct);
	ip_conntrack_proto_put(proto);

	return err;
}
@@ -934,7 +934,7 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
}

static int
ctnetlink_create_conntrack(struct nfattr *cda[],
			   struct ip_conntrack_tuple *otuple,
			   struct ip_conntrack_tuple *rtuple)
{
@@ -943,7 +943,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],

	ct = ip_conntrack_alloc(otuple, rtuple);
	if (ct == NULL || IS_ERR(ct))
		return -ENOMEM;

	if (!cda[CTA_TIMEOUT-1])
		goto err;
@@ -979,13 +979,13 @@ ctnetlink_create_conntrack(struct nfattr *cda[],

	return 0;

err:
	ip_conntrack_free(ct);
	return err;
}

static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
{
	struct ip_conntrack_tuple otuple, rtuple;
@@ -1039,9 +1039,9 @@ out_unlock:
	return err;
}

/***********************************************************************
 * EXPECT
 ***********************************************************************/

static inline int
ctnetlink_exp_dump_tuple(struct sk_buff *skb,
@@ -1049,7 +1049,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb,
			 enum ctattr_expect type)
{
	struct nfattr *nest_parms = NFA_NEST(skb, type);

	if (ctnetlink_dump_tuples(skb, tuple) < 0)
		goto nfattr_failure;

@@ -1059,7 +1059,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb,

nfattr_failure:
	return -1;
}

static inline int
ctnetlink_exp_dump_mask(struct sk_buff *skb,
@@ -1090,7 +1090,7 @@ nfattr_failure:

static inline int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct ip_conntrack_expect *exp)
{
	struct ip_conntrack *master = exp->master;
	__be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ);
@@ -1104,20 +1104,20 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
			&master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			CTA_EXPECT_MASTER) < 0)
		goto nfattr_failure;

	NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(__be32), &timeout);
	NFA_PUT(skb, CTA_EXPECT_ID, sizeof(__be32), &id);

	return 0;

nfattr_failure:
	return -1;
}

static int
ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			int event,
			int nowait,
			const struct ip_conntrack_expect *exp)
{
	struct nlmsghdr *nlh;
@@ -1216,7 +1216,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
			goto out;
		*id = exp->id;
	}
out:
	read_unlock_bh(&ip_conntrack_lock);

	return skb->len;
@@ -1228,7 +1228,7 @@ static const size_t cta_min_exp[CTA_EXPECT_MAX] = {
};

static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
{
	struct ip_conntrack_tuple tuple;
@@ -1247,7 +1247,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		return -EAFNOSUPPORT;

		if ((*errp = netlink_dump_start(ctnl, skb, nlh,
						ctnetlink_exp_dump_table,
						ctnetlink_done)) != 0)
			return -EINVAL;
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
@@ -1275,14 +1275,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
			ip_conntrack_expect_put(exp);
			return -ENOENT;
		}
	}

	err = -ENOMEM;
	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb2)
		goto out;

	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW,
				      1, exp);
	if (err <= 0)
@@ -1300,7 +1300,7 @@ out:
}

static int
ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		     struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
{
	struct ip_conntrack_expect *exp, *tmp;
@@ -1333,7 +1333,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,

		/* after list removal, usage count == 1 */
		ip_conntrack_unexpect_related(exp);
		/* have to put what we 'get' above.
		 * after this line usage count == 0 */
		ip_conntrack_expect_put(exp);
	} else if (cda[CTA_EXPECT_HELP_NAME-1]) {
@@ -1348,7 +1348,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		}
		list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list,
					 list) {
			if (exp->master->helper == h
			    && del_timer(&exp->timeout)) {
				ip_ct_unlink_expect(exp);
				ip_conntrack_expect_put(exp);
@@ -1413,7 +1413,7 @@ ctnetlink_create_expect(struct nfattr *cda[])
		err = -ENOMEM;
		goto out;
	}

	exp->expectfn = NULL;
	exp->flags = 0;
	exp->master = ct;
@@ -1423,7 +1423,7 @@ ctnetlink_create_expect(struct nfattr *cda[])
	err = ip_conntrack_expect_related(exp);
	ip_conntrack_expect_put(exp);

out:
	ip_conntrack_put(tuplehash_to_ctrack(h));
	return err;
}
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 295b6fa340db..ec71abead00c 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -94,9 +94,9 @@ static int icmp_packet(struct ip_conntrack *ct,
			enum ip_conntrack_info ctinfo)
{
	/* Try to delete connection immediately after all replies:
	   won't actually vanish as we still have skb, and del_timer
	   means this will only run once even if count hits zero twice
	   (theoretically possible with SMP) */
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
		if (atomic_dec_and_test(&ct->proto.icmp.count)
		    && del_timer(&ct->timeout))
@@ -114,11 +114,11 @@ static int icmp_packet(struct ip_conntrack *ct,
static int icmp_new(struct ip_conntrack *conntrack,
		    const struct sk_buff *skb)
{
	static const u_int8_t valid_new[] = {
		[ICMP_ECHO] = 1,
		[ICMP_TIMESTAMP] = 1,
		[ICMP_INFO_REQUEST] = 1,
		[ICMP_ADDRESS] = 1
	};

	if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new)
@@ -282,7 +282,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
	    || !tb[CTA_PROTO_ICMP_ID-1])
		return -EINVAL;

	tuple->dst.u.icmp.type =
			*(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]);
	tuple->dst.u.icmp.code =
			*(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index 2443322e4128..9d5b917f49cd 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -1,9 +1,9 @@
/*
 * Connection tracking protocol helper module for SCTP.
 *
 * SCTP is defined in RFC 2960. References to various sections in this code
 * are to this RFC.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
@@ -38,7 +38,7 @@
static DEFINE_RWLOCK(sctp_lock);

/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
   closely. They're more complex. --RR

   And so for me for SCTP :D -Kiran */

@@ -87,32 +87,32 @@ static const unsigned int * sctp_timeouts[]
#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
#define sIV SCTP_CONNTRACK_MAX

/*
	These are the descriptions of the states:

NOTE: These state names are tantalizingly similar to the states of an
SCTP endpoint. But the interpretation of the states is a little different,
considering that these are the states of the connection and not of an end
point. Please note the subtleties. -Kiran

NONE - Nothing so far.
COOKIE WAIT - We have seen an INIT chunk in the original direction, or also
	an INIT_ACK chunk in the reply direction.
COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction.
ESTABLISHED - We have seen a COOKIE_ACK in the reply direction.
SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction.
SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply directoin.
SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
	to that of the SHUTDOWN chunk.
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
	the SHUTDOWN chunk. Connection is closed.
*/

/* TODO
 - I have assumed that the first INIT is in the original direction.
 This messes things when an INIT comes in the reply direction in CLOSED
 state.
 - Check the error type in the reply dir before transitioning from
cookie echoed to closed.
 - Sec 5.2.4 of RFC 2960
 - Multi Homing support.
@@ -229,7 +229,7 @@ static int do_basic_checks(struct ip_conntrack *conntrack,
	for_each_sctp_chunk (skb, sch, _sch, offset, count) {
		DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type);

		if (sch->type == SCTP_CID_INIT
		    || sch->type == SCTP_CID_INIT_ACK
		    || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
			flag = 1;
@@ -269,42 +269,42 @@ static int new_state(enum ip_conntrack_dir dir,
	DEBUGP("Chunk type: %d\n", chunk_type);

	switch (chunk_type) {
		case SCTP_CID_INIT:
			DEBUGP("SCTP_CID_INIT\n");
			i = 0; break;
		case SCTP_CID_INIT_ACK:
			DEBUGP("SCTP_CID_INIT_ACK\n");
			i = 1; break;
		case SCTP_CID_ABORT:
			DEBUGP("SCTP_CID_ABORT\n");
			i = 2; break;
		case SCTP_CID_SHUTDOWN:
			DEBUGP("SCTP_CID_SHUTDOWN\n");
			i = 3; break;
		case SCTP_CID_SHUTDOWN_ACK:
			DEBUGP("SCTP_CID_SHUTDOWN_ACK\n");
			i = 4; break;
		case SCTP_CID_ERROR:
			DEBUGP("SCTP_CID_ERROR\n");
			i = 5; break;
		case SCTP_CID_COOKIE_ECHO:
			DEBUGP("SCTP_CID_COOKIE_ECHO\n");
			i = 6; break;
		case SCTP_CID_COOKIE_ACK:
			DEBUGP("SCTP_CID_COOKIE_ACK\n");
			i = 7; break;
		case SCTP_CID_SHUTDOWN_COMPLETE:
			DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n");
			i = 8; break;
		default:
			/* Other chunks like DATA, SACK, HEARTBEAT and
			   its ACK do not cause a change in state */
			DEBUGP("Unknown chunk type, Will stay in %s\n",
			       sctp_conntrack_names[cur_state]);
			return cur_state;
	}

	DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n",
	       dir, sctp_conntrack_names[cur_state], chunk_type,
	       sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);

@@ -367,7 +367,7 @@ static int sctp_packet(struct ip_conntrack *conntrack,
		/* Sec 8.5.1 (C) */
		if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])
		    && !(sh->vtag == conntrack->proto.sctp.vtag
					[1 - CTINFO2DIR(ctinfo)]
			 && (sch->flags & 1))) {
			write_unlock_bh(&sctp_lock);
			return -1;
@@ -392,17 +392,17 @@ static int sctp_packet(struct ip_conntrack *conntrack,
		}

		/* If it is an INIT or an INIT ACK note down the vtag */
		if (sch->type == SCTP_CID_INIT
		    || sch->type == SCTP_CID_INIT_ACK) {
			sctp_inithdr_t _inithdr, *ih;

			ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
						sizeof(_inithdr), &_inithdr);
			if (ih == NULL) {
				write_unlock_bh(&sctp_lock);
				return -1;
			}
			DEBUGP("Setting vtag %x for dir %d\n",
			       ih->init_tag, !CTINFO2DIR(ctinfo));
			conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag;
		}
@@ -427,7 +427,7 @@ static int sctp_packet(struct ip_conntrack *conntrack,
}

/* Called when a new connection for this protocol found. */
static int sctp_new(struct ip_conntrack *conntrack,
		    const struct sk_buff *skb)
{
	enum sctp_conntrack newconntrack;
@@ -457,7 +457,7 @@ static int sctp_new(struct ip_conntrack *conntrack,
	newconntrack = SCTP_CONNTRACK_MAX;
	for_each_sctp_chunk (skb, sch, _sch, offset, count) {
		/* Don't need lock here: this conntrack not in circulation yet */
		newconntrack = new_state (IP_CT_DIR_ORIGINAL,
					  SCTP_CONNTRACK_NONE, sch->type);

		/* Invalid: delete conntrack */
@@ -472,14 +472,14 @@ static int sctp_new(struct ip_conntrack *conntrack,
			sctp_inithdr_t _inithdr, *ih;

			ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
						sizeof(_inithdr), &_inithdr);
			if (ih == NULL)
				return 0;

			DEBUGP("Setting vtag %x for new conn\n",
			       ih->init_tag);

			conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] =
							ih->init_tag;
		} else {
			/* Sec 8.5.1 (A) */
@@ -489,7 +489,7 @@ static int sctp_new(struct ip_conntrack *conntrack,
		/* If it is a shutdown ack OOTB packet, we expect a return
		   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
		else {
			DEBUGP("Setting vtag %x for new conn OOTB\n",
			       sh->vtag);
			conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
		}
@@ -500,16 +500,16 @@ static int sctp_new(struct ip_conntrack *conntrack,
	return 1;
}

static struct ip_conntrack_protocol ip_conntrack_protocol_sctp = {
	.proto = IPPROTO_SCTP,
	.name = "sctp",
	.pkt_to_tuple = sctp_pkt_to_tuple,
	.invert_tuple = sctp_invert_tuple,
	.print_tuple = sctp_print_tuple,
	.print_conntrack = sctp_print_conntrack,
	.packet = sctp_packet,
	.new = sctp_new,
	.destroy = NULL,
	.me = THIS_MODULE,
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
@@ -603,7 +603,7 @@ static ctl_table ip_ct_net_table[] = {
	{
		.ctl_name = CTL_NET,
		.procname = "net",
		.mode = 0555,
		.child = ip_ct_ipv4_table,
	},
	{ .ctl_name = 0 }
@@ -638,7 +638,7 @@ static int __init ip_conntrack_proto_sctp_init(void)
	ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp);
#endif
 out:
	DEBUGP("SCTP conntrack module loading %s\n",
	       ret ? "failed": "succeeded");
	return ret;
}
@@ -647,7 +647,7 @@ static void __exit ip_conntrack_proto_sctp_fini(void)
647{ 647{
648 ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp); 648 ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp);
649#ifdef CONFIG_SYSCTL 649#ifdef CONFIG_SYSCTL
650 unregister_sysctl_table(ip_ct_sysctl_header); 650 unregister_sysctl_table(ip_ct_sysctl_header);
651#endif 651#endif
652 DEBUGP("SCTP conntrack module unloaded\n"); 652 DEBUGP("SCTP conntrack module unloaded\n");
653} 653}
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index c34f48fe5478..fa35b49fe2fa 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -45,8 +45,8 @@
45/* Protects conntrack->proto.tcp */ 45/* Protects conntrack->proto.tcp */
46static DEFINE_RWLOCK(tcp_lock); 46static DEFINE_RWLOCK(tcp_lock);
47 47
48/* "Be conservative in what you do, 48/* "Be conservative in what you do,
49 be liberal in what you accept from others." 49 be liberal in what you accept from others."
50 If it's non-zero, we mark only out of window RST segments as INVALID. */ 50 If it's non-zero, we mark only out of window RST segments as INVALID. */
51int ip_ct_tcp_be_liberal __read_mostly = 0; 51int ip_ct_tcp_be_liberal __read_mostly = 0;
52 52
@@ -54,8 +54,8 @@ int ip_ct_tcp_be_liberal __read_mostly = 0;
54 connections. */ 54 connections. */
55int ip_ct_tcp_loose __read_mostly = 1; 55int ip_ct_tcp_loose __read_mostly = 1;
56 56
57/* Max number of the retransmitted packets without receiving an (acceptable) 57/* Max number of the retransmitted packets without receiving an (acceptable)
58 ACK from the destination. If this number is reached, a shorter timer 58 ACK from the destination. If this number is reached, a shorter timer
59 will be started. */ 59 will be started. */
60int ip_ct_tcp_max_retrans __read_mostly = 3; 60int ip_ct_tcp_max_retrans __read_mostly = 3;
61 61
@@ -74,7 +74,7 @@ static const char *tcp_conntrack_names[] = {
74 "CLOSE", 74 "CLOSE",
75 "LISTEN" 75 "LISTEN"
76}; 76};
77 77
78#define SECS * HZ 78#define SECS * HZ
79#define MINS * 60 SECS 79#define MINS * 60 SECS
80#define HOURS * 60 MINS 80#define HOURS * 60 MINS
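The postfix macros above let the timeout initialisers below read as plain durations; an illustrative expansion (not part of this patch):

	/* "2 MINS" expands to "2 * 60 * HZ", i.e. two minutes in jiffies. */
	unsigned int example_close     = 10 SECS;	/* 10 * HZ           */
	unsigned int example_time_wait = 2 MINS;	/* 2 * 60 * HZ       */
	unsigned int example_long      = 8 HOURS;	/* 8 * 60 * 60 * HZ  */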
@@ -90,10 +90,10 @@ unsigned int ip_ct_tcp_timeout_time_wait __read_mostly = 2 MINS;
90unsigned int ip_ct_tcp_timeout_close __read_mostly = 10 SECS; 90unsigned int ip_ct_tcp_timeout_close __read_mostly = 10 SECS;
91 91
92/* RFC1122 says the R2 limit should be at least 100 seconds. 92/* RFC1122 says the R2 limit should be at least 100 seconds.
93 Linux uses 15 packets as limit, which corresponds 93 Linux uses 15 packets as limit, which corresponds
94 to ~13-30min depending on RTO. */ 94 to ~13-30min depending on RTO. */
95unsigned int ip_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; 95unsigned int ip_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
96 96
97static const unsigned int * tcp_timeouts[] 97static const unsigned int * tcp_timeouts[]
98= { NULL, /* TCP_CONNTRACK_NONE */ 98= { NULL, /* TCP_CONNTRACK_NONE */
99 &ip_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ 99 &ip_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */
@@ -106,7 +106,7 @@ static const unsigned int * tcp_timeouts[]
106 &ip_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */ 106 &ip_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */
107 NULL, /* TCP_CONNTRACK_LISTEN */ 107 NULL, /* TCP_CONNTRACK_LISTEN */
108 }; 108 };
109 109
110#define sNO TCP_CONNTRACK_NONE 110#define sNO TCP_CONNTRACK_NONE
111#define sSS TCP_CONNTRACK_SYN_SENT 111#define sSS TCP_CONNTRACK_SYN_SENT
112#define sSR TCP_CONNTRACK_SYN_RECV 112#define sSR TCP_CONNTRACK_SYN_RECV
@@ -129,13 +129,13 @@ enum tcp_bit_set {
129 TCP_RST_SET, 129 TCP_RST_SET,
130 TCP_NONE_SET, 130 TCP_NONE_SET,
131}; 131};
132 132
133/* 133/*
134 * The TCP state transition table needs a few words... 134 * The TCP state transition table needs a few words...
135 * 135 *
136 * We are the man in the middle. All the packets go through us 136 * We are the man in the middle. All the packets go through us
137 * but might get lost in transit to the destination. 137 * but might get lost in transit to the destination.
138 * It is assumed that the destinations can't receive segments 138 * It is assumed that the destinations can't receive segments
139 * we haven't seen. 139 * we haven't seen.
140 * 140 *
141 * The checked segment is in window, but our windows are *not* 141 * The checked segment is in window, but our windows are *not*
@@ -145,11 +145,11 @@ enum tcp_bit_set {
145 * The meanings of the states are: 145 * The meanings of the states are:
146 * 146 *
147 * NONE: initial state 147 * NONE: initial state
148 * SYN_SENT: SYN-only packet seen 148 * SYN_SENT: SYN-only packet seen
149 * SYN_RECV: SYN-ACK packet seen 149 * SYN_RECV: SYN-ACK packet seen
150 * ESTABLISHED: ACK packet seen 150 * ESTABLISHED: ACK packet seen
151 * FIN_WAIT: FIN packet seen 151 * FIN_WAIT: FIN packet seen
152 * CLOSE_WAIT: ACK seen (after FIN) 152 * CLOSE_WAIT: ACK seen (after FIN)
153 * LAST_ACK: FIN seen (after FIN) 153 * LAST_ACK: FIN seen (after FIN)
154 * TIME_WAIT: last ACK seen 154 * TIME_WAIT: last ACK seen
155 * CLOSE: closed connection 155 * CLOSE: closed connection
@@ -157,8 +157,8 @@ enum tcp_bit_set {
157 * LISTEN state is not used. 157 * LISTEN state is not used.
158 * 158 *
159 * Packets marked as IGNORED (sIG): 159 * Packets marked as IGNORED (sIG):
160 * if they may be either invalid or valid 160 * if they may be either invalid or valid
161 * and the receiver may send back a connection 161 * and the receiver may send back a connection
162 * closing RST or a SYN/ACK. 162 * closing RST or a SYN/ACK.
163 * 163 *
164 * Packets marked as INVALID (sIV): 164 * Packets marked as INVALID (sIV):
@@ -175,7 +175,7 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
175 * sSS -> sSS Retransmitted SYN 175 * sSS -> sSS Retransmitted SYN
176 * sSR -> sIG Late retransmitted SYN? 176 * sSR -> sIG Late retransmitted SYN?
177 * sES -> sIG Error: SYNs in window outside the SYN_SENT state 177 * sES -> sIG Error: SYNs in window outside the SYN_SENT state
178 * are errors. Receiver will reply with RST 178 * are errors. Receiver will reply with RST
179 * and close the connection. 179 * and close the connection.
180 * Or we are not in sync and hold a dead connection. 180 * Or we are not in sync and hold a dead connection.
181 * sFW -> sIG 181 * sFW -> sIG
@@ -188,10 +188,10 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
188/*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, 188/*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV },
189/* 189/*
190 * A SYN/ACK from the client is always invalid: 190 * A SYN/ACK from the client is always invalid:
191 * - either it tries to set up a simultaneous open, which is 191 * - either it tries to set up a simultaneous open, which is
192 * not supported; 192 * not supported;
193 * - or the firewall has just been inserted between the two hosts 193 * - or the firewall has just been inserted between the two hosts
194 * during the session set-up. The SYN will be retransmitted 194 * during the session set-up. The SYN will be retransmitted
195 * by the true client (or it'll time out). 195 * by the true client (or it'll time out).
196 */ 196 */
197/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 197/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
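For orientation, the table above is consulted with a three-level index - direction, TCP flag class, current state - as tcp_new() below does; tcp_packet() follows the same pattern with the packet direction as the first index. A minimal illustrative fragment (not part of this patch):

	/* dir is IP_CT_DIR_ORIGINAL or IP_CT_DIR_REPLY, the middle index
	 * comes from get_conntrack_index(th), the last one is the current
	 * conntrack state. */
	new_state = tcp_conntracks[dir]
				  [get_conntrack_index(th)]
				  [conntrack->proto.tcp.state];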
@@ -201,9 +201,9 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
201 * sSS -> sIV Client might not send FIN in this state: 201 * sSS -> sIV Client might not send FIN in this state:
202 * we enforce waiting for a SYN/ACK reply first. 202 * we enforce waiting for a SYN/ACK reply first.
203 * sSR -> sFW Close started. 203 * sSR -> sFW Close started.
204 * sES -> sFW 204 * sES -> sFW
205 * sFW -> sLA FIN seen in both directions, waiting for 205 * sFW -> sLA FIN seen in both directions, waiting for
206 * the last ACK. 206 * the last ACK.
207 * Might be a retransmitted FIN as well... 207 * Might be a retransmitted FIN as well...
208 * sCW -> sLA 208 * sCW -> sLA
209 * sLA -> sLA Retransmitted FIN. Remain in the same state. 209 * sLA -> sLA Retransmitted FIN. Remain in the same state.
@@ -281,7 +281,7 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
281/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ 281/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
282/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, 282/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
283/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } 283/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
284 } 284 }
285}; 285};
286 286
287static int tcp_pkt_to_tuple(const struct sk_buff *skb, 287static int tcp_pkt_to_tuple(const struct sk_buff *skb,
@@ -337,7 +337,7 @@ static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
337 const struct ip_conntrack *ct) 337 const struct ip_conntrack *ct)
338{ 338{
339 struct nfattr *nest_parms; 339 struct nfattr *nest_parms;
340 340
341 read_lock_bh(&tcp_lock); 341 read_lock_bh(&tcp_lock);
342 nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); 342 nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
343 NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), 343 NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
@@ -367,7 +367,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
367 if (!attr) 367 if (!attr)
368 return 0; 368 return 0;
369 369
370 nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr); 370 nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr);
371 371
372 if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp)) 372 if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp))
373 return -EINVAL; 373 return -EINVAL;
@@ -376,7 +376,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
376 return -EINVAL; 376 return -EINVAL;
377 377
378 write_lock_bh(&tcp_lock); 378 write_lock_bh(&tcp_lock);
379 ct->proto.tcp.state = 379 ct->proto.tcp.state =
380 *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); 380 *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]);
381 write_unlock_bh(&tcp_lock); 381 write_unlock_bh(&tcp_lock);
382 382
@@ -395,30 +395,30 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph)
395 395
396/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering 396/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
397 in IP Filter' by Guido van Rooij. 397 in IP Filter' by Guido van Rooij.
398 398
399 http://www.nluug.nl/events/sane2000/papers.html 399 http://www.nluug.nl/events/sane2000/papers.html
400 http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz 400 http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz
401 401
402 The boundaries and the conditions are changed according to RFC793: 402 The boundaries and the conditions are changed according to RFC793:
403 the packet must intersect the window (i.e. segments may be 403 the packet must intersect the window (i.e. segments may be
404 after the right or before the left edge) and thus receivers may ACK 404 after the right or before the left edge) and thus receivers may ACK
405 segments after the right edge of the window. 405 segments after the right edge of the window.
406 406
407 td_maxend = max(sack + max(win,1)) seen in reply packets 407 td_maxend = max(sack + max(win,1)) seen in reply packets
408 td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets 408 td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
409 td_maxwin += seq + len - sender.td_maxend 409 td_maxwin += seq + len - sender.td_maxend
410 if seq + len > sender.td_maxend 410 if seq + len > sender.td_maxend
411 td_end = max(seq + len) seen in sent packets 411 td_end = max(seq + len) seen in sent packets
412 412
413 I. Upper bound for valid data: seq <= sender.td_maxend 413 I. Upper bound for valid data: seq <= sender.td_maxend
414 II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin 414 II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
415 III. Upper bound for valid ack: sack <= receiver.td_end 415 III. Upper bound for valid ack: sack <= receiver.td_end
416 IV. Lower bound for valid ack: ack >= receiver.td_end - MAXACKWINDOW 416 IV. Lower bound for valid ack: ack >= receiver.td_end - MAXACKWINDOW
417 417
418 where sack is the highest right edge of sack block found in the packet. 418 where sack is the highest right edge of sack block found in the packet.
419 419
420 The upper bound limit for a valid ack is not ignored - 420 The upper bound limit for a valid ack is not ignored -
421 we don't have to deal with fragments. 421 we don't have to deal with fragments.
422*/ 422*/
423 423
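A worked reading of the four bounds above, as they are applied later in tcp_in_window() (illustrative numbers, not from the patch):

	/*   I.  before(seq,  sender->td_maxend + 1)
	 *   II. after(end,   sender->td_end - receiver->td_maxwin - 1)
	 *   III.before(sack, receiver->td_end + 1)
	 *   IV. after(ack,   receiver->td_end - MAXACKWINDOW(sender))
	 *
	 * Example: sender.td_end = 1000, sender.td_maxend = 1500,
	 * receiver.td_end = 5000, receiver.td_maxwin = 800.  A segment with
	 * seq = 1200, len = 100 (end = 1300) and ack = 4900 passes all four:
	 * 1200 <= 1500, 1300 >= 200, 4900 <= 5000, and 4900 >= 5000 - 66000
	 * in sequence-number arithmetic. */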
424static inline __u32 segment_seq_plus_len(__u32 seq, 424static inline __u32 segment_seq_plus_len(__u32 seq,
@@ -429,25 +429,25 @@ static inline __u32 segment_seq_plus_len(__u32 seq,
429 return (seq + len - (iph->ihl + tcph->doff)*4 429 return (seq + len - (iph->ihl + tcph->doff)*4
430 + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0)); 430 + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
431} 431}
432 432
433/* Fixme: what about big packets? */ 433/* Fixme: what about big packets? */
434#define MAXACKWINCONST 66000 434#define MAXACKWINCONST 66000
435#define MAXACKWINDOW(sender) \ 435#define MAXACKWINDOW(sender) \
436 ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \ 436 ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
437 : MAXACKWINCONST) 437 : MAXACKWINCONST)
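In other words (illustrative note, not part of the patch):

	/* MAXACKWINDOW(s) is simply max(s->td_maxwin, 66000): with
	 * td_maxwin = 250000 the bound is 250000, with td_maxwin = 8192 it
	 * falls back to the 66000-byte constant above. */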
438 438
439/* 439/*
440 * Simplified tcp_parse_options routine from tcp_input.c 440 * Simplified tcp_parse_options routine from tcp_input.c
441 */ 441 */
442static void tcp_options(const struct sk_buff *skb, 442static void tcp_options(const struct sk_buff *skb,
443 struct iphdr *iph, 443 struct iphdr *iph,
444 struct tcphdr *tcph, 444 struct tcphdr *tcph,
445 struct ip_ct_tcp_state *state) 445 struct ip_ct_tcp_state *state)
446{ 446{
447 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; 447 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
448 unsigned char *ptr; 448 unsigned char *ptr;
449 int length = (tcph->doff*4) - sizeof(struct tcphdr); 449 int length = (tcph->doff*4) - sizeof(struct tcphdr);
450 450
451 if (!length) 451 if (!length)
452 return; 452 return;
453 453
@@ -456,13 +456,13 @@ static void tcp_options(const struct sk_buff *skb,
456 length, buff); 456 length, buff);
457 BUG_ON(ptr == NULL); 457 BUG_ON(ptr == NULL);
458 458
459 state->td_scale = 459 state->td_scale =
460 state->flags = 0; 460 state->flags = 0;
461 461
462 while (length > 0) { 462 while (length > 0) {
463 int opcode=*ptr++; 463 int opcode=*ptr++;
464 int opsize; 464 int opsize;
465 465
466 switch (opcode) { 466 switch (opcode) {
467 case TCPOPT_EOL: 467 case TCPOPT_EOL:
468 return; 468 return;
@@ -476,13 +476,13 @@ static void tcp_options(const struct sk_buff *skb,
476 if (opsize > length) 476 if (opsize > length)
477 break; /* don't parse partial options */ 477 break; /* don't parse partial options */
478 478
479 if (opcode == TCPOPT_SACK_PERM 479 if (opcode == TCPOPT_SACK_PERM
480 && opsize == TCPOLEN_SACK_PERM) 480 && opsize == TCPOLEN_SACK_PERM)
481 state->flags |= IP_CT_TCP_FLAG_SACK_PERM; 481 state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
482 else if (opcode == TCPOPT_WINDOW 482 else if (opcode == TCPOPT_WINDOW
483 && opsize == TCPOLEN_WINDOW) { 483 && opsize == TCPOLEN_WINDOW) {
484 state->td_scale = *(u_int8_t *)ptr; 484 state->td_scale = *(u_int8_t *)ptr;
485 485
486 if (state->td_scale > 14) { 486 if (state->td_scale > 14) {
487 /* See RFC1323 */ 487 /* See RFC1323 */
488 state->td_scale = 14; 488 state->td_scale = 14;
@@ -517,16 +517,16 @@ static void tcp_sack(const struct sk_buff *skb,
517 /* Fast path for timestamp-only option */ 517 /* Fast path for timestamp-only option */
518 if (length == TCPOLEN_TSTAMP_ALIGNED*4 518 if (length == TCPOLEN_TSTAMP_ALIGNED*4
519 && *(__be32 *)ptr == 519 && *(__be32 *)ptr ==
520 __constant_htonl((TCPOPT_NOP << 24) 520 __constant_htonl((TCPOPT_NOP << 24)
521 | (TCPOPT_NOP << 16) 521 | (TCPOPT_NOP << 16)
522 | (TCPOPT_TIMESTAMP << 8) 522 | (TCPOPT_TIMESTAMP << 8)
523 | TCPOLEN_TIMESTAMP)) 523 | TCPOLEN_TIMESTAMP))
524 return; 524 return;
525 525
526 while (length > 0) { 526 while (length > 0) {
527 int opcode=*ptr++; 527 int opcode=*ptr++;
528 int opsize, i; 528 int opsize, i;
529 529
530 switch (opcode) { 530 switch (opcode) {
531 case TCPOPT_EOL: 531 case TCPOPT_EOL:
532 return; 532 return;
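The fast-path constant above spelled out, with the standard option values from RFC 793/1323 (TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8, TCPOLEN_TIMESTAMP = 10); an illustrative check, not part of the patch:

	/* (1 << 24) | (1 << 16) | (8 << 8) | 10 == 0x0101080a, i.e. the
	 * byte sequence 01 01 08 0a: two NOP pads followed by a 10-byte
	 * timestamp option - the usual aligned layout - so a header that
	 * carries only a timestamp is recognised with one 32-bit compare. */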
@@ -540,16 +540,16 @@ static void tcp_sack(const struct sk_buff *skb,
540 if (opsize > length) 540 if (opsize > length)
541 break; /* don't parse partial options */ 541 break; /* don't parse partial options */
542 542
543 if (opcode == TCPOPT_SACK 543 if (opcode == TCPOPT_SACK
544 && opsize >= (TCPOLEN_SACK_BASE 544 && opsize >= (TCPOLEN_SACK_BASE
545 + TCPOLEN_SACK_PERBLOCK) 545 + TCPOLEN_SACK_PERBLOCK)
546 && !((opsize - TCPOLEN_SACK_BASE) 546 && !((opsize - TCPOLEN_SACK_BASE)
547 % TCPOLEN_SACK_PERBLOCK)) { 547 % TCPOLEN_SACK_PERBLOCK)) {
548 for (i = 0; 548 for (i = 0;
549 i < (opsize - TCPOLEN_SACK_BASE); 549 i < (opsize - TCPOLEN_SACK_BASE);
550 i += TCPOLEN_SACK_PERBLOCK) { 550 i += TCPOLEN_SACK_PERBLOCK) {
551 tmp = ntohl(*((__be32 *)(ptr+i)+1)); 551 tmp = ntohl(*((__be32 *)(ptr+i)+1));
552 552
553 if (after(tmp, *sack)) 553 if (after(tmp, *sack))
554 *sack = tmp; 554 *sack = tmp;
555 } 555 }
@@ -561,18 +561,18 @@ static void tcp_sack(const struct sk_buff *skb,
561 } 561 }
562} 562}
563 563
564static int tcp_in_window(struct ip_ct_tcp *state, 564static int tcp_in_window(struct ip_ct_tcp *state,
565 enum ip_conntrack_dir dir, 565 enum ip_conntrack_dir dir,
566 unsigned int index, 566 unsigned int index,
567 const struct sk_buff *skb, 567 const struct sk_buff *skb,
568 struct iphdr *iph, 568 struct iphdr *iph,
569 struct tcphdr *tcph) 569 struct tcphdr *tcph)
570{ 570{
571 struct ip_ct_tcp_state *sender = &state->seen[dir]; 571 struct ip_ct_tcp_state *sender = &state->seen[dir];
572 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 572 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
573 __u32 seq, ack, sack, end, win, swin; 573 __u32 seq, ack, sack, end, win, swin;
574 int res; 574 int res;
575 575
576 /* 576 /*
577 * Get the required data from the packet. 577 * Get the required data from the packet.
578 */ 578 */
@@ -580,23 +580,23 @@ static int tcp_in_window(struct ip_ct_tcp *state,
580 ack = sack = ntohl(tcph->ack_seq); 580 ack = sack = ntohl(tcph->ack_seq);
581 win = ntohs(tcph->window); 581 win = ntohs(tcph->window);
582 end = segment_seq_plus_len(seq, skb->len, iph, tcph); 582 end = segment_seq_plus_len(seq, skb->len, iph, tcph);
583 583
584 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) 584 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
585 tcp_sack(skb, iph, tcph, &sack); 585 tcp_sack(skb, iph, tcph, &sack);
586 586
587 DEBUGP("tcp_in_window: START\n"); 587 DEBUGP("tcp_in_window: START\n");
588 DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " 588 DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
589 "seq=%u ack=%u sack=%u win=%u end=%u\n", 589 "seq=%u ack=%u sack=%u win=%u end=%u\n",
590 NIPQUAD(iph->saddr), ntohs(tcph->source), 590 NIPQUAD(iph->saddr), ntohs(tcph->source),
591 NIPQUAD(iph->daddr), ntohs(tcph->dest), 591 NIPQUAD(iph->daddr), ntohs(tcph->dest),
592 seq, ack, sack, win, end); 592 seq, ack, sack, win, end);
593 DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 593 DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
594 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 594 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
595 sender->td_end, sender->td_maxend, sender->td_maxwin, 595 sender->td_end, sender->td_maxend, sender->td_maxwin,
596 sender->td_scale, 596 sender->td_scale,
597 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 597 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
598 receiver->td_scale); 598 receiver->td_scale);
599 599
600 if (sender->td_end == 0) { 600 if (sender->td_end == 0) {
601 /* 601 /*
602 * Initialize sender data. 602 * Initialize sender data.
@@ -605,26 +605,26 @@ static int tcp_in_window(struct ip_ct_tcp *state,
605 /* 605 /*
606 * Outgoing SYN-ACK in reply to a SYN. 606 * Outgoing SYN-ACK in reply to a SYN.
607 */ 607 */
608 sender->td_end = 608 sender->td_end =
609 sender->td_maxend = end; 609 sender->td_maxend = end;
610 sender->td_maxwin = (win == 0 ? 1 : win); 610 sender->td_maxwin = (win == 0 ? 1 : win);
611 611
612 tcp_options(skb, iph, tcph, sender); 612 tcp_options(skb, iph, tcph, sender);
613 /* 613 /*
614 * RFC 1323: 614 * RFC 1323:
615 * Both sides must send the Window Scale option 615 * Both sides must send the Window Scale option
616 * to enable window scaling in either direction. 616 * to enable window scaling in either direction.
617 */ 617 */
618 if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE 618 if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
619 && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) 619 && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
620 sender->td_scale = 620 sender->td_scale =
621 receiver->td_scale = 0; 621 receiver->td_scale = 0;
622 } else { 622 } else {
623 /* 623 /*
624 * We are in the middle of a connection, 624 * We are in the middle of a connection,
625 * its history is lost for us. 625 * its history is lost for us.
626 * Let's try to use the data from the packet. 626 * Let's try to use the data from the packet.
627 */ 627 */
628 sender->td_end = end; 628 sender->td_end = end;
629 sender->td_maxwin = (win == 0 ? 1 : win); 629 sender->td_maxwin = (win == 0 ? 1 : win);
630 sender->td_maxend = end + sender->td_maxwin; 630 sender->td_maxend = end + sender->td_maxwin;
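A short worked note on the RFC 1323 rule above (illustrative, not part of the patch): window scaling only takes effect when both directions advertised the option.

	/* If the SYN carried WS = 7 and the SYN-ACK also carried a window
	 * scale option, a later advertised window of 1000 counts as
	 * 1000 << 7 = 128000 bytes (the shift is applied to non-SYN
	 * segments further down in tcp_in_window()); if either side
	 * omitted the option, td_scale is zeroed and the raw 16-bit
	 * value is used. */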
@@ -632,11 +632,11 @@ static int tcp_in_window(struct ip_ct_tcp *state,
632 } else if (((state->state == TCP_CONNTRACK_SYN_SENT 632 } else if (((state->state == TCP_CONNTRACK_SYN_SENT
633 && dir == IP_CT_DIR_ORIGINAL) 633 && dir == IP_CT_DIR_ORIGINAL)
634 || (state->state == TCP_CONNTRACK_SYN_RECV 634 || (state->state == TCP_CONNTRACK_SYN_RECV
635 && dir == IP_CT_DIR_REPLY)) 635 && dir == IP_CT_DIR_REPLY))
636 && after(end, sender->td_end)) { 636 && after(end, sender->td_end)) {
637 /* 637 /*
638 * RFC 793: "if a TCP is reinitialized ... then it need 638 * RFC 793: "if a TCP is reinitialized ... then it need
639 * not wait at all; it must only be sure to use sequence 639 * not wait at all; it must only be sure to use sequence
640 * numbers larger than those recently used." 640 * numbers larger than those recently used."
641 */ 641 */
642 sender->td_end = 642 sender->td_end =
@@ -645,14 +645,14 @@ static int tcp_in_window(struct ip_ct_tcp *state,
645 645
646 tcp_options(skb, iph, tcph, sender); 646 tcp_options(skb, iph, tcph, sender);
647 } 647 }
648 648
649 if (!(tcph->ack)) { 649 if (!(tcph->ack)) {
650 /* 650 /*
651 * If there is no ACK, just pretend it was set and OK. 651 * If there is no ACK, just pretend it was set and OK.
652 */ 652 */
653 ack = sack = receiver->td_end; 653 ack = sack = receiver->td_end;
654 } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) == 654 } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
655 (TCP_FLAG_ACK|TCP_FLAG_RST)) 655 (TCP_FLAG_ACK|TCP_FLAG_RST))
656 && (ack == 0)) { 656 && (ack == 0)) {
657 /* 657 /*
658 * Broken TCP stacks that set ACK in RST packets as well 658 * Broken TCP stacks that set ACK in RST packets as well
@@ -662,8 +662,8 @@ static int tcp_in_window(struct ip_ct_tcp *state,
662 } 662 }
663 663
664 if (seq == end 664 if (seq == end
665 && (!tcph->rst 665 && (!tcph->rst
666 || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) 666 || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
667 /* 667 /*
668 * Packet contains no data: we assume it is valid 668 * Packet contains no data: we assume it is valid
669 * and check the ack value only. 669 * and check the ack value only.
@@ -672,7 +672,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
672 * SYN. 672 * SYN.
673 */ 673 */
674 seq = end = sender->td_end; 674 seq = end = sender->td_end;
675 675
676 DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " 676 DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
677 "seq=%u ack=%u sack =%u win=%u end=%u\n", 677 "seq=%u ack=%u sack =%u win=%u end=%u\n",
678 NIPQUAD(iph->saddr), ntohs(tcph->source), 678 NIPQUAD(iph->saddr), ntohs(tcph->source),
@@ -681,26 +681,26 @@ static int tcp_in_window(struct ip_ct_tcp *state,
681 DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " 681 DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
682 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 682 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
683 sender->td_end, sender->td_maxend, sender->td_maxwin, 683 sender->td_end, sender->td_maxend, sender->td_maxwin,
684 sender->td_scale, 684 sender->td_scale,
685 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 685 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
686 receiver->td_scale); 686 receiver->td_scale);
687 687
688 DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n", 688 DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
689 before(seq, sender->td_maxend + 1), 689 before(seq, sender->td_maxend + 1),
690 after(end, sender->td_end - receiver->td_maxwin - 1), 690 after(end, sender->td_end - receiver->td_maxwin - 1),
691 before(sack, receiver->td_end + 1), 691 before(sack, receiver->td_end + 1),
692 after(ack, receiver->td_end - MAXACKWINDOW(sender))); 692 after(ack, receiver->td_end - MAXACKWINDOW(sender)));
693 693
694 if (before(seq, sender->td_maxend + 1) && 694 if (before(seq, sender->td_maxend + 1) &&
695 after(end, sender->td_end - receiver->td_maxwin - 1) && 695 after(end, sender->td_end - receiver->td_maxwin - 1) &&
696 before(sack, receiver->td_end + 1) && 696 before(sack, receiver->td_end + 1) &&
697 after(ack, receiver->td_end - MAXACKWINDOW(sender))) { 697 after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
698 /* 698 /*
699 * Take into account window scaling (RFC 1323). 699 * Take into account window scaling (RFC 1323).
700 */ 700 */
701 if (!tcph->syn) 701 if (!tcph->syn)
702 win <<= sender->td_scale; 702 win <<= sender->td_scale;
703 703
704 /* 704 /*
705 * Update sender data. 705 * Update sender data.
706 */ 706 */
@@ -720,7 +720,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
720 receiver->td_maxend++; 720 receiver->td_maxend++;
721 } 721 }
722 722
723 /* 723 /*
724 * Check retransmissions. 724 * Check retransmissions.
725 */ 725 */
726 if (index == TCP_ACK_SET) { 726 if (index == TCP_ACK_SET) {
@@ -756,11 +756,11 @@ static int tcp_in_window(struct ip_ct_tcp *state,
756 : "ACK is over the upper bound (ACKed data not seen yet)" 756 : "ACK is over the upper bound (ACKed data not seen yet)"
757 : "SEQ is under the lower bound (already ACKed data retransmitted)" 757 : "SEQ is under the lower bound (already ACKed data retransmitted)"
758 : "SEQ is over the upper bound (over the window of the receiver)"); 758 : "SEQ is over the upper bound (over the window of the receiver)");
759 } 759 }
760 760
761 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " 761 DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
762 "receiver end=%u maxend=%u maxwin=%u\n", 762 "receiver end=%u maxend=%u maxwin=%u\n",
763 res, sender->td_end, sender->td_maxend, sender->td_maxwin, 763 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
764 receiver->td_end, receiver->td_maxend, receiver->td_maxwin); 764 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
765 765
766 return res; 766 return res;
@@ -769,7 +769,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
769#ifdef CONFIG_IP_NF_NAT_NEEDED 769#ifdef CONFIG_IP_NF_NAT_NEEDED
770/* Update sender->td_end after NAT successfully mangled the packet */ 770/* Update sender->td_end after NAT successfully mangled the packet */
771void ip_conntrack_tcp_update(struct sk_buff *skb, 771void ip_conntrack_tcp_update(struct sk_buff *skb,
772 struct ip_conntrack *conntrack, 772 struct ip_conntrack *conntrack,
773 enum ip_conntrack_dir dir) 773 enum ip_conntrack_dir dir)
774{ 774{
775 struct iphdr *iph = skb->nh.iph; 775 struct iphdr *iph = skb->nh.iph;
@@ -781,7 +781,7 @@ void ip_conntrack_tcp_update(struct sk_buff *skb,
781#endif 781#endif
782 782
783 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph); 783 end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph);
784 784
785 write_lock_bh(&tcp_lock); 785 write_lock_bh(&tcp_lock);
786 /* 786 /*
787 * We have to worry for the ack in the reply packet only... 787 * We have to worry for the ack in the reply packet only...
@@ -793,11 +793,11 @@ void ip_conntrack_tcp_update(struct sk_buff *skb,
793 DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " 793 DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
794 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 794 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
795 sender->td_end, sender->td_maxend, sender->td_maxwin, 795 sender->td_end, sender->td_maxend, sender->td_maxwin,
796 sender->td_scale, 796 sender->td_scale,
797 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 797 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
798 receiver->td_scale); 798 receiver->td_scale);
799} 799}
800 800
801#endif 801#endif
802 802
803#define TH_FIN 0x01 803#define TH_FIN 0x01
@@ -847,8 +847,8 @@ static int tcp_error(struct sk_buff *skb,
847 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 847 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
848 "ip_ct_tcp: short packet "); 848 "ip_ct_tcp: short packet ");
849 return -NF_ACCEPT; 849 return -NF_ACCEPT;
850 } 850 }
851 851
852 /* Not whole TCP header or malformed packet */ 852 /* Not whole TCP header or malformed packet */
853 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { 853 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
854 if (LOG_INVALID(IPPROTO_TCP)) 854 if (LOG_INVALID(IPPROTO_TCP))
@@ -856,7 +856,7 @@ static int tcp_error(struct sk_buff *skb,
856 "ip_ct_tcp: truncated/malformed packet "); 856 "ip_ct_tcp: truncated/malformed packet ");
857 return -NF_ACCEPT; 857 return -NF_ACCEPT;
858 } 858 }
859 859
860 /* Checksum invalid? Ignore. 860 /* Checksum invalid? Ignore.
861 * We skip checking packets on the outgoing path 861 * We skip checking packets on the outgoing path
862 * because it is assumed to be correct. 862 * because it is assumed to be correct.
@@ -893,11 +893,11 @@ static int tcp_packet(struct ip_conntrack *conntrack,
893 struct tcphdr *th, _tcph; 893 struct tcphdr *th, _tcph;
894 unsigned long timeout; 894 unsigned long timeout;
895 unsigned int index; 895 unsigned int index;
896 896
897 th = skb_header_pointer(skb, iph->ihl * 4, 897 th = skb_header_pointer(skb, iph->ihl * 4,
898 sizeof(_tcph), &_tcph); 898 sizeof(_tcph), &_tcph);
899 BUG_ON(th == NULL); 899 BUG_ON(th == NULL);
900 900
901 write_lock_bh(&tcp_lock); 901 write_lock_bh(&tcp_lock);
902 old_state = conntrack->proto.tcp.state; 902 old_state = conntrack->proto.tcp.state;
903 dir = CTINFO2DIR(ctinfo); 903 dir = CTINFO2DIR(ctinfo);
@@ -907,7 +907,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
907 switch (new_state) { 907 switch (new_state) {
908 case TCP_CONNTRACK_IGNORE: 908 case TCP_CONNTRACK_IGNORE:
909 /* Ignored packets: 909 /* Ignored packets:
910 * 910 *
911 * a) SYN in ORIGINAL 911 * a) SYN in ORIGINAL
912 * b) SYN/ACK in REPLY 912 * b) SYN/ACK in REPLY
913 * c) ACK in reply direction after initial SYN in original. 913 * c) ACK in reply direction after initial SYN in original.
@@ -916,30 +916,30 @@ static int tcp_packet(struct ip_conntrack *conntrack,
916 && conntrack->proto.tcp.last_index == TCP_SYN_SET 916 && conntrack->proto.tcp.last_index == TCP_SYN_SET
917 && conntrack->proto.tcp.last_dir != dir 917 && conntrack->proto.tcp.last_dir != dir
918 && ntohl(th->ack_seq) == 918 && ntohl(th->ack_seq) ==
919 conntrack->proto.tcp.last_end) { 919 conntrack->proto.tcp.last_end) {
920 /* This SYN/ACK acknowledges a SYN that we earlier 920 /* This SYN/ACK acknowledges a SYN that we earlier
921 * ignored as invalid. This means that the client and 921 * ignored as invalid. This means that the client and
922 * the server are both in sync, while the firewall is 922 * the server are both in sync, while the firewall is
923 * not. We kill this session and block the SYN/ACK so 923 * not. We kill this session and block the SYN/ACK so
924 * that the client cannot but retransmit its SYN and 924 * that the client cannot but retransmit its SYN and
925 * thus initiate a clean new session. 925 * thus initiate a clean new session.
926 */ 926 */
927 write_unlock_bh(&tcp_lock); 927 write_unlock_bh(&tcp_lock);
928 if (LOG_INVALID(IPPROTO_TCP)) 928 if (LOG_INVALID(IPPROTO_TCP))
929 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 929 nf_log_packet(PF_INET, 0, skb, NULL, NULL,
930 NULL, "ip_ct_tcp: " 930 NULL, "ip_ct_tcp: "
931 "killing out of sync session "); 931 "killing out of sync session ");
932 if (del_timer(&conntrack->timeout)) 932 if (del_timer(&conntrack->timeout))
933 conntrack->timeout.function((unsigned long) 933 conntrack->timeout.function((unsigned long)
934 conntrack); 934 conntrack);
935 return -NF_DROP; 935 return -NF_DROP;
936 } 936 }
937 conntrack->proto.tcp.last_index = index; 937 conntrack->proto.tcp.last_index = index;
938 conntrack->proto.tcp.last_dir = dir; 938 conntrack->proto.tcp.last_dir = dir;
939 conntrack->proto.tcp.last_seq = ntohl(th->seq); 939 conntrack->proto.tcp.last_seq = ntohl(th->seq);
940 conntrack->proto.tcp.last_end = 940 conntrack->proto.tcp.last_end =
941 segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th); 941 segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th);
942 942
943 write_unlock_bh(&tcp_lock); 943 write_unlock_bh(&tcp_lock);
944 if (LOG_INVALID(IPPROTO_TCP)) 944 if (LOG_INVALID(IPPROTO_TCP))
945 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 945 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
@@ -959,16 +959,16 @@ static int tcp_packet(struct ip_conntrack *conntrack,
959 if (old_state < TCP_CONNTRACK_TIME_WAIT) 959 if (old_state < TCP_CONNTRACK_TIME_WAIT)
960 break; 960 break;
961 if ((conntrack->proto.tcp.seen[dir].flags & 961 if ((conntrack->proto.tcp.seen[dir].flags &
962 IP_CT_TCP_FLAG_CLOSE_INIT) 962 IP_CT_TCP_FLAG_CLOSE_INIT)
963 || after(ntohl(th->seq), 963 || after(ntohl(th->seq),
964 conntrack->proto.tcp.seen[dir].td_end)) { 964 conntrack->proto.tcp.seen[dir].td_end)) {
965 /* Attempt to reopen a closed connection. 965 /* Attempt to reopen a closed connection.
966 * Delete this connection and look up again. */ 966 * Delete this connection and look up again. */
967 write_unlock_bh(&tcp_lock); 967 write_unlock_bh(&tcp_lock);
968 if (del_timer(&conntrack->timeout)) 968 if (del_timer(&conntrack->timeout))
969 conntrack->timeout.function((unsigned long) 969 conntrack->timeout.function((unsigned long)
970 conntrack); 970 conntrack);
971 return -NF_REPEAT; 971 return -NF_REPEAT;
972 } else { 972 } else {
973 write_unlock_bh(&tcp_lock); 973 write_unlock_bh(&tcp_lock);
974 if (LOG_INVALID(IPPROTO_TCP)) 974 if (LOG_INVALID(IPPROTO_TCP))
@@ -979,9 +979,9 @@ static int tcp_packet(struct ip_conntrack *conntrack,
979 case TCP_CONNTRACK_CLOSE: 979 case TCP_CONNTRACK_CLOSE:
980 if (index == TCP_RST_SET 980 if (index == TCP_RST_SET
981 && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) 981 && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
982 && conntrack->proto.tcp.last_index == TCP_SYN_SET) 982 && conntrack->proto.tcp.last_index == TCP_SYN_SET)
983 || (!test_bit(IPS_ASSURED_BIT, &conntrack->status) 983 || (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
984 && conntrack->proto.tcp.last_index == TCP_ACK_SET)) 984 && conntrack->proto.tcp.last_index == TCP_ACK_SET))
985 && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) { 985 && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
986 /* RST sent to invalid SYN or ACK we had let through 986 /* RST sent to invalid SYN or ACK we had let through
987 * at a) and c) above: 987 * at a) and c) above:
@@ -1000,13 +1000,13 @@ static int tcp_packet(struct ip_conntrack *conntrack,
1000 break; 1000 break;
1001 } 1001 }
1002 1002
1003 if (!tcp_in_window(&conntrack->proto.tcp, dir, index, 1003 if (!tcp_in_window(&conntrack->proto.tcp, dir, index,
1004 skb, iph, th)) { 1004 skb, iph, th)) {
1005 write_unlock_bh(&tcp_lock); 1005 write_unlock_bh(&tcp_lock);
1006 return -NF_ACCEPT; 1006 return -NF_ACCEPT;
1007 } 1007 }
1008 in_window: 1008 in_window:
1009 /* From now on we have got in-window packets */ 1009 /* From now on we have got in-window packets */
1010 conntrack->proto.tcp.last_index = index; 1010 conntrack->proto.tcp.last_index = index;
1011 1011
1012 DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " 1012 DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
@@ -1018,9 +1018,9 @@ static int tcp_packet(struct ip_conntrack *conntrack,
1018 old_state, new_state); 1018 old_state, new_state);
1019 1019
1020 conntrack->proto.tcp.state = new_state; 1020 conntrack->proto.tcp.state = new_state;
1021 if (old_state != new_state 1021 if (old_state != new_state
1022 && (new_state == TCP_CONNTRACK_FIN_WAIT 1022 && (new_state == TCP_CONNTRACK_FIN_WAIT
1023 || new_state == TCP_CONNTRACK_CLOSE)) 1023 || new_state == TCP_CONNTRACK_CLOSE))
1024 conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 1024 conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1025 timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans 1025 timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans
1026 && *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans 1026 && *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans
@@ -1046,8 +1046,8 @@ static int tcp_packet(struct ip_conntrack *conntrack,
1046 && (old_state == TCP_CONNTRACK_SYN_RECV 1046 && (old_state == TCP_CONNTRACK_SYN_RECV
1047 || old_state == TCP_CONNTRACK_ESTABLISHED) 1047 || old_state == TCP_CONNTRACK_ESTABLISHED)
1048 && new_state == TCP_CONNTRACK_ESTABLISHED) { 1048 && new_state == TCP_CONNTRACK_ESTABLISHED) {
1049 /* Set ASSURED if we see a valid ack in ESTABLISHED 1049 /* Set ASSURED if we see a valid ack in ESTABLISHED
1050 after SYN_RECV or a valid answer for a picked up 1050 after SYN_RECV or a valid answer for a picked up
1051 connection. */ 1051 connection. */
1052 set_bit(IPS_ASSURED_BIT, &conntrack->status); 1052 set_bit(IPS_ASSURED_BIT, &conntrack->status);
1053 ip_conntrack_event_cache(IPCT_STATUS, skb); 1053 ip_conntrack_event_cache(IPCT_STATUS, skb);
@@ -1056,7 +1056,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
1056 1056
1057 return NF_ACCEPT; 1057 return NF_ACCEPT;
1058} 1058}
1059 1059
1060/* Called when a new connection for this protocol is found. */ 1060/* Called when a new connection for this protocol is found. */
1061static int tcp_new(struct ip_conntrack *conntrack, 1061static int tcp_new(struct ip_conntrack *conntrack,
1062 const struct sk_buff *skb) 1062 const struct sk_buff *skb)
@@ -1072,7 +1072,7 @@ static int tcp_new(struct ip_conntrack *conntrack,
1072 th = skb_header_pointer(skb, iph->ihl * 4, 1072 th = skb_header_pointer(skb, iph->ihl * 4,
1073 sizeof(_tcph), &_tcph); 1073 sizeof(_tcph), &_tcph);
1074 BUG_ON(th == NULL); 1074 BUG_ON(th == NULL);
1075 1075
1076 /* Don't need lock here: this conntrack not in circulation yet */ 1076 /* Don't need lock here: this conntrack not in circulation yet */
1077 new_state 1077 new_state
1078 = tcp_conntracks[0][get_conntrack_index(th)] 1078 = tcp_conntracks[0][get_conntrack_index(th)]
@@ -1113,7 +1113,7 @@ static int tcp_new(struct ip_conntrack *conntrack,
1113 if (conntrack->proto.tcp.seen[0].td_maxwin == 0) 1113 if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
1114 conntrack->proto.tcp.seen[0].td_maxwin = 1; 1114 conntrack->proto.tcp.seen[0].td_maxwin = 1;
1115 conntrack->proto.tcp.seen[0].td_maxend = 1115 conntrack->proto.tcp.seen[0].td_maxend =
1116 conntrack->proto.tcp.seen[0].td_end + 1116 conntrack->proto.tcp.seen[0].td_end +
1117 conntrack->proto.tcp.seen[0].td_maxwin; 1117 conntrack->proto.tcp.seen[0].td_maxwin;
1118 conntrack->proto.tcp.seen[0].td_scale = 0; 1118 conntrack->proto.tcp.seen[0].td_scale = 0;
1119 1119
@@ -1123,25 +1123,25 @@ static int tcp_new(struct ip_conntrack *conntrack,
1123 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | 1123 conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
1124 IP_CT_TCP_FLAG_BE_LIBERAL; 1124 IP_CT_TCP_FLAG_BE_LIBERAL;
1125 } 1125 }
1126 1126
1127 conntrack->proto.tcp.seen[1].td_end = 0; 1127 conntrack->proto.tcp.seen[1].td_end = 0;
1128 conntrack->proto.tcp.seen[1].td_maxend = 0; 1128 conntrack->proto.tcp.seen[1].td_maxend = 0;
1129 conntrack->proto.tcp.seen[1].td_maxwin = 1; 1129 conntrack->proto.tcp.seen[1].td_maxwin = 1;
1130 conntrack->proto.tcp.seen[1].td_scale = 0; 1130 conntrack->proto.tcp.seen[1].td_scale = 0;
1131 1131
1132 /* tcp_packet will set them */ 1132 /* tcp_packet will set them */
1133 conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; 1133 conntrack->proto.tcp.state = TCP_CONNTRACK_NONE;
1134 conntrack->proto.tcp.last_index = TCP_NONE_SET; 1134 conntrack->proto.tcp.last_index = TCP_NONE_SET;
1135 1135
1136 DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " 1136 DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
1137 "receiver end=%u maxend=%u maxwin=%u scale=%i\n", 1137 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
1138 sender->td_end, sender->td_maxend, sender->td_maxwin, 1138 sender->td_end, sender->td_maxend, sender->td_maxwin,
1139 sender->td_scale, 1139 sender->td_scale,
1140 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 1140 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
1141 receiver->td_scale); 1141 receiver->td_scale);
1142 return 1; 1142 return 1;
1143} 1143}
1144 1144
1145struct ip_conntrack_protocol ip_conntrack_protocol_tcp = 1145struct ip_conntrack_protocol ip_conntrack_protocol_tcp =
1146{ 1146{
1147 .proto = IPPROTO_TCP, 1147 .proto = IPPROTO_TCP,
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
index d0e8a16970ec..a99a7c75e5b5 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
@@ -70,7 +70,7 @@ static int udp_packet(struct ip_conntrack *conntrack,
70 /* If we've seen traffic both ways, this is some kind of UDP 70 /* If we've seen traffic both ways, this is some kind of UDP
71 stream. Extend timeout. */ 71 stream. Extend timeout. */
72 if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { 72 if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
73 ip_ct_refresh_acct(conntrack, ctinfo, skb, 73 ip_ct_refresh_acct(conntrack, ctinfo, skb,
74 ip_ct_udp_timeout_stream); 74 ip_ct_udp_timeout_stream);
75 /* Also, more likely to be important, and not a probe */ 75 /* Also, more likely to be important, and not a probe */
76 if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status)) 76 if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
@@ -102,7 +102,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
102 "ip_ct_udp: short packet "); 102 "ip_ct_udp: short packet ");
103 return -NF_ACCEPT; 103 return -NF_ACCEPT;
104 } 104 }
105 105
106 /* Truncated/malformed packets */ 106 /* Truncated/malformed packets */
107 if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { 107 if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
108 if (LOG_INVALID(IPPROTO_UDP)) 108 if (LOG_INVALID(IPPROTO_UDP))
@@ -110,7 +110,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
110 "ip_ct_udp: truncated/malformed packet "); 110 "ip_ct_udp: truncated/malformed packet ");
111 return -NF_ACCEPT; 111 return -NF_ACCEPT;
112 } 112 }
113 113
114 /* Packet with no checksum */ 114 /* Packet with no checksum */
115 if (!hdr->check) 115 if (!hdr->check)
116 return NF_ACCEPT; 116 return NF_ACCEPT;
@@ -126,7 +126,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
126 "ip_ct_udp: bad UDP checksum "); 126 "ip_ct_udp: bad UDP checksum ");
127 return -NF_ACCEPT; 127 return -NF_ACCEPT;
128 } 128 }
129 129
130 return NF_ACCEPT; 130 return NF_ACCEPT;
131} 131}
132 132
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c
index 11c588a10e6b..c59a962c1f61 100644
--- a/net/ipv4/netfilter/ip_conntrack_sip.c
+++ b/net/ipv4/netfilter/ip_conntrack_sip.c
@@ -321,7 +321,7 @@ int ct_sip_get_info(const char *dptr, size_t dlen,
321 continue; 321 continue;
322 } 322 }
323 aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, 323 aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen,
324 ct_sip_lnlen(dptr, limit), 324 ct_sip_lnlen(dptr, limit),
325 hnfo->case_sensitive); 325 hnfo->case_sensitive);
326 if (!aux) { 326 if (!aux) {
327 DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, 327 DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str,
@@ -406,7 +406,7 @@ static int sip_help(struct sk_buff **pskb,
406 if (dataoff >= (*pskb)->len) { 406 if (dataoff >= (*pskb)->len) {
407 DEBUGP("skb->len = %u\n", (*pskb)->len); 407 DEBUGP("skb->len = %u\n", (*pskb)->len);
408 return NF_ACCEPT; 408 return NF_ACCEPT;
409 } 409 }
410 410
411 ip_ct_refresh(ct, *pskb, sip_timeout * HZ); 411 ip_ct_refresh(ct, *pskb, sip_timeout * HZ);
412 412
@@ -439,16 +439,16 @@ static int sip_help(struct sk_buff **pskb,
439 } 439 }
440 /* Get ip and port address from SDP packet. */ 440 /* Get ip and port address from SDP packet. */
441 if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, 441 if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
442 POS_CONNECTION) > 0) { 442 POS_CONNECTION) > 0) {
443 443
444 /* We'll drop only if there are parse problems. */ 444 /* We'll drop only if there are parse problems. */
445 if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr, 445 if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr,
446 dptr + datalen) < 0) { 446 dptr + datalen) < 0) {
447 ret = NF_DROP; 447 ret = NF_DROP;
448 goto out; 448 goto out;
449 } 449 }
450 if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, 450 if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
451 POS_MEDIA) > 0) { 451 POS_MEDIA) > 0) {
452 452
453 port = simple_strtoul(dptr + matchoff, NULL, 10); 453 port = simple_strtoul(dptr + matchoff, NULL, 10);
454 if (port < 1024) { 454 if (port < 1024) {
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 86efb5449676..5903588fddce 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -46,7 +46,7 @@ DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
46 46
47static int kill_proto(struct ip_conntrack *i, void *data) 47static int kill_proto(struct ip_conntrack *i, void *data)
48{ 48{
49 return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == 49 return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
50 *((u_int8_t *) data)); 50 *((u_int8_t *) data));
51} 51}
52 52
@@ -124,12 +124,12 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
124 (*pos)++; 124 (*pos)++;
125 return ct_get_next(s, v); 125 return ct_get_next(s, v);
126} 126}
127 127
128static void ct_seq_stop(struct seq_file *s, void *v) 128static void ct_seq_stop(struct seq_file *s, void *v)
129{ 129{
130 read_unlock_bh(&ip_conntrack_lock); 130 read_unlock_bh(&ip_conntrack_lock);
131} 131}
132 132
133static int ct_seq_show(struct seq_file *s, void *v) 133static int ct_seq_show(struct seq_file *s, void *v)
134{ 134{
135 const struct ip_conntrack_tuple_hash *hash = v; 135 const struct ip_conntrack_tuple_hash *hash = v;
@@ -155,12 +155,12 @@ static int ct_seq_show(struct seq_file *s, void *v)
155 155
156 if (proto->print_conntrack(s, conntrack)) 156 if (proto->print_conntrack(s, conntrack))
157 return -ENOSPC; 157 return -ENOSPC;
158 158
159 if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 159 if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
160 proto)) 160 proto))
161 return -ENOSPC; 161 return -ENOSPC;
162 162
163 if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL])) 163 if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
164 return -ENOSPC; 164 return -ENOSPC;
165 165
166 if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status))) 166 if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
@@ -171,7 +171,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
171 proto)) 171 proto))
172 return -ENOSPC; 172 return -ENOSPC;
173 173
174 if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY])) 174 if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
175 return -ENOSPC; 175 return -ENOSPC;
176 176
177 if (test_bit(IPS_ASSURED_BIT, &conntrack->status)) 177 if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
@@ -200,7 +200,7 @@ static struct seq_operations ct_seq_ops = {
200 .stop = ct_seq_stop, 200 .stop = ct_seq_stop,
201 .show = ct_seq_show 201 .show = ct_seq_show
202}; 202};
203 203
204static int ct_open(struct inode *inode, struct file *file) 204static int ct_open(struct inode *inode, struct file *file)
205{ 205{
206 struct seq_file *seq; 206 struct seq_file *seq;
@@ -229,7 +229,7 @@ static struct file_operations ct_file_ops = {
229 .llseek = seq_lseek, 229 .llseek = seq_lseek,
230 .release = seq_release_private, 230 .release = seq_release_private,
231}; 231};
232 232
233/* expects */ 233/* expects */
234static void *exp_seq_start(struct seq_file *s, loff_t *pos) 234static void *exp_seq_start(struct seq_file *s, loff_t *pos)
235{ 235{
@@ -253,7 +253,7 @@ static void *exp_seq_start(struct seq_file *s, loff_t *pos)
253 253
254static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) 254static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
255{ 255{
256 struct list_head *e = v; 256 struct list_head *e = v;
257 257
258 ++*pos; 258 ++*pos;
259 e = e->next; 259 e = e->next;
@@ -297,7 +297,7 @@ static int exp_open(struct inode *inode, struct file *file)
297{ 297{
298 return seq_open(file, &exp_seq_ops); 298 return seq_open(file, &exp_seq_ops);
299} 299}
300 300
301static struct file_operations exp_file_ops = { 301static struct file_operations exp_file_ops = {
302 .owner = THIS_MODULE, 302 .owner = THIS_MODULE,
303 .open = exp_open, 303 .open = exp_open,
@@ -426,14 +426,14 @@ static unsigned int ip_conntrack_help(unsigned int hooknum,
426} 426}
427 427
428static unsigned int ip_conntrack_defrag(unsigned int hooknum, 428static unsigned int ip_conntrack_defrag(unsigned int hooknum,
429 struct sk_buff **pskb, 429 struct sk_buff **pskb,
430 const struct net_device *in, 430 const struct net_device *in,
431 const struct net_device *out, 431 const struct net_device *out,
432 int (*okfn)(struct sk_buff *)) 432 int (*okfn)(struct sk_buff *))
433{ 433{
434#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE) 434#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
435 /* Previously seen (loopback)? Ignore. Do this before 435 /* Previously seen (loopback)? Ignore. Do this before
436 fragment check. */ 436 fragment check. */
437 if ((*pskb)->nfct) 437 if ((*pskb)->nfct)
438 return NF_ACCEPT; 438 return NF_ACCEPT;
439#endif 439#endif
@@ -441,7 +441,7 @@ static unsigned int ip_conntrack_defrag(unsigned int hooknum,
441 /* Gather fragments. */ 441 /* Gather fragments. */
442 if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { 442 if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
443 *pskb = ip_ct_gather_frags(*pskb, 443 *pskb = ip_ct_gather_frags(*pskb,
444 hooknum == NF_IP_PRE_ROUTING ? 444 hooknum == NF_IP_PRE_ROUTING ?
445 IP_DEFRAG_CONNTRACK_IN : 445 IP_DEFRAG_CONNTRACK_IN :
446 IP_DEFRAG_CONNTRACK_OUT); 446 IP_DEFRAG_CONNTRACK_OUT);
447 if (!*pskb) 447 if (!*pskb)
@@ -776,7 +776,7 @@ static ctl_table ip_ct_net_table[] = {
776 { 776 {
777 .ctl_name = CTL_NET, 777 .ctl_name = CTL_NET,
778 .procname = "net", 778 .procname = "net",
779 .mode = 0555, 779 .mode = 0555,
780 .child = ip_ct_ipv4_table, 780 .child = ip_ct_ipv4_table,
781 }, 781 },
782 { .ctl_name = 0 } 782 { .ctl_name = 0 }
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index ef56de2eff0c..76e175e7a972 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(ports, "port numbers of tftp servers");
33 33
34#if 0 34#if 0
35#define DEBUGP(format, args...) printk("%s:%s:" format, \ 35#define DEBUGP(format, args...) printk("%s:%s:" format, \
36 __FILE__, __FUNCTION__ , ## args) 36 __FILE__, __FUNCTION__ , ## args)
37#else 37#else
38#define DEBUGP(format, args...) 38#define DEBUGP(format, args...)
39#endif 39#endif
@@ -113,7 +113,7 @@ static void ip_conntrack_tftp_fini(void)
113 DEBUGP("unregistering helper for port %d\n", 113 DEBUGP("unregistering helper for port %d\n",
114 ports[i]); 114 ports[i]);
115 ip_conntrack_helper_unregister(&tftp[i]); 115 ip_conntrack_helper_unregister(&tftp[i]);
116 } 116 }
117} 117}
118 118
119static int __init ip_conntrack_tftp_init(void) 119static int __init ip_conntrack_tftp_init(void)
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 5e08c2bf887d..275a4d3faf0a 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -120,7 +120,7 @@ static int
120in_range(const struct ip_conntrack_tuple *tuple, 120in_range(const struct ip_conntrack_tuple *tuple,
121 const struct ip_nat_range *range) 121 const struct ip_nat_range *range)
122{ 122{
123 struct ip_nat_protocol *proto = 123 struct ip_nat_protocol *proto =
124 __ip_nat_proto_find(tuple->dst.protonum); 124 __ip_nat_proto_find(tuple->dst.protonum);
125 125
126 /* If we are supposed to map IPs, then we must be in the 126 /* If we are supposed to map IPs, then we must be in the
@@ -443,8 +443,8 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
443 (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); 443 (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
444 444
 445	/* Redirects on non-null NATs must be dropped, else they'll 445	/* Redirects on non-null NATs must be dropped, else they'll
446 start talking to each other without our translation, and be 446 start talking to each other without our translation, and be
447 confused... --RR */ 447 confused... --RR */
448 if (inside->icmp.type == ICMP_REDIRECT) { 448 if (inside->icmp.type == ICMP_REDIRECT) {
449 /* If NAT isn't finished, assume it and drop. */ 449 /* If NAT isn't finished, assume it and drop. */
450 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) 450 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
@@ -458,8 +458,8 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
458 *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); 458 *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
459 459
460 if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 + 460 if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 +
461 sizeof(struct icmphdr) + inside->ip.ihl*4, 461 sizeof(struct icmphdr) + inside->ip.ihl*4,
462 &inner, 462 &inner,
463 __ip_conntrack_proto_find(inside->ip.protocol))) 463 __ip_conntrack_proto_find(inside->ip.protocol)))
464 return 0; 464 return 0;
465 465
@@ -537,7 +537,7 @@ EXPORT_SYMBOL(ip_nat_protocol_unregister);
537#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ 537#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
538 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) 538 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
539int 539int
540ip_nat_port_range_to_nfattr(struct sk_buff *skb, 540ip_nat_port_range_to_nfattr(struct sk_buff *skb,
541 const struct ip_nat_range *range) 541 const struct ip_nat_range *range)
542{ 542{
543 NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16), 543 NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
@@ -555,21 +555,21 @@ int
555ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range) 555ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
556{ 556{
557 int ret = 0; 557 int ret = 0;
558 558
559 /* we have to return whether we actually parsed something or not */ 559 /* we have to return whether we actually parsed something or not */
560 560
561 if (tb[CTA_PROTONAT_PORT_MIN-1]) { 561 if (tb[CTA_PROTONAT_PORT_MIN-1]) {
562 ret = 1; 562 ret = 1;
563 range->min.tcp.port = 563 range->min.tcp.port =
564 *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]); 564 *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
565 } 565 }
566 566
567 if (!tb[CTA_PROTONAT_PORT_MAX-1]) { 567 if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
568 if (ret) 568 if (ret)
569 range->max.tcp.port = range->min.tcp.port; 569 range->max.tcp.port = range->min.tcp.port;
570 } else { 570 } else {
571 ret = 1; 571 ret = 1;
572 range->max.tcp.port = 572 range->max.tcp.port =
573 *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]); 573 *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
574 } 574 }
575 575
diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c
index 913960e1380f..32e01d8dffcb 100644
--- a/net/ipv4/netfilter/ip_nat_ftp.c
+++ b/net/ipv4/netfilter/ip_nat_ftp.c
@@ -50,7 +50,7 @@ mangle_rfc959_packet(struct sk_buff **pskb,
50 DEBUGP("calling ip_nat_mangle_tcp_packet\n"); 50 DEBUGP("calling ip_nat_mangle_tcp_packet\n");
51 51
52 *seq += strlen(buffer) - matchlen; 52 *seq += strlen(buffer) - matchlen;
53 return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, 53 return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
54 matchlen, buffer, strlen(buffer)); 54 matchlen, buffer, strlen(buffer));
55} 55}
56 56
@@ -72,7 +72,7 @@ mangle_eprt_packet(struct sk_buff **pskb,
72 DEBUGP("calling ip_nat_mangle_tcp_packet\n"); 72 DEBUGP("calling ip_nat_mangle_tcp_packet\n");
73 73
74 *seq += strlen(buffer) - matchlen; 74 *seq += strlen(buffer) - matchlen;
75 return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, 75 return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
76 matchlen, buffer, strlen(buffer)); 76 matchlen, buffer, strlen(buffer));
77} 77}
78 78
@@ -94,7 +94,7 @@ mangle_epsv_packet(struct sk_buff **pskb,
94 DEBUGP("calling ip_nat_mangle_tcp_packet\n"); 94 DEBUGP("calling ip_nat_mangle_tcp_packet\n");
95 95
96 *seq += strlen(buffer) - matchlen; 96 *seq += strlen(buffer) - matchlen;
97 return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, 97 return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
98 matchlen, buffer, strlen(buffer)); 98 matchlen, buffer, strlen(buffer));
99} 99}
100 100
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index 2e5c4bc52a60..dc778cfef58b 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -1,4 +1,4 @@
1/* ip_nat_helper.c - generic support functions for NAT helpers 1/* ip_nat_helper.c - generic support functions for NAT helpers
2 * 2 *
3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org> 3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4 * (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org> 4 * (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org>
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * 14 Jan 2002 Harald Welte <laforge@gnumonks.org>: 10 * 14 Jan 2002 Harald Welte <laforge@gnumonks.org>:
11 * - add support for SACK adjustment 11 * - add support for SACK adjustment
12 * 14 Mar 2002 Harald Welte <laforge@gnumonks.org>: 12 * 14 Mar 2002 Harald Welte <laforge@gnumonks.org>:
13 * - merge SACK support into newnat API 13 * - merge SACK support into newnat API
14 * 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>: 14 * 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>:
@@ -45,10 +45,10 @@
45static DEFINE_SPINLOCK(ip_nat_seqofs_lock); 45static DEFINE_SPINLOCK(ip_nat_seqofs_lock);
46 46
47/* Setup TCP sequence correction given this change at this sequence */ 47/* Setup TCP sequence correction given this change at this sequence */
48static inline void 48static inline void
49adjust_tcp_sequence(u32 seq, 49adjust_tcp_sequence(u32 seq,
50 int sizediff, 50 int sizediff,
51 struct ip_conntrack *ct, 51 struct ip_conntrack *ct,
52 enum ip_conntrack_info ctinfo) 52 enum ip_conntrack_info ctinfo)
53{ 53{
54 int dir; 54 int dir;
@@ -150,7 +150,7 @@ static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
150 * skb enlargement, ... 150 * skb enlargement, ...
151 * 151 *
152 * */ 152 * */
153int 153int
154ip_nat_mangle_tcp_packet(struct sk_buff **pskb, 154ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
155 struct ip_conntrack *ct, 155 struct ip_conntrack *ct,
156 enum ip_conntrack_info ctinfo, 156 enum ip_conntrack_info ctinfo,
@@ -186,7 +186,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
186 tcph->check = tcp_v4_check(datalen, 186 tcph->check = tcp_v4_check(datalen,
187 iph->saddr, iph->daddr, 187 iph->saddr, iph->daddr,
188 csum_partial((char *)tcph, 188 csum_partial((char *)tcph,
189 datalen, 0)); 189 datalen, 0));
190 } else 190 } else
191 nf_proto_csum_replace2(&tcph->check, *pskb, 191 nf_proto_csum_replace2(&tcph->check, *pskb,
192 htons(oldlen), htons(datalen), 1); 192 htons(oldlen), htons(datalen), 1);
@@ -202,7 +202,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
202 return 1; 202 return 1;
203} 203}
204EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); 204EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
205 205
206/* Generic function for mangling variable-length address changes inside 206/* Generic function for mangling variable-length address changes inside
207 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX 207 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
208 * command in the Amanda protocol) 208 * command in the Amanda protocol)
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
213 * XXX - This function could be merged with ip_nat_mangle_tcp_packet which 213 * XXX - This function could be merged with ip_nat_mangle_tcp_packet which
214 * should be fairly easy to do. 214 * should be fairly easy to do.
215 */ 215 */
216int 216int
217ip_nat_mangle_udp_packet(struct sk_buff **pskb, 217ip_nat_mangle_udp_packet(struct sk_buff **pskb,
218 struct ip_conntrack *ct, 218 struct ip_conntrack *ct,
219 enum ip_conntrack_info ctinfo, 219 enum ip_conntrack_info ctinfo,
@@ -228,8 +228,8 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
228 228
229 /* UDP helpers might accidentally mangle the wrong packet */ 229 /* UDP helpers might accidentally mangle the wrong packet */
230 iph = (*pskb)->nh.iph; 230 iph = (*pskb)->nh.iph;
231 if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + 231 if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
232 match_offset + match_len) 232 match_offset + match_len)
233 return 0; 233 return 0;
234 234
235 if (!skb_make_writable(pskb, (*pskb)->len)) 235 if (!skb_make_writable(pskb, (*pskb)->len))
@@ -258,9 +258,9 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
258 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 258 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
259 udph->check = 0; 259 udph->check = 0;
260 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 260 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
261 datalen, IPPROTO_UDP, 261 datalen, IPPROTO_UDP,
262 csum_partial((char *)udph, 262 csum_partial((char *)udph,
263 datalen, 0)); 263 datalen, 0));
264 if (!udph->check) 264 if (!udph->check)
265 udph->check = CSUM_MANGLED_0; 265 udph->check = CSUM_MANGLED_0;
266 } else 266 } else
@@ -273,7 +273,7 @@ EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
273/* Adjust one found SACK option including checksum correction */ 273/* Adjust one found SACK option including checksum correction */
274static void 274static void
275sack_adjust(struct sk_buff *skb, 275sack_adjust(struct sk_buff *skb,
276 struct tcphdr *tcph, 276 struct tcphdr *tcph,
277 unsigned int sackoff, 277 unsigned int sackoff,
278 unsigned int sackend, 278 unsigned int sackend,
279 struct ip_nat_seq *natseq) 279 struct ip_nat_seq *natseq)
@@ -360,14 +360,14 @@ ip_nat_sack_adjust(struct sk_buff **pskb,
360 360
361/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ 361/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
362int 362int
363ip_nat_seq_adjust(struct sk_buff **pskb, 363ip_nat_seq_adjust(struct sk_buff **pskb,
364 struct ip_conntrack *ct, 364 struct ip_conntrack *ct,
365 enum ip_conntrack_info ctinfo) 365 enum ip_conntrack_info ctinfo)
366{ 366{
367 struct tcphdr *tcph; 367 struct tcphdr *tcph;
368 int dir; 368 int dir;
369 __be32 newseq, newack; 369 __be32 newseq, newack;
370 struct ip_nat_seq *this_way, *other_way; 370 struct ip_nat_seq *this_way, *other_way;
371 371
372 dir = CTINFO2DIR(ctinfo); 372 dir = CTINFO2DIR(ctinfo);
373 373
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
index ec957bbb5366..24ce4a5023d7 100644
--- a/net/ipv4/netfilter/ip_nat_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c
@@ -202,10 +202,10 @@ pptp_outbound_pkt(struct sk_buff **pskb,
202 202
203 /* mangle packet */ 203 /* mangle packet */
204 if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, 204 if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
205 cid_off + sizeof(struct pptp_pkt_hdr) + 205 cid_off + sizeof(struct pptp_pkt_hdr) +
206 sizeof(struct PptpControlHeader), 206 sizeof(struct PptpControlHeader),
207 sizeof(new_callid), (char *)&new_callid, 207 sizeof(new_callid), (char *)&new_callid,
208 sizeof(new_callid)) == 0) 208 sizeof(new_callid)) == 0)
209 return NF_DROP; 209 return NF_DROP;
210 210
211 return NF_ACCEPT; 211 return NF_ACCEPT;
@@ -293,7 +293,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
293 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); 293 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
294 294
295 if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, 295 if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
296 pcid_off + sizeof(struct pptp_pkt_hdr) + 296 pcid_off + sizeof(struct pptp_pkt_hdr) +
297 sizeof(struct PptpControlHeader), 297 sizeof(struct PptpControlHeader),
298 sizeof(new_pcid), (char *)&new_pcid, 298 sizeof(new_pcid), (char *)&new_pcid,
299 sizeof(new_pcid)) == 0) 299 sizeof(new_pcid)) == 0)
diff --git a/net/ipv4/netfilter/ip_nat_irc.c b/net/ipv4/netfilter/ip_nat_irc.c
index feb26b48f1d5..cfaeea38314f 100644
--- a/net/ipv4/netfilter/ip_nat_irc.c
+++ b/net/ipv4/netfilter/ip_nat_irc.c
@@ -88,8 +88,8 @@ static unsigned int help(struct sk_buff **pskb,
88 DEBUGP("ip_nat_irc: Inserting '%s' == %u.%u.%u.%u, port %u\n", 88 DEBUGP("ip_nat_irc: Inserting '%s' == %u.%u.%u.%u, port %u\n",
89 buffer, NIPQUAD(exp->tuple.src.ip), port); 89 buffer, NIPQUAD(exp->tuple.src.ip), port);
90 90
91 ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, 91 ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo,
92 matchoff, matchlen, buffer, 92 matchoff, matchlen, buffer,
93 strlen(buffer)); 93 strlen(buffer));
94 if (ret != NF_ACCEPT) 94 if (ret != NF_ACCEPT)
95 ip_conntrack_unexpect_related(exp); 95 ip_conntrack_unexpect_related(exp);
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c
index fb716edd5bc6..22a528ae0380 100644
--- a/net/ipv4/netfilter/ip_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_icmp.c
@@ -45,7 +45,7 @@ icmp_unique_tuple(struct ip_conntrack_tuple *tuple,
45 45
46 for (i = 0; i < range_size; i++, id++) { 46 for (i = 0; i < range_size; i++, id++) {
47 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + 47 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
48 (id % range_size)); 48 (id % range_size));
49 if (!ip_nat_used_tuple(tuple, conntrack)) 49 if (!ip_nat_used_tuple(tuple, conntrack))
50 return 1; 50 return 1;
51 } 51 }
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index e1c8a05f3dc6..080eb1d92200 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -112,7 +112,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
112 112
113 /* Connection must be valid and new. */ 113 /* Connection must be valid and new. */
114 IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED 114 IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
115 || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 115 || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
116 IP_NF_ASSERT(out); 116 IP_NF_ASSERT(out);
117 117
118 return ip_nat_setup_info(ct, &mr->range[0], hooknum); 118 return ip_nat_setup_info(ct, &mr->range[0], hooknum);
@@ -223,8 +223,8 @@ alloc_null_binding(struct ip_conntrack *conntrack,
223 223
224unsigned int 224unsigned int
225alloc_null_binding_confirmed(struct ip_conntrack *conntrack, 225alloc_null_binding_confirmed(struct ip_conntrack *conntrack,
226 struct ip_nat_info *info, 226 struct ip_nat_info *info,
227 unsigned int hooknum) 227 unsigned int hooknum)
228{ 228{
229 __be32 ip 229 __be32 ip
230 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC 230 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c
index 6223abc924ff..325c5a9dc2ef 100644
--- a/net/ipv4/netfilter/ip_nat_sip.c
+++ b/net/ipv4/netfilter/ip_nat_sip.c
@@ -88,7 +88,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
88 return 1; 88 return 1;
89 89
90 if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, 90 if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
91 matchoff, matchlen, addr, addrlen)) 91 matchoff, matchlen, addr, addrlen))
92 return 0; 92 return 0;
93 *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); 93 *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
94 return 1; 94 return 1;
@@ -149,7 +149,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb,
149 return 0; 149 return 0;
150 150
151 if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, 151 if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
152 matchoff, matchlen, buffer, bufflen)) 152 matchoff, matchlen, buffer, bufflen))
153 return 0; 153 return 0;
154 154
155 /* We need to reload this. Thanks Patrick. */ 155 /* We need to reload this. Thanks Patrick. */
@@ -170,7 +170,7 @@ static int mangle_content_len(struct sk_buff **pskb,
170 170
171 /* Get actual SDP lenght */ 171 /* Get actual SDP lenght */
172 if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, 172 if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
173 &matchlen, POS_SDP_HEADER) > 0) { 173 &matchlen, POS_SDP_HEADER) > 0) {
174 174
 175		/* since ct_sip_get_info() gives us a pointer past 'v=' 175		/* since ct_sip_get_info() gives us a pointer past 'v='
 176		we need to add 2 bytes to this count. */ 176		we need to add 2 bytes to this count. */
@@ -178,7 +178,7 @@ static int mangle_content_len(struct sk_buff **pskb,
178 178
 179		/* Now, update SDP length */ 179		/* Now, update SDP length */
180 if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, 180 if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
181 &matchlen, POS_CONTENT) > 0) { 181 &matchlen, POS_CONTENT) > 0) {
182 182
183 bufflen = sprintf(buffer, "%u", c_len); 183 bufflen = sprintf(buffer, "%u", c_len);
184 184
@@ -204,17 +204,17 @@ static unsigned int mangle_sdp(struct sk_buff **pskb,
204 /* Mangle owner and contact info. */ 204 /* Mangle owner and contact info. */
205 bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); 205 bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip));
206 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, 206 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
207 buffer, bufflen, POS_OWNER)) 207 buffer, bufflen, POS_OWNER))
208 return 0; 208 return 0;
209 209
210 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, 210 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
211 buffer, bufflen, POS_CONNECTION)) 211 buffer, bufflen, POS_CONNECTION))
212 return 0; 212 return 0;
213 213
214 /* Mangle media port. */ 214 /* Mangle media port. */
215 bufflen = sprintf(buffer, "%u", port); 215 bufflen = sprintf(buffer, "%u", port);
216 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, 216 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
217 buffer, bufflen, POS_MEDIA)) 217 buffer, bufflen, POS_MEDIA))
218 return 0; 218 return 0;
219 219
220 return mangle_content_len(pskb, ctinfo, ct, dptr); 220 return mangle_content_len(pskb, ctinfo, ct, dptr);
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index c3d9f3b090c4..e41d0efae515 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -3,11 +3,11 @@
3 * 3 *
4 * Basic SNMP Application Layer Gateway 4 * Basic SNMP Application Layer Gateway
5 * 5 *
6 * This IP NAT module is intended for use with SNMP network 6 * This IP NAT module is intended for use with SNMP network
7 * discovery and monitoring applications where target networks use 7 * discovery and monitoring applications where target networks use
8 * conflicting private address realms. 8 * conflicting private address realms.
9 * 9 *
10 * Static NAT is used to remap the networks from the view of the network 10 * Static NAT is used to remap the networks from the view of the network
11 * management system at the IP layer, and this module remaps some application 11 * management system at the IP layer, and this module remaps some application
12 * layer addresses to match. 12 * layer addresses to match.
13 * 13 *
@@ -20,7 +20,7 @@
20 * More information on ALG and associated issues can be found in 20 * More information on ALG and associated issues can be found in
21 * RFC 2962 21 * RFC 2962
22 * 22 *
23 * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory 23 * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
24 * McLean & Jochen Friedrich, stripped down for use in the kernel. 24 * McLean & Jochen Friedrich, stripped down for use in the kernel.
25 * 25 *
26 * Copyright (c) 2000 RP Internet (www.rpi.net.au). 26 * Copyright (c) 2000 RP Internet (www.rpi.net.au).
@@ -69,8 +69,8 @@ MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
69static int debug; 69static int debug;
70static DEFINE_SPINLOCK(snmp_lock); 70static DEFINE_SPINLOCK(snmp_lock);
71 71
72/* 72/*
73 * Application layer address mapping mimics the NAT mapping, but 73 * Application layer address mapping mimics the NAT mapping, but
74 * only for the first octet in this case (a more flexible system 74 * only for the first octet in this case (a more flexible system
75 * can be implemented if needed). 75 * can be implemented if needed).
76 */ 76 */
@@ -80,7 +80,7 @@ struct oct1_map
80 u_int8_t to; 80 u_int8_t to;
81}; 81};
82 82
83 83
84/***************************************************************************** 84/*****************************************************************************
85 * 85 *
86 * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse) 86 * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
@@ -129,7 +129,7 @@ struct oct1_map
129#define ASN1_ERR_DEC_LENGTH_MISMATCH 4 129#define ASN1_ERR_DEC_LENGTH_MISMATCH 4
130#define ASN1_ERR_DEC_BADVALUE 5 130#define ASN1_ERR_DEC_BADVALUE 5
131 131
132/* 132/*
133 * ASN.1 context. 133 * ASN.1 context.
134 */ 134 */
135struct asn1_ctx 135struct asn1_ctx
@@ -148,10 +148,10 @@ struct asn1_octstr
148 unsigned char *data; 148 unsigned char *data;
149 unsigned int len; 149 unsigned int len;
150}; 150};
151 151
152static void asn1_open(struct asn1_ctx *ctx, 152static void asn1_open(struct asn1_ctx *ctx,
153 unsigned char *buf, 153 unsigned char *buf,
154 unsigned int len) 154 unsigned int len)
155{ 155{
156 ctx->begin = buf; 156 ctx->begin = buf;
157 ctx->end = buf + len; 157 ctx->end = buf + len;
@@ -172,9 +172,9 @@ static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
172static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) 172static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
173{ 173{
174 unsigned char ch; 174 unsigned char ch;
175 175
176 *tag = 0; 176 *tag = 0;
177 177
178 do 178 do
179 { 179 {
180 if (!asn1_octet_decode(ctx, &ch)) 180 if (!asn1_octet_decode(ctx, &ch))
@@ -185,20 +185,20 @@ static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
185 return 1; 185 return 1;
186} 186}
187 187
188static unsigned char asn1_id_decode(struct asn1_ctx *ctx, 188static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
189 unsigned int *cls, 189 unsigned int *cls,
190 unsigned int *con, 190 unsigned int *con,
191 unsigned int *tag) 191 unsigned int *tag)
192{ 192{
193 unsigned char ch; 193 unsigned char ch;
194 194
195 if (!asn1_octet_decode(ctx, &ch)) 195 if (!asn1_octet_decode(ctx, &ch))
196 return 0; 196 return 0;
197 197
198 *cls = (ch & 0xC0) >> 6; 198 *cls = (ch & 0xC0) >> 6;
199 *con = (ch & 0x20) >> 5; 199 *con = (ch & 0x20) >> 5;
200 *tag = (ch & 0x1F); 200 *tag = (ch & 0x1F);
201 201
202 if (*tag == 0x1F) { 202 if (*tag == 0x1F) {
203 if (!asn1_tag_decode(ctx, tag)) 203 if (!asn1_tag_decode(ctx, tag))
204 return 0; 204 return 0;
@@ -207,25 +207,25 @@ static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
207} 207}
208 208
209static unsigned char asn1_length_decode(struct asn1_ctx *ctx, 209static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
210 unsigned int *def, 210 unsigned int *def,
211 unsigned int *len) 211 unsigned int *len)
212{ 212{
213 unsigned char ch, cnt; 213 unsigned char ch, cnt;
214 214
215 if (!asn1_octet_decode(ctx, &ch)) 215 if (!asn1_octet_decode(ctx, &ch))
216 return 0; 216 return 0;
217 217
218 if (ch == 0x80) 218 if (ch == 0x80)
219 *def = 0; 219 *def = 0;
220 else { 220 else {
221 *def = 1; 221 *def = 1;
222 222
223 if (ch < 0x80) 223 if (ch < 0x80)
224 *len = ch; 224 *len = ch;
225 else { 225 else {
226 cnt = (unsigned char) (ch & 0x7F); 226 cnt = (unsigned char) (ch & 0x7F);
227 *len = 0; 227 *len = 0;
228 228
229 while (cnt > 0) { 229 while (cnt > 0) {
230 if (!asn1_octet_decode(ctx, &ch)) 230 if (!asn1_octet_decode(ctx, &ch))
231 return 0; 231 return 0;
@@ -239,20 +239,20 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
239} 239}
240 240
241static unsigned char asn1_header_decode(struct asn1_ctx *ctx, 241static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
242 unsigned char **eoc, 242 unsigned char **eoc,
243 unsigned int *cls, 243 unsigned int *cls,
244 unsigned int *con, 244 unsigned int *con,
245 unsigned int *tag) 245 unsigned int *tag)
246{ 246{
247 unsigned int def, len; 247 unsigned int def, len;
248 248
249 if (!asn1_id_decode(ctx, cls, con, tag)) 249 if (!asn1_id_decode(ctx, cls, con, tag))
250 return 0; 250 return 0;
251 251
252 def = len = 0; 252 def = len = 0;
253 if (!asn1_length_decode(ctx, &def, &len)) 253 if (!asn1_length_decode(ctx, &def, &len))
254 return 0; 254 return 0;
255 255
256 if (def) 256 if (def)
257 *eoc = ctx->pointer + len; 257 *eoc = ctx->pointer + len;
258 else 258 else
@@ -263,19 +263,19 @@ static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
263static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) 263static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
264{ 264{
265 unsigned char ch; 265 unsigned char ch;
266 266
267 if (eoc == 0) { 267 if (eoc == 0) {
268 if (!asn1_octet_decode(ctx, &ch)) 268 if (!asn1_octet_decode(ctx, &ch))
269 return 0; 269 return 0;
270 270
271 if (ch != 0x00) { 271 if (ch != 0x00) {
272 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; 272 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
273 return 0; 273 return 0;
274 } 274 }
275 275
276 if (!asn1_octet_decode(ctx, &ch)) 276 if (!asn1_octet_decode(ctx, &ch))
277 return 0; 277 return 0;
278 278
279 if (ch != 0x00) { 279 if (ch != 0x00) {
280 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; 280 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
281 return 0; 281 return 0;
@@ -297,27 +297,27 @@ static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
297} 297}
298 298
299static unsigned char asn1_long_decode(struct asn1_ctx *ctx, 299static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
300 unsigned char *eoc, 300 unsigned char *eoc,
301 long *integer) 301 long *integer)
302{ 302{
303 unsigned char ch; 303 unsigned char ch;
304 unsigned int len; 304 unsigned int len;
305 305
306 if (!asn1_octet_decode(ctx, &ch)) 306 if (!asn1_octet_decode(ctx, &ch))
307 return 0; 307 return 0;
308 308
309 *integer = (signed char) ch; 309 *integer = (signed char) ch;
310 len = 1; 310 len = 1;
311 311
312 while (ctx->pointer < eoc) { 312 while (ctx->pointer < eoc) {
313 if (++len > sizeof (long)) { 313 if (++len > sizeof (long)) {
314 ctx->error = ASN1_ERR_DEC_BADVALUE; 314 ctx->error = ASN1_ERR_DEC_BADVALUE;
315 return 0; 315 return 0;
316 } 316 }
317 317
318 if (!asn1_octet_decode(ctx, &ch)) 318 if (!asn1_octet_decode(ctx, &ch))
319 return 0; 319 return 0;
320 320
321 *integer <<= 8; 321 *integer <<= 8;
322 *integer |= ch; 322 *integer |= ch;
323 } 323 }
@@ -325,28 +325,28 @@ static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
325} 325}
326 326
327static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, 327static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
328 unsigned char *eoc, 328 unsigned char *eoc,
329 unsigned int *integer) 329 unsigned int *integer)
330{ 330{
331 unsigned char ch; 331 unsigned char ch;
332 unsigned int len; 332 unsigned int len;
333 333
334 if (!asn1_octet_decode(ctx, &ch)) 334 if (!asn1_octet_decode(ctx, &ch))
335 return 0; 335 return 0;
336 336
337 *integer = ch; 337 *integer = ch;
338 if (ch == 0) len = 0; 338 if (ch == 0) len = 0;
339 else len = 1; 339 else len = 1;
340 340
341 while (ctx->pointer < eoc) { 341 while (ctx->pointer < eoc) {
342 if (++len > sizeof (unsigned int)) { 342 if (++len > sizeof (unsigned int)) {
343 ctx->error = ASN1_ERR_DEC_BADVALUE; 343 ctx->error = ASN1_ERR_DEC_BADVALUE;
344 return 0; 344 return 0;
345 } 345 }
346 346
347 if (!asn1_octet_decode(ctx, &ch)) 347 if (!asn1_octet_decode(ctx, &ch))
348 return 0; 348 return 0;
349 349
350 *integer <<= 8; 350 *integer <<= 8;
351 *integer |= ch; 351 *integer |= ch;
352 } 352 }
@@ -354,28 +354,28 @@ static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
354} 354}
355 355
356static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, 356static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
357 unsigned char *eoc, 357 unsigned char *eoc,
358 unsigned long *integer) 358 unsigned long *integer)
359{ 359{
360 unsigned char ch; 360 unsigned char ch;
361 unsigned int len; 361 unsigned int len;
362 362
363 if (!asn1_octet_decode(ctx, &ch)) 363 if (!asn1_octet_decode(ctx, &ch))
364 return 0; 364 return 0;
365 365
366 *integer = ch; 366 *integer = ch;
367 if (ch == 0) len = 0; 367 if (ch == 0) len = 0;
368 else len = 1; 368 else len = 1;
369 369
370 while (ctx->pointer < eoc) { 370 while (ctx->pointer < eoc) {
371 if (++len > sizeof (unsigned long)) { 371 if (++len > sizeof (unsigned long)) {
372 ctx->error = ASN1_ERR_DEC_BADVALUE; 372 ctx->error = ASN1_ERR_DEC_BADVALUE;
373 return 0; 373 return 0;
374 } 374 }
375 375
376 if (!asn1_octet_decode(ctx, &ch)) 376 if (!asn1_octet_decode(ctx, &ch))
377 return 0; 377 return 0;
378 378
379 *integer <<= 8; 379 *integer <<= 8;
380 *integer |= ch; 380 *integer |= ch;
381 } 381 }
@@ -383,21 +383,21 @@ static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
383} 383}
384 384
385static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, 385static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
386 unsigned char *eoc, 386 unsigned char *eoc,
387 unsigned char **octets, 387 unsigned char **octets,
388 unsigned int *len) 388 unsigned int *len)
389{ 389{
390 unsigned char *ptr; 390 unsigned char *ptr;
391 391
392 *len = 0; 392 *len = 0;
393 393
394 *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); 394 *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
395 if (*octets == NULL) { 395 if (*octets == NULL) {
396 if (net_ratelimit()) 396 if (net_ratelimit())
397 printk("OOM in bsalg (%d)\n", __LINE__); 397 printk("OOM in bsalg (%d)\n", __LINE__);
398 return 0; 398 return 0;
399 } 399 }
400 400
401 ptr = *octets; 401 ptr = *octets;
402 while (ctx->pointer < eoc) { 402 while (ctx->pointer < eoc) {
403 if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { 403 if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) {
@@ -411,16 +411,16 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
411} 411}
412 412
413static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, 413static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
414 unsigned long *subid) 414 unsigned long *subid)
415{ 415{
416 unsigned char ch; 416 unsigned char ch;
417 417
418 *subid = 0; 418 *subid = 0;
419 419
420 do { 420 do {
421 if (!asn1_octet_decode(ctx, &ch)) 421 if (!asn1_octet_decode(ctx, &ch))
422 return 0; 422 return 0;
423 423
424 *subid <<= 7; 424 *subid <<= 7;
425 *subid |= ch & 0x7F; 425 *subid |= ch & 0x7F;
426 } while ((ch & 0x80) == 0x80); 426 } while ((ch & 0x80) == 0x80);
@@ -428,14 +428,14 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
428} 428}
429 429
430static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, 430static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
431 unsigned char *eoc, 431 unsigned char *eoc,
432 unsigned long **oid, 432 unsigned long **oid,
433 unsigned int *len) 433 unsigned int *len)
434{ 434{
435 unsigned long subid; 435 unsigned long subid;
436 unsigned int size; 436 unsigned int size;
437 unsigned long *optr; 437 unsigned long *optr;
438 438
439 size = eoc - ctx->pointer + 1; 439 size = eoc - ctx->pointer + 1;
440 *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); 440 *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
441 if (*oid == NULL) { 441 if (*oid == NULL) {
@@ -443,15 +443,15 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
443 printk("OOM in bsalg (%d)\n", __LINE__); 443 printk("OOM in bsalg (%d)\n", __LINE__);
444 return 0; 444 return 0;
445 } 445 }
446 446
447 optr = *oid; 447 optr = *oid;
448 448
449 if (!asn1_subid_decode(ctx, &subid)) { 449 if (!asn1_subid_decode(ctx, &subid)) {
450 kfree(*oid); 450 kfree(*oid);
451 *oid = NULL; 451 *oid = NULL;
452 return 0; 452 return 0;
453 } 453 }
454 454
455 if (subid < 40) { 455 if (subid < 40) {
456 optr [0] = 0; 456 optr [0] = 0;
457 optr [1] = subid; 457 optr [1] = subid;
@@ -462,10 +462,10 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
462 optr [0] = 2; 462 optr [0] = 2;
463 optr [1] = subid - 80; 463 optr [1] = subid - 80;
464 } 464 }
465 465
466 *len = 2; 466 *len = 2;
467 optr += 2; 467 optr += 2;
468 468
469 while (ctx->pointer < eoc) { 469 while (ctx->pointer < eoc) {
470 if (++(*len) > size) { 470 if (++(*len) > size) {
471 ctx->error = ASN1_ERR_DEC_BADVALUE; 471 ctx->error = ASN1_ERR_DEC_BADVALUE;
@@ -473,7 +473,7 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
473 *oid = NULL; 473 *oid = NULL;
474 return 0; 474 return 0;
475 } 475 }
476 476
477 if (!asn1_subid_decode(ctx, optr++)) { 477 if (!asn1_subid_decode(ctx, optr++)) {
478 kfree(*oid); 478 kfree(*oid);
479 *oid = NULL; 479 *oid = NULL;
@@ -611,9 +611,9 @@ struct snmp_v1_trap
611#define SERR_EOM 2 611#define SERR_EOM 2
612 612
613static inline void mangle_address(unsigned char *begin, 613static inline void mangle_address(unsigned char *begin,
614 unsigned char *addr, 614 unsigned char *addr,
615 const struct oct1_map *map, 615 const struct oct1_map *map,
616 __sum16 *check); 616 __sum16 *check);
617struct snmp_cnv 617struct snmp_cnv
618{ 618{
619 unsigned int class; 619 unsigned int class;
@@ -633,7 +633,7 @@ static struct snmp_cnv snmp_conv [] =
633 {ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */ 633 {ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */
634 {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS}, 634 {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS},
635 {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE}, 635 {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE},
636 636
637 /* SNMPv2 data types and errors */ 637 /* SNMPv2 data types and errors */
638 {ASN1_UNI, ASN1_BTS, SNMP_BITSTR}, 638 {ASN1_UNI, ASN1_BTS, SNMP_BITSTR},
639 {ASN1_APL, SNMP_C64, SNMP_COUNTER64}, 639 {ASN1_APL, SNMP_C64, SNMP_COUNTER64},
@@ -644,13 +644,13 @@ static struct snmp_cnv snmp_conv [] =
644}; 644};
645 645
646static unsigned char snmp_tag_cls2syntax(unsigned int tag, 646static unsigned char snmp_tag_cls2syntax(unsigned int tag,
647 unsigned int cls, 647 unsigned int cls,
648 unsigned short *syntax) 648 unsigned short *syntax)
649{ 649{
650 struct snmp_cnv *cnv; 650 struct snmp_cnv *cnv;
651 651
652 cnv = snmp_conv; 652 cnv = snmp_conv;
653 653
654 while (cnv->syntax != -1) { 654 while (cnv->syntax != -1) {
655 if (cnv->tag == tag && cnv->class == cls) { 655 if (cnv->tag == tag && cnv->class == cls) {
656 *syntax = cnv->syntax; 656 *syntax = cnv->syntax;
@@ -662,7 +662,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag,
662} 662}
663 663
664static unsigned char snmp_object_decode(struct asn1_ctx *ctx, 664static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
665 struct snmp_object **obj) 665 struct snmp_object **obj)
666{ 666{
667 unsigned int cls, con, tag, len, idlen; 667 unsigned int cls, con, tag, len, idlen;
668 unsigned short type; 668 unsigned short type;
@@ -670,41 +670,41 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
670 unsigned long *lp, *id; 670 unsigned long *lp, *id;
671 unsigned long ul; 671 unsigned long ul;
672 long l; 672 long l;
673 673
674 *obj = NULL; 674 *obj = NULL;
675 id = NULL; 675 id = NULL;
676 676
677 if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) 677 if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag))
678 return 0; 678 return 0;
679 679
680 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) 680 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
681 return 0; 681 return 0;
682 682
683 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 683 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
684 return 0; 684 return 0;
685 685
686 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) 686 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
687 return 0; 687 return 0;
688 688
689 if (!asn1_oid_decode(ctx, end, &id, &idlen)) 689 if (!asn1_oid_decode(ctx, end, &id, &idlen))
690 return 0; 690 return 0;
691 691
692 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { 692 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) {
693 kfree(id); 693 kfree(id);
694 return 0; 694 return 0;
695 } 695 }
696 696
697 if (con != ASN1_PRI) { 697 if (con != ASN1_PRI) {
698 kfree(id); 698 kfree(id);
699 return 0; 699 return 0;
700 } 700 }
701 701
702 type = 0; 702 type = 0;
703 if (!snmp_tag_cls2syntax(tag, cls, &type)) { 703 if (!snmp_tag_cls2syntax(tag, cls, &type)) {
704 kfree(id); 704 kfree(id);
705 return 0; 705 return 0;
706 } 706 }
707 707
708 l = 0; 708 l = 0;
709 switch (type) { 709 switch (type) {
710 case SNMP_INTEGER: 710 case SNMP_INTEGER:
@@ -714,7 +714,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
714 return 0; 714 return 0;
715 } 715 }
716 *obj = kmalloc(sizeof(struct snmp_object) + len, 716 *obj = kmalloc(sizeof(struct snmp_object) + len,
717 GFP_ATOMIC); 717 GFP_ATOMIC);
718 if (*obj == NULL) { 718 if (*obj == NULL) {
719 kfree(id); 719 kfree(id);
720 if (net_ratelimit()) 720 if (net_ratelimit())
@@ -730,7 +730,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
730 return 0; 730 return 0;
731 } 731 }
732 *obj = kmalloc(sizeof(struct snmp_object) + len, 732 *obj = kmalloc(sizeof(struct snmp_object) + len,
733 GFP_ATOMIC); 733 GFP_ATOMIC);
734 if (*obj == NULL) { 734 if (*obj == NULL) {
735 kfree(id); 735 kfree(id);
736 if (net_ratelimit()) 736 if (net_ratelimit())
@@ -818,12 +818,12 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
818 kfree(id); 818 kfree(id);
819 return 0; 819 return 0;
820 } 820 }
821 821
822 (*obj)->syntax_len = len; 822 (*obj)->syntax_len = len;
823 (*obj)->type = type; 823 (*obj)->type = type;
824 (*obj)->id = id; 824 (*obj)->id = id;
825 (*obj)->id_len = idlen; 825 (*obj)->id_len = idlen;
826 826
827 if (!asn1_eoc_decode(ctx, eoc)) { 827 if (!asn1_eoc_decode(ctx, eoc)) {
828 kfree(id); 828 kfree(id);
829 kfree(*obj); 829 kfree(*obj);
@@ -834,49 +834,49 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
834} 834}
835 835
836static unsigned char snmp_request_decode(struct asn1_ctx *ctx, 836static unsigned char snmp_request_decode(struct asn1_ctx *ctx,
837 struct snmp_request *request) 837 struct snmp_request *request)
838{ 838{
839 unsigned int cls, con, tag; 839 unsigned int cls, con, tag;
840 unsigned char *end; 840 unsigned char *end;
841 841
842 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 842 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
843 return 0; 843 return 0;
844 844
845 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) 845 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
846 return 0; 846 return 0;
847 847
848 if (!asn1_ulong_decode(ctx, end, &request->id)) 848 if (!asn1_ulong_decode(ctx, end, &request->id))
849 return 0; 849 return 0;
850 850
851 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 851 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
852 return 0; 852 return 0;
853 853
854 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) 854 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
855 return 0; 855 return 0;
856 856
857 if (!asn1_uint_decode(ctx, end, &request->error_status)) 857 if (!asn1_uint_decode(ctx, end, &request->error_status))
858 return 0; 858 return 0;
859 859
860 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 860 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
861 return 0; 861 return 0;
862 862
863 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) 863 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
864 return 0; 864 return 0;
865 865
866 if (!asn1_uint_decode(ctx, end, &request->error_index)) 866 if (!asn1_uint_decode(ctx, end, &request->error_index))
867 return 0; 867 return 0;
868 868
869 return 1; 869 return 1;
870} 870}
871 871
872/* 872/*
873 * Fast checksum update for possibly oddly-aligned UDP byte, from the 873 * Fast checksum update for possibly oddly-aligned UDP byte, from the
874 * code example in the draft. 874 * code example in the draft.
875 */ 875 */
876static void fast_csum(__sum16 *csum, 876static void fast_csum(__sum16 *csum,
877 const unsigned char *optr, 877 const unsigned char *optr,
878 const unsigned char *nptr, 878 const unsigned char *nptr,
879 int offset) 879 int offset)
880{ 880{
881 unsigned char s[4]; 881 unsigned char s[4];
882 882
@@ -893,30 +893,30 @@ static void fast_csum(__sum16 *csum,
893 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); 893 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
894} 894}
895 895
896/* 896/*
897 * Mangle IP address. 897 * Mangle IP address.
898 * - begin points to the start of the snmp message 898 * - begin points to the start of the snmp message
899 * - addr points to the start of the address 899 * - addr points to the start of the address
900 */ 900 */
901static inline void mangle_address(unsigned char *begin, 901static inline void mangle_address(unsigned char *begin,
902 unsigned char *addr, 902 unsigned char *addr,
903 const struct oct1_map *map, 903 const struct oct1_map *map,
904 __sum16 *check) 904 __sum16 *check)
905{ 905{
906 if (map->from == NOCT1(addr)) { 906 if (map->from == NOCT1(addr)) {
907 u_int32_t old; 907 u_int32_t old;
908 908
909 if (debug) 909 if (debug)
910 memcpy(&old, (unsigned char *)addr, sizeof(old)); 910 memcpy(&old, (unsigned char *)addr, sizeof(old));
911 911
912 *addr = map->to; 912 *addr = map->to;
913 913
914 /* Update UDP checksum if being used */ 914 /* Update UDP checksum if being used */
915 if (*check) { 915 if (*check) {
916 fast_csum(check, 916 fast_csum(check,
917 &map->from, &map->to, addr - begin); 917 &map->from, &map->to, addr - begin);
918 } 918 }
919 919
920 if (debug) 920 if (debug)
921 printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to " 921 printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
922 "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr)); 922 "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
@@ -924,66 +924,66 @@ static inline void mangle_address(unsigned char *begin,
924} 924}
925 925
926static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, 926static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
927 struct snmp_v1_trap *trap, 927 struct snmp_v1_trap *trap,
928 const struct oct1_map *map, 928 const struct oct1_map *map,
929 __sum16 *check) 929 __sum16 *check)
930{ 930{
931 unsigned int cls, con, tag, len; 931 unsigned int cls, con, tag, len;
932 unsigned char *end; 932 unsigned char *end;
933 933
934 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 934 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
935 return 0; 935 return 0;
936 936
937 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) 937 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
938 return 0; 938 return 0;
939 939
940 if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) 940 if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len))
941 return 0; 941 return 0;
942 942
943 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 943 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
944 goto err_id_free; 944 goto err_id_free;
945 945
946 if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) || 946 if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) ||
947 (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS))) 947 (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS)))
948 goto err_id_free; 948 goto err_id_free;
949 949
950 if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) 950 if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len))
951 goto err_id_free; 951 goto err_id_free;
952 952
953 /* IPv4 only */ 953 /* IPv4 only */
954 if (len != 4) 954 if (len != 4)
955 goto err_addr_free; 955 goto err_addr_free;
956 956
957 mangle_address(ctx->begin, ctx->pointer - 4, map, check); 957 mangle_address(ctx->begin, ctx->pointer - 4, map, check);
958 958
959 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 959 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
960 goto err_addr_free; 960 goto err_addr_free;
961 961
962 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) 962 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
963 goto err_addr_free; 963 goto err_addr_free;
964 964
965 if (!asn1_uint_decode(ctx, end, &trap->general)) 965 if (!asn1_uint_decode(ctx, end, &trap->general))
966 goto err_addr_free; 966 goto err_addr_free;
967 967
968 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 968 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
969 goto err_addr_free; 969 goto err_addr_free;
970 970
971 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) 971 if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
972 goto err_addr_free; 972 goto err_addr_free;
973 973
974 if (!asn1_uint_decode(ctx, end, &trap->specific)) 974 if (!asn1_uint_decode(ctx, end, &trap->specific))
975 goto err_addr_free; 975 goto err_addr_free;
976 976
977 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) 977 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
978 goto err_addr_free; 978 goto err_addr_free;
979 979
980 if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) || 980 if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) ||
981 (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT))) 981 (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT)))
982 goto err_addr_free; 982 goto err_addr_free;
983 983
984 if (!asn1_ulong_decode(ctx, end, &trap->time)) 984 if (!asn1_ulong_decode(ctx, end, &trap->time))
985 goto err_addr_free; 985 goto err_addr_free;
986 986
987 return 1; 987 return 1;
988 988
989err_addr_free: 989err_addr_free:
@@ -1004,7 +1004,7 @@ err_id_free:
1004static void hex_dump(unsigned char *buf, size_t len) 1004static void hex_dump(unsigned char *buf, size_t len)
1005{ 1005{
1006 size_t i; 1006 size_t i;
1007 1007
1008 for (i = 0; i < len; i++) { 1008 for (i = 0; i < len; i++) {
1009 if (i && !(i % 16)) 1009 if (i && !(i % 16))
1010 printk("\n"); 1010 printk("\n");
@@ -1018,30 +1018,30 @@ static void hex_dump(unsigned char *buf, size_t len)
1018 * (And this is the fucking 'basic' method). 1018 * (And this is the fucking 'basic' method).
1019 */ 1019 */
1020static int snmp_parse_mangle(unsigned char *msg, 1020static int snmp_parse_mangle(unsigned char *msg,
1021 u_int16_t len, 1021 u_int16_t len,
1022 const struct oct1_map *map, 1022 const struct oct1_map *map,
1023 __sum16 *check) 1023 __sum16 *check)
1024{ 1024{
1025 unsigned char *eoc, *end; 1025 unsigned char *eoc, *end;
1026 unsigned int cls, con, tag, vers, pdutype; 1026 unsigned int cls, con, tag, vers, pdutype;
1027 struct asn1_ctx ctx; 1027 struct asn1_ctx ctx;
1028 struct asn1_octstr comm; 1028 struct asn1_octstr comm;
1029 struct snmp_object **obj; 1029 struct snmp_object **obj;
1030 1030
1031 if (debug > 1) 1031 if (debug > 1)
1032 hex_dump(msg, len); 1032 hex_dump(msg, len);
1033 1033
1034 asn1_open(&ctx, msg, len); 1034 asn1_open(&ctx, msg, len);
1035 1035
1036 /* 1036 /*
1037 * Start of SNMP message. 1037 * Start of SNMP message.
1038 */ 1038 */
1039 if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) 1039 if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
1040 return 0; 1040 return 0;
1041 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) 1041 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
1042 return 0; 1042 return 0;
1043 1043
1044 /* 1044 /*
1045 * Version 1 or 2 handled. 1045 * Version 1 or 2 handled.
1046 */ 1046 */
1047 if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) 1047 if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag))
@@ -1054,7 +1054,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1054 printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); 1054 printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1);
1055 if (vers > 1) 1055 if (vers > 1)
1056 return 1; 1056 return 1;
1057 1057
1058 /* 1058 /*
1059 * Community. 1059 * Community.
1060 */ 1060 */
@@ -1066,14 +1066,14 @@ static int snmp_parse_mangle(unsigned char *msg,
1066 return 0; 1066 return 0;
1067 if (debug > 1) { 1067 if (debug > 1) {
1068 unsigned int i; 1068 unsigned int i;
1069 1069
1070 printk(KERN_DEBUG "bsalg: community: "); 1070 printk(KERN_DEBUG "bsalg: community: ");
1071 for (i = 0; i < comm.len; i++) 1071 for (i = 0; i < comm.len; i++)
1072 printk("%c", comm.data[i]); 1072 printk("%c", comm.data[i]);
1073 printk("\n"); 1073 printk("\n");
1074 } 1074 }
1075 kfree(comm.data); 1075 kfree(comm.data);
1076 1076
1077 /* 1077 /*
1078 * PDU type 1078 * PDU type
1079 */ 1079 */
@@ -1092,7 +1092,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1092 [SNMP_PDU_INFORM] = "inform", 1092 [SNMP_PDU_INFORM] = "inform",
1093 [SNMP_PDU_TRAP2] = "trapv2" 1093 [SNMP_PDU_TRAP2] = "trapv2"
1094 }; 1094 };
1095 1095
1096 if (pdutype > SNMP_PDU_TRAP2) 1096 if (pdutype > SNMP_PDU_TRAP2)
1097 printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); 1097 printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype);
1098 else 1098 else
@@ -1101,56 +1101,56 @@ static int snmp_parse_mangle(unsigned char *msg,
1101 if (pdutype != SNMP_PDU_RESPONSE && 1101 if (pdutype != SNMP_PDU_RESPONSE &&
1102 pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) 1102 pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2)
1103 return 1; 1103 return 1;
1104 1104
1105 /* 1105 /*
1106 * Request header or v1 trap 1106 * Request header or v1 trap
1107 */ 1107 */
1108 if (pdutype == SNMP_PDU_TRAP1) { 1108 if (pdutype == SNMP_PDU_TRAP1) {
1109 struct snmp_v1_trap trap; 1109 struct snmp_v1_trap trap;
1110 unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); 1110 unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
1111 1111
1112 if (ret) { 1112 if (ret) {
1113 kfree(trap.id); 1113 kfree(trap.id);
1114 kfree((unsigned long *)trap.ip_address); 1114 kfree((unsigned long *)trap.ip_address);
1115 } else 1115 } else
1116 return ret; 1116 return ret;
1117 1117
1118 } else { 1118 } else {
1119 struct snmp_request req; 1119 struct snmp_request req;
1120 1120
1121 if (!snmp_request_decode(&ctx, &req)) 1121 if (!snmp_request_decode(&ctx, &req))
1122 return 0; 1122 return 0;
1123 1123
1124 if (debug > 1) 1124 if (debug > 1)
1125 printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " 1125 printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u "
1126 "error_index=%u\n", req.id, req.error_status, 1126 "error_index=%u\n", req.id, req.error_status,
1127 req.error_index); 1127 req.error_index);
1128 } 1128 }
1129 1129
1130 /* 1130 /*
1131 * Loop through objects, look for IP addresses to mangle. 1131 * Loop through objects, look for IP addresses to mangle.
1132 */ 1132 */
1133 if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) 1133 if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
1134 return 0; 1134 return 0;
1135 1135
1136 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) 1136 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
1137 return 0; 1137 return 0;
1138 1138
1139 obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); 1139 obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
1140 if (obj == NULL) { 1140 if (obj == NULL) {
1141 if (net_ratelimit()) 1141 if (net_ratelimit())
1142 printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__); 1142 printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
1143 return 0; 1143 return 0;
1144 } 1144 }
1145 1145
1146 while (!asn1_eoc_decode(&ctx, eoc)) { 1146 while (!asn1_eoc_decode(&ctx, eoc)) {
1147 unsigned int i; 1147 unsigned int i;
1148 1148
1149 if (!snmp_object_decode(&ctx, obj)) { 1149 if (!snmp_object_decode(&ctx, obj)) {
1150 if (*obj) { 1150 if (*obj) {
1151 kfree((*obj)->id); 1151 kfree((*obj)->id);
1152 kfree(*obj); 1152 kfree(*obj);
1153 } 1153 }
1154 kfree(obj); 1154 kfree(obj);
1155 return 0; 1155 return 0;
1156 } 1156 }
@@ -1163,20 +1163,20 @@ static int snmp_parse_mangle(unsigned char *msg,
1163 printk("%lu", (*obj)->id[i]);
1164 }
1165 printk(": type=%u\n", (*obj)->type);
1166
1167 }
1168
1169 if ((*obj)->type == SNMP_IPADDR)
1170 mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
1171
1172 kfree((*obj)->id);
1173 kfree(*obj);
1174 }
1175 kfree(obj);
1176
1177 if (!asn1_eoc_decode(&ctx, eoc))
1178 return 0;
1179
1180 return 1;
1181}
1182
@@ -1186,12 +1186,12 @@ static int snmp_parse_mangle(unsigned char *msg,
1186 *
1187 *****************************************************************************/
1188
1189/*
1190 * SNMP translation routine.
1191 */
1192static int snmp_translate(struct ip_conntrack *ct,
1193 enum ip_conntrack_info ctinfo,
1194 struct sk_buff **pskb)
1195{
1196 struct iphdr *iph = (*pskb)->nh.iph;
1197 struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
@@ -1213,12 +1213,12 @@ static int snmp_translate(struct ip_conntrack *ct,
1213 map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
1214 map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip);
1215 }
1216
1217 if (map.from == map.to)
1218 return NF_ACCEPT;
1219
1220 if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
1221 paylen, &map, &udph->check)) {
1222 if (net_ratelimit())
1223 printk(KERN_WARNING "bsalg: parser failed\n");
1224 return NF_DROP;
@@ -1247,7 +1247,7 @@ static int help(struct sk_buff **pskb,
1247 if (!(ct->status & IPS_NAT_MASK))
1248 return NF_ACCEPT;
1249
1250 /*
1251 * Make sure the packet length is ok. So far, we were only guaranteed
1252 * to have a valid length IP header plus 8 bytes, which means we have
1253 * enough room for a UDP header. Just verify the UDP length field so we
@@ -1305,7 +1305,7 @@ static struct ip_conntrack_helper snmp_trap_helper = {
1305 * Module stuff.
1306 *
1307 *****************************************************************************/
1308
1309static int __init ip_nat_snmp_basic_init(void)
1310{
1311 int ret = 0;
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index ad66328baa5d..adf25f9f70e1 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -81,7 +81,7 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
81 }
82}
83#endif
84
85static unsigned int
86ip_nat_fn(unsigned int hooknum,
87 struct sk_buff **pskb,
@@ -107,8 +107,8 @@ ip_nat_fn(unsigned int hooknum,
107 protocol. 8) --RR */
108 if (!ct) {
109 /* Exception: ICMP redirect to new connection (not in
110 hash table yet). We must not let this through, in
111 case we're doing NAT to the same network. */
112 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
113 struct icmphdr _hdr, *hp;
114
@@ -148,7 +148,7 @@ ip_nat_fn(unsigned int hooknum,
148 if (unlikely(is_confirmed(ct)))
149 /* NAT module was loaded late */
150 ret = alloc_null_binding_confirmed(ct, info,
151 hooknum);
152 else if (hooknum == NF_IP_LOCAL_IN)
153 /* LOCAL_IN hook doesn't have a chain! */
154 ret = alloc_null_binding(ct, info, hooknum);
@@ -179,10 +179,10 @@ ip_nat_fn(unsigned int hooknum,
179
180static unsigned int
181ip_nat_in(unsigned int hooknum,
182 struct sk_buff **pskb,
183 const struct net_device *in,
184 const struct net_device *out,
185 int (*okfn)(struct sk_buff *))
186{
187 unsigned int ret;
188 __be32 daddr = (*pskb)->nh.iph->daddr;
@@ -277,9 +277,9 @@ ip_nat_adjust(unsigned int hooknum,
277
278 ct = ip_conntrack_get(*pskb, &ctinfo);
279 if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
280 DEBUGP("ip_nat_standalone: adjusting sequence number\n");
281 if (!ip_nat_seq_adjust(pskb, ct, ctinfo))
282 return NF_DROP;
283 }
284 return NF_ACCEPT;
285}
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index cd520df4dcf4..68bf19f3b01c 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -11,13 +11,13 @@
11 *
12 * 2000-03-27: Simplified code (thanks to Andi Kleen for clues).
13 * 2000-05-20: Fixed notifier problems (following Miguel Freitas' report).
14 * 2000-06-19: Fixed so nfmark is copied to metadata (reported by Sebastian
15 * Zander).
16 * 2000-08-01: Added Nick Williams' MAC support.
17 * 2002-06-25: Code cleanup.
18 * 2005-01-10: Added /proc counter for dropped packets; fixed so
19 * packets aren't delivered to user space if they're going
20 * to be dropped.
21 * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte)
22 *
23 */
@@ -97,7 +97,7 @@ __ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data)
97
98 list_for_each_prev(p, &queue_list) {
99 struct ipq_queue_entry *entry = (struct ipq_queue_entry *)p;
100
101 if (!cmpfn || cmpfn(entry, data))
102 return entry;
103 }
@@ -129,7 +129,7 @@ static inline void
129__ipq_flush(int verdict)
130{
131 struct ipq_queue_entry *entry;
132
133 while ((entry = __ipq_find_dequeue_entry(NULL, 0)))
134 ipq_issue_verdict(entry, verdict);
135}
@@ -138,21 +138,21 @@ static inline int
138__ipq_set_mode(unsigned char mode, unsigned int range)
139{
140 int status = 0;
141
142 switch(mode) {
143 case IPQ_COPY_NONE:
144 case IPQ_COPY_META:
145 copy_mode = mode;
146 copy_range = 0;
147 break;
148
149 case IPQ_COPY_PACKET:
150 copy_mode = mode;
151 copy_range = range;
152 if (copy_range > 0xFFFF)
153 copy_range = 0xFFFF;
154 break;
155
156 default:
157 status = -EINVAL;
158
@@ -173,7 +173,7 @@ static struct ipq_queue_entry *
173ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
174{
175 struct ipq_queue_entry *entry;
176
177 write_lock_bh(&queue_lock);
178 entry = __ipq_find_dequeue_entry(cmpfn, data);
179 write_unlock_bh(&queue_lock);
@@ -199,14 +199,14 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
199 struct nlmsghdr *nlh;
200
201 read_lock_bh(&queue_lock);
202
203 switch (copy_mode) {
204 case IPQ_COPY_META:
205 case IPQ_COPY_NONE:
206 size = NLMSG_SPACE(sizeof(*pmsg));
207 data_len = 0;
208 break;
209
210 case IPQ_COPY_PACKET:
211 if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
212 entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
@@ -218,10 +218,10 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
218 data_len = entry->skb->len;
219 else
220 data_len = copy_range;
221
222 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
223 break;
224
225 default:
226 *errp = -EINVAL;
227 read_unlock_bh(&queue_lock);
@@ -233,7 +233,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
233 skb = alloc_skb(size, GFP_ATOMIC);
234 if (!skb)
235 goto nlmsg_failure;
236
237 old_tail= skb->tail;
238 nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
239 pmsg = NLMSG_DATA(nlh);
@@ -246,29 +246,29 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
246 pmsg->mark = entry->skb->mark;
247 pmsg->hook = entry->info->hook;
248 pmsg->hw_protocol = entry->skb->protocol;
249
250 if (entry->info->indev)
251 strcpy(pmsg->indev_name, entry->info->indev->name);
252 else
253 pmsg->indev_name[0] = '\0';
254
255 if (entry->info->outdev)
256 strcpy(pmsg->outdev_name, entry->info->outdev->name);
257 else
258 pmsg->outdev_name[0] = '\0';
259
260 if (entry->info->indev && entry->skb->dev) {
261 pmsg->hw_type = entry->skb->dev->type;
262 if (entry->skb->dev->hard_header_parse)
263 pmsg->hw_addrlen =
264 entry->skb->dev->hard_header_parse(entry->skb,
265 pmsg->hw_addr);
266 }
267
268 if (data_len)
269 if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
270 BUG();
271
272 nlh->nlmsg_len = skb->tail - old_tail;
273 return skb;
274
@@ -303,26 +303,26 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
303 nskb = ipq_build_packet_message(entry, &status);
304 if (nskb == NULL)
305 goto err_out_free;
306
307 write_lock_bh(&queue_lock);
308
309 if (!peer_pid)
310 goto err_out_free_nskb;
311
312 if (queue_total >= queue_maxlen) {
313 queue_dropped++;
314 status = -ENOSPC;
315 if (net_ratelimit())
316 printk (KERN_WARNING "ip_queue: full at %d entries, "
317 "dropping packets(s). Dropped: %d\n", queue_total,
318 queue_dropped);
319 goto err_out_free_nskb;
320 }
321
322 /* netlink_unicast will either free the nskb or attach it to a socket */
323 status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
324 if (status < 0) {
325 queue_user_dropped++;
326 goto err_out_unlock;
327 }
328
@@ -332,8 +332,8 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
332 return status;
333
334err_out_free_nskb:
335 kfree_skb(nskb);
336
337err_out_unlock:
338 write_unlock_bh(&queue_lock);
339
@@ -359,11 +359,11 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
359 return -EINVAL;
360 if (diff > skb_tailroom(e->skb)) {
361 struct sk_buff *newskb;
362
363 newskb = skb_copy_expand(e->skb,
364 skb_headroom(e->skb),
365 diff,
366 GFP_ATOMIC);
367 if (newskb == NULL) {
368 printk(KERN_WARNING "ip_queue: OOM "
369 "in mangle, dropping packet\n");
@@ -403,11 +403,11 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
403 return -ENOENT;
404 else {
405 int verdict = vmsg->value;
406
407 if (vmsg->data_len && vmsg->data_len == len)
408 if (ipq_mangle_ipv4(vmsg, entry) < 0)
409 verdict = NF_DROP;
410
411 ipq_issue_verdict(entry, verdict);
412 return 0;
413 }
@@ -426,7 +426,7 @@ ipq_set_mode(unsigned char mode, unsigned int range)
426
427static int
428ipq_receive_peer(struct ipq_peer_msg *pmsg,
429 unsigned char type, unsigned int len)
430{
431 int status = 0;
432
@@ -436,15 +436,15 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
436 switch (type) {
437 case IPQM_MODE:
438 status = ipq_set_mode(pmsg->msg.mode.value,
439 pmsg->msg.mode.range);
440 break;
441
442 case IPQM_VERDICT:
443 if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
444 status = -EINVAL;
445 else
446 status = ipq_set_verdict(&pmsg->msg.verdict,
447 len - sizeof(*pmsg));
448 break;
449 default:
450 status = -EINVAL;
@@ -468,7 +468,7 @@ dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
468 return 1;
469 if (entry->skb->nf_bridge->physoutdev &&
470 entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
471 return 1;
472 }
473#endif
474 return 0;
@@ -478,7 +478,7 @@ static void
478ipq_dev_drop(int ifindex)
479{
480 struct ipq_queue_entry *entry;
481
482 while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL)
483 ipq_issue_verdict(entry, NF_DROP);
484}
@@ -502,25 +502,25 @@ ipq_rcv_skb(struct sk_buff *skb)
502
503 pid = nlh->nlmsg_pid;
504 flags = nlh->nlmsg_flags;
505
506 if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
507 RCV_SKB_FAIL(-EINVAL);
508
509 if (flags & MSG_TRUNC)
510 RCV_SKB_FAIL(-ECOMM);
511
512 type = nlh->nlmsg_type;
513 if (type < NLMSG_NOOP || type >= IPQM_MAX)
514 RCV_SKB_FAIL(-EINVAL);
515
516 if (type <= IPQM_BASE)
517 return;
518
519 if (security_netlink_recv(skb, CAP_NET_ADMIN))
520 RCV_SKB_FAIL(-EPERM);
521
522 write_lock_bh(&queue_lock);
523
524 if (peer_pid) {
525 if (peer_pid != pid) {
526 write_unlock_bh(&queue_lock);
@@ -530,17 +530,17 @@ ipq_rcv_skb(struct sk_buff *skb)
530 net_enable_timestamp();
531 peer_pid = pid;
532 }
533
534 write_unlock_bh(&queue_lock);
535
536 status = ipq_receive_peer(NLMSG_DATA(nlh), type,
537 nlmsglen - NLMSG_LENGTH(0));
538 if (status < 0)
539 RCV_SKB_FAIL(status);
540
541 if (flags & NLM_F_ACK)
542 netlink_ack(skb, nlh, 0);
543 return;
544}
545
546static void
@@ -550,19 +550,19 @@ ipq_rcv_sk(struct sock *sk, int len)
550 unsigned int qlen;
551
552 mutex_lock(&ipqnl_mutex);
553
554 for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
555 skb = skb_dequeue(&sk->sk_receive_queue);
556 ipq_rcv_skb(skb);
557 kfree_skb(skb);
558 }
559
560 mutex_unlock(&ipqnl_mutex);
561}
562
563static int
564ipq_rcv_dev_event(struct notifier_block *this,
565 unsigned long event, void *ptr)
566{
567 struct net_device *dev = ptr;
568
@@ -578,7 +578,7 @@ static struct notifier_block ipq_dev_notifier = {
578
579static int
580ipq_rcv_nl_event(struct notifier_block *this,
581 unsigned long event, void *ptr)
582{
583 struct netlink_notify *n = ptr;
584
@@ -607,7 +607,7 @@ static ctl_table ipq_table[] = {
607 .mode = 0644,
608 .proc_handler = proc_dointvec
609 },
610 { .ctl_name = 0 }
611};
612
613static ctl_table ipq_dir_table[] = {
@@ -637,25 +637,25 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
637 int len;
638
639 read_lock_bh(&queue_lock);
640
641 len = sprintf(buffer,
642 "Peer PID : %d\n"
643 "Copy mode : %hu\n"
644 "Copy range : %u\n"
645 "Queue length : %u\n"
646 "Queue max. length : %u\n"
647 "Queue dropped : %u\n"
648 "Netlink dropped : %u\n",
649 peer_pid,
650 copy_mode,
651 copy_range,
652 queue_total,
653 queue_maxlen,
654 queue_dropped,
655 queue_user_dropped);
656
657 read_unlock_bh(&queue_lock);
658
659 *start = buffer + offset;
660 len -= offset;
661 if (len > length)
@@ -675,7 +675,7 @@ static int __init ip_queue_init(void)
675{
676 int status = -ENOMEM;
677 struct proc_dir_entry *proc;
678
679 netlink_register_notifier(&ipq_nl_notifier);
680 ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk,
681 THIS_MODULE);
@@ -691,10 +691,10 @@ static int __init ip_queue_init(void)
691 printk(KERN_ERR "ip_queue: failed to create proc entry\n");
692 goto cleanup_ipqnl;
693 }
694
695 register_netdevice_notifier(&ipq_dev_notifier);
696 ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0);
697
698 status = nf_register_queue_handler(PF_INET, &nfqh);
699 if (status < 0) {
700 printk(KERN_ERR "ip_queue: failed to register queue handler\n");
@@ -706,12 +706,12 @@ cleanup_sysctl:
706 unregister_sysctl_table(ipq_sysctl_header);
707 unregister_netdevice_notifier(&ipq_dev_notifier);
708 proc_net_remove(IPQ_PROC_FS_NAME);
709
710cleanup_ipqnl:
711 sock_release(ipqnl->sk_socket);
712 mutex_lock(&ipqnl_mutex);
713 mutex_unlock(&ipqnl_mutex);
714
715cleanup_netlink_notifier:
716 netlink_unregister_notifier(&ipq_nl_notifier);
717 return status;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 5a7b3a341389..50cc4b92e284 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -297,7 +297,7 @@ ipt_do_table(struct sk_buff **pskb,
297 e = get_entry(table_base, v);
298 } else {
299 /* Targets which reenter must return
300 abs. verdicts */
301#ifdef CONFIG_NETFILTER_DEBUG
302 ((struct ipt_entry *)table_base)->comefrom
303 = 0xeeeeeeec;
@@ -556,9 +556,9 @@ err:
556
557static inline int check_target(struct ipt_entry *e, const char *name)
558{
559 struct ipt_entry_target *t;
560 struct xt_target *target;
561 int ret;
562
563 t = ipt_get_target(e);
564 target = t->u.kernel.target;
@@ -652,7 +652,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
652 }
653
654 /* FIXME: underflows must be unconditional, standard verdicts
655 < 0 (not IPT_RETURN). --RR */
656
657 /* Clear counters and comefrom */
658 e->counters = ((struct xt_counters) { 0, 0 });
@@ -2057,7 +2057,7 @@ void ipt_unregister_table(struct xt_table *table)
2057 struct xt_table_info *private;
2058 void *loc_cpu_entry;
2059
2060 private = xt_unregister_table(table);
2061
2062 /* Decrease module usage counts and free resources */
2063 loc_cpu_entry = private->entries[raw_smp_processor_id()];
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 343c2abdc1a0..4fe28f264475 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -1,4 +1,4 @@
1/* Cluster IP hashmark target
2 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
3 * based on ideas of Fabio Olive Leite <olive@unixforge.org>
4 *
@@ -123,7 +123,7 @@ __clusterip_config_find(__be32 clusterip)
123 struct list_head *pos;
124
125 list_for_each(pos, &clusterip_configs) {
126 struct clusterip_config *c = list_entry(pos,
127 struct clusterip_config, list);
128 if (c->clusterip == clusterip) {
129 return c;
@@ -229,7 +229,7 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum)
229 if (nodenum == 0 ||
230 nodenum > c->num_total_nodes)
231 return 1;
232
233 if (test_and_clear_bit(nodenum - 1, &c->local_nodes))
234 return 0;
235
@@ -270,7 +270,7 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
270 config->hash_initval);
271 break;
272 case CLUSTERIP_HASHMODE_SIP_SPT:
273 hashval = jhash_2words(ntohl(iph->saddr), sport,
274 config->hash_initval);
275 break;
276 case CLUSTERIP_HASHMODE_SIP_SPT_DPT:
@@ -297,8 +297,8 @@ clusterip_responsible(struct clusterip_config *config, u_int32_t hash)
297 return test_bit(hash - 1, &config->local_nodes);
298}
299
300/***********************************************************************
301 * IPTABLES TARGET
302 ***********************************************************************/
303
304static unsigned int
@@ -321,7 +321,7 @@ target(struct sk_buff **pskb,
321 if (mark == NULL) {
322 printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
323 /* FIXME: need to drop invalid ones, since replies
324 * to outgoing connections of other nodes will be
325 * marked as INVALID */
326 return NF_DROP;
327 }
@@ -329,11 +329,11 @@ target(struct sk_buff **pskb,
329 /* special case: ICMP error handling. conntrack distinguishes between
330 * error messages (RELATED) and information requests (see below) */
331 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
332 && (ctinfo == IP_CT_RELATED
333 || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
334 return XT_CONTINUE;
335
336 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
337 * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
338 * on, which all have an ID field [relevant for hashing]. */
339
@@ -376,8 +376,8 @@ static int
376checkentry(const char *tablename,
377 const void *e_void,
378 const struct xt_target *target,
379 void *targinfo,
380 unsigned int hook_mask)
381{
382 struct ipt_clusterip_tgt_info *cipinfo = targinfo;
383 const struct ipt_entry *e = e_void;
@@ -437,7 +437,7 @@ checkentry(const char *tablename,
437 return 0;
438 }
439
440 config = clusterip_config_init(cipinfo,
441 e->ip.dst.s_addr, dev);
442 if (!config) {
443 printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n");
@@ -483,8 +483,8 @@ static struct xt_target clusterip_tgt = {
483};
484
485
486/***********************************************************************
487 * ARP MANGLING CODE
488 ***********************************************************************/
489
490/* hardcoded for 48bit ethernet and 32bit ipv4 addresses */
@@ -496,7 +496,7 @@ struct arp_payload {
496} __attribute__ ((packed));
497
498#ifdef CLUSTERIP_DEBUG
499static void arp_print(struct arp_payload *payload)
500{
501#define HBUFFERLEN 30
502 char hbuffer[HBUFFERLEN];
@@ -510,7 +510,7 @@ static void arp_print(struct arp_payload *payload)
510 }
511 hbuffer[--k]='\0';
512
513 printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n",
514 NIPQUAD(payload->src_ip), hbuffer,
515 NIPQUAD(payload->dst_ip));
516}
@@ -540,13 +540,13 @@ arp_mangle(unsigned int hook,
540
541 payload = (void *)(arp+1);
542
543 /* if there is no clusterip configuration for the arp reply's
544 * source ip, we don't want to mangle it */
545 c = clusterip_config_find_get(payload->src_ip, 0);
546 if (!c)
547 return NF_ACCEPT;
548
549 /* normally the linux kernel always replies to arp queries of
550 * addresses on different interfacs. However, in the CLUSTERIP case
551 * this wouldn't work, since we didn't subscribe the mcast group on
552 * other interfaces */
@@ -577,8 +577,8 @@ static struct nf_hook_ops cip_arp_ops = {
577 .priority = -1
578};
579
580/***********************************************************************
581 * PROC DIR HANDLING
582 ***********************************************************************/
583
584#ifdef CONFIG_PROC_FS
@@ -640,7 +640,7 @@ static int clusterip_seq_show(struct seq_file *s, void *v)
640{
641 struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
642
643 if (idx->pos != 0)
644 seq_putc(s, ',');
645
646 seq_printf(s, "%u", idx->bit);
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index b5ca5938d1fe..4f565633631d 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -1,9 +1,9 @@
1/* iptables module for the IPv4 and TCP ECN bits, Version 1.5
2 *
3 * (C) 2002 by Harald Welte <laforge@netfilter.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp
@@ -40,7 +40,7 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
40 iph->tos &= ~IPT_ECN_IP_MASK;
41 iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
42 nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
43 }
44 return 1;
45}
46
@@ -104,8 +104,8 @@ static int
104checkentry(const char *tablename,
105 const void *e_void,
106 const struct xt_target *target,
107 void *targinfo,
108 unsigned int hook_mask)
109{
110 const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo;
111 const struct ipt_entry *e = e_void;
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index f68370ffb43f..f4a62f2522ff 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -289,7 +289,7 @@ static void dump_packet(const struct nf_loginfo *info,
289
290 if (ntohs(ih->frag_off) & IP_OFFSET)
291 break;
292
293 /* Max length: 9 "PROTO=AH " */
294 printk("PROTO=AH ");
295
@@ -334,10 +334,10 @@ static void dump_packet(const struct nf_loginfo *info,
334 }
335
336 /* Max length: 15 "UID=4294967295 " */
337 if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
338 read_lock_bh(&skb->sk->sk_callback_lock);
339 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
340 printk("UID=%u ", skb->sk->sk_socket->file->f_uid);
341 read_unlock_bh(&skb->sk->sk_callback_lock);
342 }
343
@@ -431,7 +431,7 @@ ipt_log_target(struct sk_buff **pskb,
431 li.u.log.logflags = loginfo->logflags;
432
433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
434 loginfo->prefix);
435 return XT_CONTINUE;
436}
437
@@ -483,7 +483,7 @@ static int __init ipt_log_init(void)
483 /* we cannot make module load fail here, since otherwise
484 * iptables userspace would abort */
485 }
486
487 return 0;
488}
489
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 91c42efcd533..b5955f3a3f8f 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -86,7 +86,7 @@ masquerade_target(struct sk_buff **pskb,
86 nat = nfct_nat(ct);
87#endif
88 IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
89 || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
90
91 /* Source address is 0.0.0.0 - locally generated packet that is
92 * probably not supposed to be masqueraded.
@@ -221,7 +221,7 @@ static void __exit ipt_masquerade_fini(void)
221{
222 xt_unregister_target(&masquerade);
223 unregister_netdevice_notifier(&masq_dev_notifier);
224 unregister_inetaddr_notifier(&masq_inet_notifier);
225}
226
227module_init(ipt_masquerade_init);
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index b4acc241d898..fd7aaa347cd8 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -92,13 +92,13 @@ target(struct sk_buff **pskb,
92static struct xt_target target_module = {
93 .name = MODULENAME,
94 .family = AF_INET,
95 .target = target,
96 .targetsize = sizeof(struct ip_nat_multi_range_compat),
97 .table = "nat",
98 .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING) |
99 (1 << NF_IP_LOCAL_OUT),
100 .checkentry = check,
101 .me = THIS_MODULE
102};
103
104static int __init ipt_netmap_init(void)
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 54cd021aa5a8..c2b6b80670f8 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -84,7 +84,7 @@ redirect_target(struct sk_buff **pskb,
84 struct in_ifaddr *ifa;
85
86 newdst = 0;
87
88 rcu_read_lock();
89 indev = __in_dev_get_rcu((*pskb)->dev);
90 if (indev && (ifa = indev->ifa_list))
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index e4a1ddb386a7..a9eb3635fff2 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -57,7 +57,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
57 oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4,
58 sizeof(_otcph), &_otcph);
59 if (oth == NULL)
60 return;
61
62 /* No RST for RST. */
63 if (oth->rst)
@@ -145,7 +145,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
145
146 /* Adjust IP checksum */
147 nskb->nh.iph->check = 0;
148 nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
149 nskb->nh.iph->ihl);
150
151 /* "Never happens" */
@@ -165,7 +165,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
165static inline void send_unreach(struct sk_buff *skb_in, int code)
166{
167 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
168}
169
170static unsigned int reject(struct sk_buff **pskb,
171 const struct net_device *in,
@@ -177,33 +177,33 @@ static unsigned int reject(struct sk_buff **pskb,
177 const struct ipt_reject_info *reject = targinfo;
178
179 /* Our naive response construction doesn't deal with IP
180 options, and probably shouldn't try. */
181 if ((*pskb)->nh.iph->ihl<<2 != sizeof(struct iphdr))
182 return NF_DROP;
183
184 /* WARNING: This code causes reentry within iptables.
185 This means that the iptables jump stack is now crap. We
186 must return an absolute verdict. --RR */
187 switch (reject->with) {
188 case IPT_ICMP_NET_UNREACHABLE:
189 send_unreach(*pskb, ICMP_NET_UNREACH);
190 break;
191 case IPT_ICMP_HOST_UNREACHABLE:
192 send_unreach(*pskb, ICMP_HOST_UNREACH);
193 break;
194 case IPT_ICMP_PROT_UNREACHABLE:
195 send_unreach(*pskb, ICMP_PROT_UNREACH);
196 break;
197 case IPT_ICMP_PORT_UNREACHABLE:
198 send_unreach(*pskb, ICMP_PORT_UNREACH);
199 break;
200 case IPT_ICMP_NET_PROHIBITED:
201 send_unreach(*pskb, ICMP_NET_ANO);
202 break;
203 case IPT_ICMP_HOST_PROHIBITED:
204 send_unreach(*pskb, ICMP_HOST_ANO);
205 break;
206 case IPT_ICMP_ADMIN_PROHIBITED:
207 send_unreach(*pskb, ICMP_PKT_FILTERED);
208 break;
209 case IPT_TCP_RESET:
@@ -222,7 +222,7 @@ static int check(const char *tablename,
222 void *targinfo,
223 unsigned int hook_mask)
224{
225 const struct ipt_reject_info *rejinfo = targinfo;
226 const struct ipt_entry *e = e_void;
227
228 if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index a1cdd1262de2..bd4404e5c688 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -87,24 +87,24 @@ same_check(const char *tablename,
87 DEBUGP("same_check: bad MAP_IPS.\n");
88 return 0;
89 }
90 rangeip = (ntohl(mr->range[count].max_ip) -
91 ntohl(mr->range[count].min_ip) + 1);
92 mr->ipnum += rangeip;
93
94 DEBUGP("same_check: range %u, ipnum = %u\n", count, rangeip);
95 }
96 DEBUGP("same_check: total ipaddresses = %u\n", mr->ipnum);
97
98 mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL);
99 if (!mr->iparray) {
100 DEBUGP("same_check: Couldn't allocate %u bytes "
101 "for %u ipaddresses!\n",
102 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
103 return 0;
104 }
105 DEBUGP("same_check: Allocated %u bytes for %u ipaddresses.\n",
106 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
107
108 for (count = 0; count < mr->rangesize; count++) {
109 for (countess = ntohl(mr->range[count].min_ip);
110 countess <= ntohl(mr->range[count].max_ip);
@@ -119,13 +119,13 @@ same_check(const char *tablename,
119 return 1;
120}
121
122static void
123same_destroy(const struct xt_target *target, void *targinfo)
124{
125 struct ipt_same_info *mr = targinfo;
126
127 kfree(mr->iparray);
128
129 DEBUGP("same_destroy: Deallocated %u bytes for %u ipaddresses.\n",
130 (sizeof(u_int32_t) * mr->ipnum), mr->ipnum);
131}
@@ -156,7 +156,7 @@ same_target(struct sk_buff **pskb,
156 giving some hope for consistency across reboots.
157 Here we calculate the index in same->iparray which
158 holds the ipaddress we should use */
159
160#ifdef CONFIG_NF_NAT_NEEDED
161 tmpip = ntohl(t->src.u3.ip);
162
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 29b05a6bd108..cedf9f7d9d6e 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -47,8 +47,8 @@ static int
47checkentry(const char *tablename,
48 const void *e_void,
49 const struct xt_target *target,
50 void *targinfo,
51 unsigned int hook_mask)
52{
53 const u_int8_t tos = ((struct ipt_tos_target_info *)targinfo)->tos;
54
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index d2b6fa3f9dcd..64be31c22ba9 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -19,7 +19,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
19MODULE_DESCRIPTION("IP tables TTL modification module");
20MODULE_LICENSE("GPL");
21
22static unsigned int
23ipt_ttl_target(struct sk_buff **pskb,
24 const struct net_device *in, const struct net_device *out,
25 unsigned int hooknum, const struct xt_target *target,
@@ -71,7 +71,7 @@ static int ipt_ttl_checkentry(const char *tablename,
71 struct ipt_TTL_info *info = targinfo;
72
73 if (info->mode > IPT_TTL_MAXMODE) {
74 printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
75 info->mode);
76 return 0;
77 }
@@ -83,10 +83,10 @@ static int ipt_ttl_checkentry(const char *tablename,
83static struct xt_target ipt_TTL = {
84 .name = "TTL",
85 .family = AF_INET,
86 .target = ipt_ttl_target,
87 .targetsize = sizeof(struct ipt_TTL_info),
88 .table = "mangle",
89 .checkentry = ipt_ttl_checkentry,
90 .me = THIS_MODULE,
91};
92
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 7af57a3a1f36..3a1eacc634b3 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -4,9 +4,9 @@
4 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
5 *
6 * 2000/09/22 ulog-cprange feature added
7 * 2001/01/04 in-kernel queue as proposed by Sebastian Zander
8 * <zander@fokus.gmd.de>
9 * 2001/01/30 per-rule nlgroup conflicts with global queue.
10 * nlgroup now global (sysctl)
11 * 2001/04/19 ulog-queue reworked, now fixed buffer size specified at
12 * module loadtime -HW
@@ -23,8 +23,8 @@
23 * it under the terms of the GNU General Public License version 2 as
24 * published by the Free Software Foundation.
25 *
26 * This module accepts two parameters:
27 *
28 * nlbufsiz:
29 * The parameter specifies how big the buffer for each netlink multicast
30 * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
@@ -72,7 +72,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
72
73#if 0
74#define DEBUGP(format, args...) printk("%s:%s:" format, \
75 __FILE__, __FUNCTION__ , ## args)
76#else
77#define DEBUGP(format, args...)
78#endif
@@ -162,7 +162,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
162 PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
163
164 if (n > size) {
165 /* try to allocate only as much as we need for
166 * current packet */
167
168 skb = alloc_skb(size, GFP_ATOMIC);
@@ -203,7 +203,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
203 size = NLMSG_SPACE(sizeof(*pm) + copy_len);
204
205 ub = &ulog_buffers[groupnum];
206
207 spin_lock_bh(&ulog_lock);
208
209 if (!ub->skb) {
@@ -211,7 +211,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
211 goto alloc_failure;
212 } else if (ub->qlen >= loginfo->qthreshold ||
213 size > skb_tailroom(ub->skb)) {
214 /* either the queue len is too high or we don't have
215 * enough room in nlskb left. send it to userspace. */
216
217 ulog_send(groupnum);
@@ -220,11 +220,11 @@ static void ipt_ulog_packet(unsigned int hooknum,
220 goto alloc_failure;
221 }
222
223 DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen,
224 loginfo->qthreshold);
225
226 /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
227 nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
228 sizeof(*pm)+copy_len);
229 ub->qlen++;
230
@@ -268,7 +268,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
268 /* copy_len <= skb->len, so can't fail. */
269 if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
270 BUG();
271
272 /* check if we are building multi-part messages */
273 if (ub->qlen > 1) {
274 ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
@@ -312,10 +312,10 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
312 struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;
313
314 ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL);
315
316 return XT_CONTINUE;
317}
318
319static void ipt_logfn(unsigned int pf,
320 unsigned int hooknum,
321 const struct sk_buff *skb,
@@ -396,7 +396,7 @@ static int __init ipt_ulog_init(void)
396 }
397
398 nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL,
399 THIS_MODULE);
400 if (!nflognl)
401 return -ENOMEM;
402
@@ -407,7 +407,7 @@ static int __init ipt_ulog_init(void)
407 }
408 if (nflog)
409 nf_log_register(PF_INET, &ipt_ulog_logger);
410
411 return 0;
412}
413
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 648f555c4d16..cfa0472617f6 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -40,7 +40,7 @@ static int match(const struct sk_buff *skb,
40 ret &= match_type(iph->saddr, info->source)^info->invert_source;
41 if (info->dest)
42 ret &= match_type(iph->daddr, info->dest)^info->invert_dest;
43
44 return ret;
45}
46
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 42f41224a43a..18a16782cf40 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -29,8 +29,8 @@ static inline int
29spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
30{
31 int r=0;
32 duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
33 min,spi,max);
34 r=(spi >= min && spi <= max) ^ invert;
35 duprintf(" result %s\n",r? "PASS" : "FAILED");
36 return r;
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c
index 05de593be94c..bc5d5e6091e4 100644
--- a/net/ipv4/netfilter/ipt_iprange.c
+++ b/net/ipv4/netfilter/ipt_iprange.c
@@ -41,7 +41,7 @@ match(const struct sk_buff *skb,
41 DEBUGP("src IP %u.%u.%u.%u NOT in range %s"
42 "%u.%u.%u.%u-%u.%u.%u.%u\n",
43 NIPQUAD(iph->saddr),
44 info->flags & IPRANGE_SRC_INV ? "(INV) " : "",
45 NIPQUAD(info->src.min_ip),
46 NIPQUAD(info->src.max_ip));
47 return 0;
@@ -54,7 +54,7 @@ match(const struct sk_buff *skb,
54 DEBUGP("dst IP %u.%u.%u.%u NOT in range %s"
55 "%u.%u.%u.%u-%u.%u.%u.%u\n",
56 NIPQUAD(iph->daddr),
57 info->flags & IPRANGE_DST_INV ? "(INV) " : "",
58 NIPQUAD(info->dst.min_ip),
59 NIPQUAD(info->dst.max_ip));
60 return 0;
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 9f496ac834b5..7fae9aa8944c 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -53,10 +53,10 @@ match(const struct sk_buff *skb,
53 53
54static int 54static int
55checkentry(const char *tablename, 55checkentry(const char *tablename,
56 const void *ip, 56 const void *ip,
57 const struct xt_match *match, 57 const struct xt_match *match,
58 void *matchinfo, 58 void *matchinfo,
59 unsigned int hook_mask) 59 unsigned int hook_mask)
60{ 60{
61 const struct ipt_owner_info *info = matchinfo; 61 const struct ipt_owner_info *info = matchinfo;
62 62
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
index d5cd984e5ed2..1eca9f400374 100644
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ b/net/ipv4/netfilter/ipt_ttl.c
@@ -1,4 +1,4 @@
1/* IP tables module for matching the value of the TTL 1/* IP tables module for matching the value of the TTL
2 * 2 *
3 * ipt_ttl.c,v 1.5 2000/11/13 11:16:08 laforge Exp 3 * ipt_ttl.c,v 1.5 2000/11/13 11:16:08 laforge Exp
4 * 4 *
@@ -41,7 +41,7 @@ static int match(const struct sk_buff *skb,
41 return (skb->nh.iph->ttl > info->ttl); 41 return (skb->nh.iph->ttl > info->ttl);
42 break; 42 break;
43 default: 43 default:
44 printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", 44 printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
45 info->mode); 45 info->mode);
46 return 0; 46 return 0;
47 } 47 }
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 51053cb42f43..d1d61e97b976 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -25,7 +25,7 @@ static struct
25 struct ipt_replace repl; 25 struct ipt_replace repl;
26 struct ipt_standard entries[3]; 26 struct ipt_standard entries[3];
27 struct ipt_error term; 27 struct ipt_error term;
28} initial_table __initdata 28} initial_table __initdata
29= { { "filter", FILTER_VALID_HOOKS, 4, 29= { { "filter", FILTER_VALID_HOOKS, 4,
30 sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), 30 sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
31 { [NF_IP_LOCAL_IN] = 0, 31 { [NF_IP_LOCAL_IN] = 0,
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index a532e4d84332..98b66ef0c714 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -58,7 +58,7 @@ static struct
58 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, 58 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
59 -NF_ACCEPT - 1 } }, 59 -NF_ACCEPT - 1 } },
60 /* LOCAL_IN */ 60 /* LOCAL_IN */
61 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 61 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
62 0, 62 0,
63 sizeof(struct ipt_entry), 63 sizeof(struct ipt_entry),
64 sizeof(struct ipt_standard), 64 sizeof(struct ipt_standard),
@@ -66,7 +66,7 @@ static struct
66 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, 66 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
67 -NF_ACCEPT - 1 } }, 67 -NF_ACCEPT - 1 } },
68 /* FORWARD */ 68 /* FORWARD */
69 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 69 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
70 0, 70 0,
71 sizeof(struct ipt_entry), 71 sizeof(struct ipt_entry),
72 sizeof(struct ipt_standard), 72 sizeof(struct ipt_standard),
@@ -166,7 +166,7 @@ static struct nf_hook_ops ipt_ops[] = {
166 .hook = ipt_route_hook, 166 .hook = ipt_route_hook,
167 .owner = THIS_MODULE, 167 .owner = THIS_MODULE,
168 .pf = PF_INET, 168 .pf = PF_INET,
169 .hooknum = NF_IP_PRE_ROUTING, 169 .hooknum = NF_IP_PRE_ROUTING,
170 .priority = NF_IP_PRI_MANGLE, 170 .priority = NF_IP_PRI_MANGLE,
171 }, 171 },
172 { 172 {
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 5277550fa6b5..18c3d4c9ff51 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT . 2 * 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT .
3 * 3 *
4 * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> 4 * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
@@ -15,26 +15,26 @@ static struct
15 struct ipt_error term; 15 struct ipt_error term;
16} initial_table __initdata = { 16} initial_table __initdata = {
17 .repl = { 17 .repl = {
18 .name = "raw", 18 .name = "raw",
19 .valid_hooks = RAW_VALID_HOOKS, 19 .valid_hooks = RAW_VALID_HOOKS,
20 .num_entries = 3, 20 .num_entries = 3,
21 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error), 21 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
22 .hook_entry = { 22 .hook_entry = {
23 [NF_IP_PRE_ROUTING] = 0, 23 [NF_IP_PRE_ROUTING] = 0,
24 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, 24 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) },
25 .underflow = { 25 .underflow = {
26 [NF_IP_PRE_ROUTING] = 0, 26 [NF_IP_PRE_ROUTING] = 0,
27 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, 27 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) },
28 }, 28 },
29 .entries = { 29 .entries = {
30 /* PRE_ROUTING */ 30 /* PRE_ROUTING */
31 { 31 {
32 .entry = { 32 .entry = {
33 .target_offset = sizeof(struct ipt_entry), 33 .target_offset = sizeof(struct ipt_entry),
34 .next_offset = sizeof(struct ipt_standard), 34 .next_offset = sizeof(struct ipt_standard),
35 }, 35 },
36 .target = { 36 .target = {
37 .target = { 37 .target = {
38 .u = { 38 .u = {
39 .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), 39 .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
40 }, 40 },
@@ -69,7 +69,7 @@ static struct
69 .target = { 69 .target = {
70 .u = { 70 .u = {
71 .user = { 71 .user = {
72 .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)), 72 .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
73 .name = IPT_ERROR_TARGET, 73 .name = IPT_ERROR_TARGET,
74 }, 74 },
75 }, 75 },
@@ -80,9 +80,9 @@ static struct
80}; 80};
81 81
82static struct xt_table packet_raw = { 82static struct xt_table packet_raw = {
83 .name = "raw", 83 .name = "raw",
84 .valid_hooks = RAW_VALID_HOOKS, 84 .valid_hooks = RAW_VALID_HOOKS,
85 .lock = RW_LOCK_UNLOCKED, 85 .lock = RW_LOCK_UNLOCKED,
86 .me = THIS_MODULE, 86 .me = THIS_MODULE,
87 .af = AF_INET, 87 .af = AF_INET,
88}; 88};
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 471b638cedec..b984db771258 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -66,7 +66,7 @@ static int ipv4_print_tuple(struct seq_file *s,
66 const struct nf_conntrack_tuple *tuple) 66 const struct nf_conntrack_tuple *tuple)
67{ 67{
68 return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ", 68 return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
69 NIPQUAD(tuple->src.u3.ip), 69 NIPQUAD(tuple->src.u3.ip),
70 NIPQUAD(tuple->dst.u3.ip)); 70 NIPQUAD(tuple->dst.u3.ip));
71} 71}
72 72
@@ -82,14 +82,14 @@ nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
82{ 82{
83 skb_orphan(skb); 83 skb_orphan(skb);
84 84
85 local_bh_disable(); 85 local_bh_disable();
86 skb = ip_defrag(skb, user); 86 skb = ip_defrag(skb, user);
87 local_bh_enable(); 87 local_bh_enable();
88 88
89 if (skb) 89 if (skb)
90 ip_send_check(skb->nh.iph); 90 ip_send_check(skb->nh.iph);
91 91
92 return skb; 92 return skb;
93} 93}
94 94
95static int 95static int
@@ -192,10 +192,10 @@ static unsigned int ipv4_conntrack_in(unsigned int hooknum,
192} 192}
193 193
194static unsigned int ipv4_conntrack_local(unsigned int hooknum, 194static unsigned int ipv4_conntrack_local(unsigned int hooknum,
195 struct sk_buff **pskb, 195 struct sk_buff **pskb,
196 const struct net_device *in, 196 const struct net_device *in,
197 const struct net_device *out, 197 const struct net_device *out,
198 int (*okfn)(struct sk_buff *)) 198 int (*okfn)(struct sk_buff *))
199{ 199{
200 /* root is playing with raw sockets. */ 200 /* root is playing with raw sockets. */
201 if ((*pskb)->len < sizeof(struct iphdr) 201 if ((*pskb)->len < sizeof(struct iphdr)
@@ -332,7 +332,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
332 struct inet_sock *inet = inet_sk(sk); 332 struct inet_sock *inet = inet_sk(sk);
333 struct nf_conntrack_tuple_hash *h; 333 struct nf_conntrack_tuple_hash *h;
334 struct nf_conntrack_tuple tuple; 334 struct nf_conntrack_tuple tuple;
335 335
336 NF_CT_TUPLE_U_BLANK(&tuple); 336 NF_CT_TUPLE_U_BLANK(&tuple);
337 tuple.src.u3.ip = inet->rcv_saddr; 337 tuple.src.u3.ip = inet->rcv_saddr;
338 tuple.src.u.tcp.port = inet->sport; 338 tuple.src.u.tcp.port = inet->sport;
@@ -501,7 +501,7 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
501 return ret; 501 return ret;
502#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) 502#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
503 cleanup_hooks: 503 cleanup_hooks:
504 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); 504 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
505#endif 505#endif
506 cleanup_ipv4: 506 cleanup_ipv4:
507 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); 507 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 3b31bc649608..14a93a738418 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -135,7 +135,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
135 l3proto, l4proto)) 135 l3proto, l4proto))
136 return -ENOSPC; 136 return -ENOSPC;
137 137
138 if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL])) 138 if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
139 return -ENOSPC; 139 return -ENOSPC;
140 140
141 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) 141 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
@@ -146,7 +146,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
146 l3proto, l4proto)) 146 l3proto, l4proto))
147 return -ENOSPC; 147 return -ENOSPC;
148 148
149 if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY])) 149 if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
150 return -ENOSPC; 150 return -ENOSPC;
151 151
152 if (test_bit(IPS_ASSURED_BIT, &ct->status)) 152 if (test_bit(IPS_ASSURED_BIT, &ct->status))
@@ -228,7 +228,7 @@ static void *exp_seq_start(struct seq_file *s, loff_t *pos)
228 228
229static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) 229static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
230{ 230{
231 struct list_head *e = v; 231 struct list_head *e = v;
232 232
233 ++*pos; 233 ++*pos;
234 e = e->next; 234 e = e->next;
@@ -262,7 +262,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
262 print_tuple(s, &exp->tuple, 262 print_tuple(s, &exp->tuple,
263 __nf_ct_l3proto_find(exp->tuple.src.l3num), 263 __nf_ct_l3proto_find(exp->tuple.src.l3num),
264 __nf_ct_l4proto_find(exp->tuple.src.l3num, 264 __nf_ct_l4proto_find(exp->tuple.src.l3num,
265 exp->tuple.dst.protonum)); 265 exp->tuple.dst.protonum));
266 return seq_putc(s, '\n'); 266 return seq_putc(s, '\n');
267} 267}
268 268
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index db9e7c45d3b4..677b6c80c618 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -101,9 +101,9 @@ static int icmp_packet(struct nf_conn *ct,
101 unsigned int hooknum) 101 unsigned int hooknum)
102{ 102{
103 /* Try to delete connection immediately after all replies: 103 /* Try to delete connection immediately after all replies:
104 won't actually vanish as we still have skb, and del_timer 104 won't actually vanish as we still have skb, and del_timer
105 means this will only run once even if count hits zero twice 105 means this will only run once even if count hits zero twice
106 (theoretically possible with SMP) */ 106 (theoretically possible with SMP) */
107 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { 107 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
108 if (atomic_dec_and_test(&ct->proto.icmp.count) 108 if (atomic_dec_and_test(&ct->proto.icmp.count)
109 && del_timer(&ct->timeout)) 109 && del_timer(&ct->timeout))
@@ -144,8 +144,8 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
144/* Returns conntrack if it dealt with ICMP, and filled in skb fields */ 144/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
145static int 145static int
146icmp_error_message(struct sk_buff *skb, 146icmp_error_message(struct sk_buff *skb,
147 enum ip_conntrack_info *ctinfo, 147 enum ip_conntrack_info *ctinfo,
148 unsigned int hooknum) 148 unsigned int hooknum)
149{ 149{
150 struct nf_conntrack_tuple innertuple, origtuple; 150 struct nf_conntrack_tuple innertuple, origtuple;
151 struct { 151 struct {
@@ -181,9 +181,9 @@ icmp_error_message(struct sk_buff *skb,
181 return -NF_ACCEPT; 181 return -NF_ACCEPT;
182 } 182 }
183 183
184 /* Ordinarily, we'd expect the inverted tupleproto, but it's 184 /* Ordinarily, we'd expect the inverted tupleproto, but it's
185 been preserved inside the ICMP. */ 185 been preserved inside the ICMP. */
186 if (!nf_ct_invert_tuple(&innertuple, &origtuple, 186 if (!nf_ct_invert_tuple(&innertuple, &origtuple,
187 &nf_conntrack_l3proto_ipv4, innerproto)) { 187 &nf_conntrack_l3proto_ipv4, innerproto)) {
188 DEBUGP("icmp_error_message: no match\n"); 188 DEBUGP("icmp_error_message: no match\n");
189 return -NF_ACCEPT; 189 return -NF_ACCEPT;
@@ -212,10 +212,10 @@ icmp_error_message(struct sk_buff *skb,
212 *ctinfo += IP_CT_IS_REPLY; 212 *ctinfo += IP_CT_IS_REPLY;
213 } 213 }
214 214
215 /* Update skb to refer to this connection */ 215 /* Update skb to refer to this connection */
216 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 216 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
217 skb->nfctinfo = *ctinfo; 217 skb->nfctinfo = *ctinfo;
218 return -NF_ACCEPT; 218 return -NF_ACCEPT;
219} 219}
220 220
221/* Small and modified version of icmp_rcv */ 221/* Small and modified version of icmp_rcv */
@@ -306,7 +306,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
306 if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) 306 if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
307 return -EINVAL; 307 return -EINVAL;
308 308
309 tuple->dst.u.icmp.type = 309 tuple->dst.u.icmp.type =
310 *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]); 310 *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]);
311 tuple->dst.u.icmp.code = 311 tuple->dst.u.icmp.code =
312 *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); 312 *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
@@ -332,7 +332,7 @@ static struct ctl_table icmp_sysctl_table[] = {
332 .mode = 0644, 332 .mode = 0644,
333 .proc_handler = &proc_dointvec_jiffies, 333 .proc_handler = &proc_dointvec_jiffies,
334 }, 334 },
335 { 335 {
336 .ctl_name = 0 336 .ctl_name = 0
337 } 337 }
338}; 338};
@@ -346,7 +346,7 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
346 .mode = 0644, 346 .mode = 0644,
347 .proc_handler = &proc_dointvec_jiffies, 347 .proc_handler = &proc_dointvec_jiffies,
348 }, 348 },
349 { 349 {
350 .ctl_name = 0 350 .ctl_name = 0
351 } 351 }
352}; 352};
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 998b2557692c..cf1010827be1 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -452,8 +452,8 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
452 (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); 452 (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
453 453
454 /* Redirects on non-null nats must be dropped, else they'll 454 /* Redirects on non-null nats must be dropped, else they'll
455 start talking to each other without our translation, and be 455 start talking to each other without our translation, and be
456 confused... --RR */ 456 confused... --RR */
457 if (inside->icmp.type == ICMP_REDIRECT) { 457 if (inside->icmp.type == ICMP_REDIRECT) {
458 /* If NAT isn't finished, assume it and drop. */ 458 /* If NAT isn't finished, assume it and drop. */
459 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) 459 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
@@ -469,13 +469,13 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
469 if (!nf_ct_get_tuple(*pskb, 469 if (!nf_ct_get_tuple(*pskb,
470 (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr), 470 (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
471 (*pskb)->nh.iph->ihl*4 + 471 (*pskb)->nh.iph->ihl*4 +
472 sizeof(struct icmphdr) + inside->ip.ihl*4, 472 sizeof(struct icmphdr) + inside->ip.ihl*4,
473 (u_int16_t)AF_INET, 473 (u_int16_t)AF_INET,
474 inside->ip.protocol, 474 inside->ip.protocol,
475 &inner, 475 &inner,
476 l3proto, 476 l3proto,
477 __nf_ct_l4proto_find((u_int16_t)PF_INET, 477 __nf_ct_l4proto_find((u_int16_t)PF_INET,
478 inside->ip.protocol))) 478 inside->ip.protocol)))
479 return 0; 479 return 0;
480 480
481 /* Change inner back to look like incoming packet. We do the 481 /* Change inner back to look like incoming packet. We do the
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index fb9ab0114c23..9cbf3f9be13b 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -256,7 +256,7 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
256 if (set_h245_addr(pskb, data, dataoff, taddr, 256 if (set_h245_addr(pskb, data, dataoff, taddr,
257 &ct->tuplehash[!dir].tuple.dst.u3, 257 &ct->tuplehash[!dir].tuple.dst.u3,
258 htons((port & htons(1)) ? nated_port + 1 : 258 htons((port & htons(1)) ? nated_port + 1 :
259 nated_port)) == 0) { 259 nated_port)) == 0) {
260 /* Save ports */ 260 /* Save ports */
261 info->rtp_port[i][dir] = rtp_port; 261 info->rtp_port[i][dir] = rtp_port;
262 info->rtp_port[i][!dir] = htons(nated_port); 262 info->rtp_port[i][!dir] = htons(nated_port);
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index dc6738bdfab7..49a90c39ffce 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -179,7 +179,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
179 tcph->check = tcp_v4_check(datalen, 179 tcph->check = tcp_v4_check(datalen,
180 iph->saddr, iph->daddr, 180 iph->saddr, iph->daddr,
181 csum_partial((char *)tcph, 181 csum_partial((char *)tcph,
182 datalen, 0)); 182 datalen, 0));
183 } else 183 } else
184 nf_proto_csum_replace2(&tcph->check, *pskb, 184 nf_proto_csum_replace2(&tcph->check, *pskb,
185 htons(oldlen), htons(datalen), 1); 185 htons(oldlen), htons(datalen), 1);
@@ -223,7 +223,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
223 /* UDP helpers might accidentally mangle the wrong packet */ 223 /* UDP helpers might accidentally mangle the wrong packet */
224 iph = (*pskb)->nh.iph; 224 iph = (*pskb)->nh.iph;
225 if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + 225 if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
226 match_offset + match_len) 226 match_offset + match_len)
227 return 0; 227 return 0;
228 228
229 if (!skb_make_writable(pskb, (*pskb)->len)) 229 if (!skb_make_writable(pskb, (*pskb)->len))
@@ -252,9 +252,9 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb,
252 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { 252 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
253 udph->check = 0; 253 udph->check = 0;
254 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 254 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
255 datalen, IPPROTO_UDP, 255 datalen, IPPROTO_UDP,
256 csum_partial((char *)udph, 256 csum_partial((char *)udph,
257 datalen, 0)); 257 datalen, 0));
258 if (!udph->check) 258 if (!udph->check)
259 udph->check = CSUM_MANGLED_0; 259 udph->check = CSUM_MANGLED_0;
260 } else 260 } else
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 5df4fcae3ab6..7ba341c22eaa 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -184,10 +184,10 @@ pptp_outbound_pkt(struct sk_buff **pskb,
184 184
185 /* mangle packet */ 185 /* mangle packet */
186 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, 186 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
187 cid_off + sizeof(struct pptp_pkt_hdr) + 187 cid_off + sizeof(struct pptp_pkt_hdr) +
188 sizeof(struct PptpControlHeader), 188 sizeof(struct PptpControlHeader),
189 sizeof(new_callid), (char *)&new_callid, 189 sizeof(new_callid), (char *)&new_callid,
190 sizeof(new_callid)) == 0) 190 sizeof(new_callid)) == 0)
191 return NF_DROP; 191 return NF_DROP;
192 return NF_ACCEPT; 192 return NF_ACCEPT;
193} 193}
@@ -276,7 +276,7 @@ pptp_inbound_pkt(struct sk_buff **pskb,
276 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); 276 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
277 277
278 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, 278 if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
279 pcid_off + sizeof(struct pptp_pkt_hdr) + 279 pcid_off + sizeof(struct pptp_pkt_hdr) +
280 sizeof(struct PptpControlHeader), 280 sizeof(struct PptpControlHeader),
281 sizeof(new_pcid), (char *)&new_pcid, 281 sizeof(new_pcid), (char *)&new_pcid,
282 sizeof(new_pcid)) == 0) 282 sizeof(new_pcid)) == 0)
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index dcfd772972d7..6bc2f06de055 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -44,7 +44,7 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
44 44
45 for (i = 0; i < range_size; i++, id++) { 45 for (i = 0; i < range_size; i++, id++) {
46 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + 46 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
47 (id % range_size)); 47 (id % range_size));
48 if (!nf_nat_used_tuple(tuple, ct)) 48 if (!nf_nat_used_tuple(tuple, ct))
49 return 1; 49 return 1;
50 } 50 }
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 7f95b4e2eb31..147a4370cf03 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -56,8 +56,8 @@ static struct
56 /* PRE_ROUTING */ 56 /* PRE_ROUTING */
57 { 57 {
58 .entry = { 58 .entry = {
59 .target_offset = sizeof(struct ipt_entry), 59 .target_offset = sizeof(struct ipt_entry),
60 .next_offset = sizeof(struct ipt_standard), 60 .next_offset = sizeof(struct ipt_standard),
61 }, 61 },
62 .target = { 62 .target = {
63 .target = { 63 .target = {
@@ -71,8 +71,8 @@ static struct
71 /* POST_ROUTING */ 71 /* POST_ROUTING */
72 { 72 {
73 .entry = { 73 .entry = {
74 .target_offset = sizeof(struct ipt_entry), 74 .target_offset = sizeof(struct ipt_entry),
75 .next_offset = sizeof(struct ipt_standard), 75 .next_offset = sizeof(struct ipt_standard),
76 }, 76 },
77 .target = { 77 .target = {
78 .target = { 78 .target = {
@@ -86,8 +86,8 @@ static struct
86 /* LOCAL_OUT */ 86 /* LOCAL_OUT */
87 { 87 {
88 .entry = { 88 .entry = {
89 .target_offset = sizeof(struct ipt_entry), 89 .target_offset = sizeof(struct ipt_entry),
90 .next_offset = sizeof(struct ipt_standard), 90 .next_offset = sizeof(struct ipt_standard),
91 }, 91 },
92 .target = { 92 .target = {
93 .target = { 93 .target = {
@@ -145,7 +145,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
145 145
146 /* Connection must be valid and new. */ 146 /* Connection must be valid and new. */
147 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 147 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
148 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 148 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
149 NF_CT_ASSERT(out); 149 NF_CT_ASSERT(out);
150 150
151 return nf_nat_setup_info(ct, &mr->range[0], hooknum); 151 return nf_nat_setup_info(ct, &mr->range[0], hooknum);
@@ -256,8 +256,8 @@ alloc_null_binding(struct nf_conn *ct,
256 256
257unsigned int 257unsigned int
258alloc_null_binding_confirmed(struct nf_conn *ct, 258alloc_null_binding_confirmed(struct nf_conn *ct,
259 struct nf_nat_info *info, 259 struct nf_nat_info *info,
260 unsigned int hooknum) 260 unsigned int hooknum)
261{ 261{
262 __be32 ip 262 __be32 ip
263 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC 263 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 3d524b957310..b12cd7c314ca 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -90,7 +90,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
90 return 1; 90 return 1;
91 91
92 if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, 92 if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
93 matchoff, matchlen, addr, addrlen)) 93 matchoff, matchlen, addr, addrlen))
94 return 0; 94 return 0;
95 *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); 95 *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
96 return 1; 96 return 1;
@@ -151,7 +151,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb,
151 return 0; 151 return 0;
152 152
153 if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, 153 if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
154 matchoff, matchlen, buffer, bufflen)) 154 matchoff, matchlen, buffer, bufflen))
155 return 0; 155 return 0;
156 156
157 /* We need to reload this. Thanks Patrick. */ 157 /* We need to reload this. Thanks Patrick. */
@@ -172,7 +172,7 @@ static int mangle_content_len(struct sk_buff **pskb,
172 172
173 /* Get actual SDP lenght */ 173 /* Get actual SDP lenght */
174 if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, 174 if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff,
175 &matchlen, POS_SDP_HEADER) > 0) { 175 &matchlen, POS_SDP_HEADER) > 0) {
176 176
177 /* since ct_sip_get_info() give us a pointer passing 'v=' 177 /* since ct_sip_get_info() give us a pointer passing 'v='
178 we need to add 2 bytes in this count. */ 178 we need to add 2 bytes in this count. */
@@ -180,7 +180,7 @@ static int mangle_content_len(struct sk_buff **pskb,
180 180
181 /* Now, update SDP length */ 181 /* Now, update SDP length */
182 if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, 182 if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff,
183 &matchlen, POS_CONTENT) > 0) { 183 &matchlen, POS_CONTENT) > 0) {
184 184
185 bufflen = sprintf(buffer, "%u", c_len); 185 bufflen = sprintf(buffer, "%u", c_len);
186 return nf_nat_mangle_udp_packet(pskb, ct, ctinfo, 186 return nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
@@ -205,17 +205,17 @@ static unsigned int mangle_sdp(struct sk_buff **pskb,
205 /* Mangle owner and contact info. */ 205 /* Mangle owner and contact info. */
206 bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); 206 bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip));
207 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, 207 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
208 buffer, bufflen, POS_OWNER_IP4)) 208 buffer, bufflen, POS_OWNER_IP4))
209 return 0; 209 return 0;
210 210
211 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, 211 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
212 buffer, bufflen, POS_CONNECTION_IP4)) 212 buffer, bufflen, POS_CONNECTION_IP4))
213 return 0; 213 return 0;
214 214
215 /* Mangle media port. */ 215 /* Mangle media port. */
216 bufflen = sprintf(buffer, "%u", port); 216 bufflen = sprintf(buffer, "%u", port);
217 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, 217 if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
218 buffer, bufflen, POS_MEDIA)) 218 buffer, bufflen, POS_MEDIA))
219 return 0; 219 return 0;
220 220
221 return mangle_content_len(pskb, ctinfo, ct, dptr); 221 return mangle_content_len(pskb, ctinfo, ct, dptr);
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index f12528fe1bf9..ce5c4939a6ee 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -150,8 +150,8 @@ struct asn1_octstr
150}; 150};
151 151
152static void asn1_open(struct asn1_ctx *ctx, 152static void asn1_open(struct asn1_ctx *ctx,
153 unsigned char *buf, 153 unsigned char *buf,
154 unsigned int len) 154 unsigned int len)
155{ 155{
156 ctx->begin = buf; 156 ctx->begin = buf;
157 ctx->end = buf + len; 157 ctx->end = buf + len;
@@ -186,9 +186,9 @@ static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
186} 186}
187 187
188static unsigned char asn1_id_decode(struct asn1_ctx *ctx, 188static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
189 unsigned int *cls, 189 unsigned int *cls,
190 unsigned int *con, 190 unsigned int *con,
191 unsigned int *tag) 191 unsigned int *tag)
192{ 192{
193 unsigned char ch; 193 unsigned char ch;
194 194
@@ -207,8 +207,8 @@ static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
207} 207}
208 208
209static unsigned char asn1_length_decode(struct asn1_ctx *ctx, 209static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
210 unsigned int *def, 210 unsigned int *def,
211 unsigned int *len) 211 unsigned int *len)
212{ 212{
213 unsigned char ch, cnt; 213 unsigned char ch, cnt;
214 214
@@ -239,10 +239,10 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
239} 239}
240 240
241static unsigned char asn1_header_decode(struct asn1_ctx *ctx, 241static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
242 unsigned char **eoc, 242 unsigned char **eoc,
243 unsigned int *cls, 243 unsigned int *cls,
244 unsigned int *con, 244 unsigned int *con,
245 unsigned int *tag) 245 unsigned int *tag)
246{ 246{
247 unsigned int def, len; 247 unsigned int def, len;
248 248
@@ -297,8 +297,8 @@ static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
297} 297}
298 298
299static unsigned char asn1_long_decode(struct asn1_ctx *ctx, 299static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
300 unsigned char *eoc, 300 unsigned char *eoc,
301 long *integer) 301 long *integer)
302{ 302{
303 unsigned char ch; 303 unsigned char ch;
304 unsigned int len; 304 unsigned int len;
@@ -325,8 +325,8 @@ static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
325} 325}
326 326
327static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, 327static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
328 unsigned char *eoc, 328 unsigned char *eoc,
329 unsigned int *integer) 329 unsigned int *integer)
330{ 330{
331 unsigned char ch; 331 unsigned char ch;
332 unsigned int len; 332 unsigned int len;
@@ -354,8 +354,8 @@ static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
354} 354}
355 355
356static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, 356static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
357 unsigned char *eoc, 357 unsigned char *eoc,
358 unsigned long *integer) 358 unsigned long *integer)
359{ 359{
360 unsigned char ch; 360 unsigned char ch;
361 unsigned int len; 361 unsigned int len;
@@ -383,9 +383,9 @@ static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
383} 383}
384 384
385static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, 385static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
386 unsigned char *eoc, 386 unsigned char *eoc,
387 unsigned char **octets, 387 unsigned char **octets,
388 unsigned int *len) 388 unsigned int *len)
389{ 389{
390 unsigned char *ptr; 390 unsigned char *ptr;
391 391
@@ -411,7 +411,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
411} 411}
412 412
413static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, 413static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
414 unsigned long *subid) 414 unsigned long *subid)
415{ 415{
416 unsigned char ch; 416 unsigned char ch;
417 417
@@ -428,9 +428,9 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
428} 428}
429 429
430static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, 430static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
431 unsigned char *eoc, 431 unsigned char *eoc,
432 unsigned long **oid, 432 unsigned long **oid,
433 unsigned int *len) 433 unsigned int *len)
434{ 434{
435 unsigned long subid; 435 unsigned long subid;
436 unsigned int size; 436 unsigned int size;
@@ -611,9 +611,9 @@ struct snmp_v1_trap
611#define SERR_EOM 2 611#define SERR_EOM 2
612 612
613static inline void mangle_address(unsigned char *begin, 613static inline void mangle_address(unsigned char *begin,
614 unsigned char *addr, 614 unsigned char *addr,
615 const struct oct1_map *map, 615 const struct oct1_map *map,
616 __sum16 *check); 616 __sum16 *check);
617struct snmp_cnv 617struct snmp_cnv
618{ 618{
619 unsigned int class; 619 unsigned int class;
@@ -644,8 +644,8 @@ static struct snmp_cnv snmp_conv [] =
644}; 644};
645 645
646static unsigned char snmp_tag_cls2syntax(unsigned int tag, 646static unsigned char snmp_tag_cls2syntax(unsigned int tag,
647 unsigned int cls, 647 unsigned int cls,
648 unsigned short *syntax) 648 unsigned short *syntax)
649{ 649{
650 struct snmp_cnv *cnv; 650 struct snmp_cnv *cnv;
651 651
@@ -662,7 +662,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag,
662} 662}
663 663
664static unsigned char snmp_object_decode(struct asn1_ctx *ctx, 664static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
665 struct snmp_object **obj) 665 struct snmp_object **obj)
666{ 666{
667 unsigned int cls, con, tag, len, idlen; 667 unsigned int cls, con, tag, len, idlen;
668 unsigned short type; 668 unsigned short type;
@@ -714,7 +714,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
714 return 0; 714 return 0;
715 } 715 }
716 *obj = kmalloc(sizeof(struct snmp_object) + len, 716 *obj = kmalloc(sizeof(struct snmp_object) + len,
717 GFP_ATOMIC); 717 GFP_ATOMIC);
718 if (*obj == NULL) { 718 if (*obj == NULL) {
719 kfree(id); 719 kfree(id);
720 if (net_ratelimit()) 720 if (net_ratelimit())
@@ -730,7 +730,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
730 return 0; 730 return 0;
731 } 731 }
732 *obj = kmalloc(sizeof(struct snmp_object) + len, 732 *obj = kmalloc(sizeof(struct snmp_object) + len,
733 GFP_ATOMIC); 733 GFP_ATOMIC);
734 if (*obj == NULL) { 734 if (*obj == NULL) {
735 kfree(id); 735 kfree(id);
736 if (net_ratelimit()) 736 if (net_ratelimit())
@@ -834,7 +834,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
834} 834}
835 835
836static unsigned char snmp_request_decode(struct asn1_ctx *ctx, 836static unsigned char snmp_request_decode(struct asn1_ctx *ctx,
837 struct snmp_request *request) 837 struct snmp_request *request)
838{ 838{
839 unsigned int cls, con, tag; 839 unsigned int cls, con, tag;
840 unsigned char *end; 840 unsigned char *end;
@@ -874,9 +874,9 @@ static unsigned char snmp_request_decode(struct asn1_ctx *ctx,
874 * code example in the draft. 874 * code example in the draft.
875 */ 875 */
876static void fast_csum(__sum16 *csum, 876static void fast_csum(__sum16 *csum,
877 const unsigned char *optr, 877 const unsigned char *optr,
878 const unsigned char *nptr, 878 const unsigned char *nptr,
879 int offset) 879 int offset)
880{ 880{
881 unsigned char s[4]; 881 unsigned char s[4];
882 882
@@ -899,9 +899,9 @@ static void fast_csum(__sum16 *csum,
899 * - addr points to the start of the address 899 * - addr points to the start of the address
900 */ 900 */
901static inline void mangle_address(unsigned char *begin, 901static inline void mangle_address(unsigned char *begin,
902 unsigned char *addr, 902 unsigned char *addr,
903 const struct oct1_map *map, 903 const struct oct1_map *map,
904 __sum16 *check) 904 __sum16 *check)
905{ 905{
906 if (map->from == NOCT1(addr)) { 906 if (map->from == NOCT1(addr)) {
907 u_int32_t old; 907 u_int32_t old;
@@ -914,7 +914,7 @@ static inline void mangle_address(unsigned char *begin,
914 /* Update UDP checksum if being used */ 914 /* Update UDP checksum if being used */
915 if (*check) { 915 if (*check) {
916 fast_csum(check, 916 fast_csum(check,
917 &map->from, &map->to, addr - begin); 917 &map->from, &map->to, addr - begin);
918 918
919 } 919 }
920 920
@@ -925,9 +925,9 @@ static inline void mangle_address(unsigned char *begin,
925} 925}
926 926
927static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, 927static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
928 struct snmp_v1_trap *trap, 928 struct snmp_v1_trap *trap,
929 const struct oct1_map *map, 929 const struct oct1_map *map,
930 __sum16 *check) 930 __sum16 *check)
931{ 931{
932 unsigned int cls, con, tag, len; 932 unsigned int cls, con, tag, len;
933 unsigned char *end; 933 unsigned char *end;
@@ -1019,9 +1019,9 @@ static void hex_dump(unsigned char *buf, size_t len)
1019 * (And this is the fucking 'basic' method). 1019 * (And this is the fucking 'basic' method).
1020 */ 1020 */
1021static int snmp_parse_mangle(unsigned char *msg, 1021static int snmp_parse_mangle(unsigned char *msg,
1022 u_int16_t len, 1022 u_int16_t len,
1023 const struct oct1_map *map, 1023 const struct oct1_map *map,
1024 __sum16 *check) 1024 __sum16 *check)
1025{ 1025{
1026 unsigned char *eoc, *end; 1026 unsigned char *eoc, *end;
1027 unsigned int cls, con, tag, vers, pdutype; 1027 unsigned int cls, con, tag, vers, pdutype;
@@ -1191,8 +1191,8 @@ static int snmp_parse_mangle(unsigned char *msg,
1191 * SNMP translation routine. 1191 * SNMP translation routine.
1192 */ 1192 */
1193static int snmp_translate(struct nf_conn *ct, 1193static int snmp_translate(struct nf_conn *ct,
1194 enum ip_conntrack_info ctinfo, 1194 enum ip_conntrack_info ctinfo,
1195 struct sk_buff **pskb) 1195 struct sk_buff **pskb)
1196{ 1196{
1197 struct iphdr *iph = (*pskb)->nh.iph; 1197 struct iphdr *iph = (*pskb)->nh.iph;
1198 struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); 1198 struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
@@ -1219,7 +1219,7 @@ static int snmp_translate(struct nf_conn *ct,
1219 return NF_ACCEPT; 1219 return NF_ACCEPT;
1220 1220
1221 if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), 1221 if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
1222 paylen, &map, &udph->check)) { 1222 paylen, &map, &udph->check)) {
1223 if (net_ratelimit()) 1223 if (net_ratelimit())
1224 printk(KERN_WARNING "bsalg: parser failed\n"); 1224 printk(KERN_WARNING "bsalg: parser failed\n");
1225 return NF_DROP; 1225 return NF_DROP;
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 5a964a167c13..e4d3ef17d45b 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -96,8 +96,8 @@ nf_nat_fn(unsigned int hooknum,
96 protocol. 8) --RR */ 96 protocol. 8) --RR */
97 if (!ct) { 97 if (!ct) {
98 /* Exception: ICMP redirect to new connection (not in 98 /* Exception: ICMP redirect to new connection (not in
99 hash table yet). We must not let this through, in 99 hash table yet). We must not let this through, in
100 case we're doing NAT to the same network. */ 100 case we're doing NAT to the same network. */
101 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { 101 if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
102 struct icmphdr _hdr, *hp; 102 struct icmphdr _hdr, *hp;
103 103
@@ -141,7 +141,7 @@ nf_nat_fn(unsigned int hooknum,
141 if (unlikely(nf_ct_is_confirmed(ct))) 141 if (unlikely(nf_ct_is_confirmed(ct)))
142 /* NAT module was loaded late */ 142 /* NAT module was loaded late */
143 ret = alloc_null_binding_confirmed(ct, info, 143 ret = alloc_null_binding_confirmed(ct, info,
144 hooknum); 144 hooknum);
145 else if (hooknum == NF_IP_LOCAL_IN) 145 else if (hooknum == NF_IP_LOCAL_IN)
146 /* LOCAL_IN hook doesn't have a chain! */ 146 /* LOCAL_IN hook doesn't have a chain! */
147 ret = alloc_null_binding(ct, info, hooknum); 147 ret = alloc_null_binding(ct, info, hooknum);
@@ -171,10 +171,10 @@ nf_nat_fn(unsigned int hooknum,
171 171
172static unsigned int 172static unsigned int
173nf_nat_in(unsigned int hooknum, 173nf_nat_in(unsigned int hooknum,
174 struct sk_buff **pskb, 174 struct sk_buff **pskb,
175 const struct net_device *in, 175 const struct net_device *in,
176 const struct net_device *out, 176 const struct net_device *out,
177 int (*okfn)(struct sk_buff *)) 177 int (*okfn)(struct sk_buff *))
178{ 178{
179 unsigned int ret; 179 unsigned int ret;
180 __be32 daddr = (*pskb)->nh.iph->daddr; 180 __be32 daddr = (*pskb)->nh.iph->daddr;
@@ -269,9 +269,9 @@ nf_nat_adjust(unsigned int hooknum,
269 269
270 ct = nf_ct_get(*pskb, &ctinfo); 270 ct = nf_ct_get(*pskb, &ctinfo);
271 if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { 271 if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
272 DEBUGP("nf_nat_standalone: adjusting sequence number\n"); 272 DEBUGP("nf_nat_standalone: adjusting sequence number\n");
273 if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) 273 if (!nf_nat_seq_adjust(pskb, ct, ctinfo))
274 return NF_DROP; 274 return NF_DROP;
275 } 275 }
276 return NF_ACCEPT; 276 return NF_ACCEPT;
277} 277}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index cd873da54cbe..ccb199e9dd8b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -266,7 +266,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
266 266
267 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) 267 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
268 seq_printf(seq, " %lu", 268 seq_printf(seq, " %lu",
269 fold_field((void **) ip_statistics, 269 fold_field((void **) ip_statistics,
270 snmp4_ipstats_list[i].entry)); 270 snmp4_ipstats_list[i].entry));
271 271
272 seq_puts(seq, "\nIcmp:"); 272 seq_puts(seq, "\nIcmp:");
@@ -276,7 +276,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
276 seq_puts(seq, "\nIcmp:"); 276 seq_puts(seq, "\nIcmp:");
277 for (i = 0; snmp4_icmp_list[i].name != NULL; i++) 277 for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
278 seq_printf(seq, " %lu", 278 seq_printf(seq, " %lu",
279 fold_field((void **) icmp_statistics, 279 fold_field((void **) icmp_statistics,
280 snmp4_icmp_list[i].entry)); 280 snmp4_icmp_list[i].entry));
281 281
282 seq_puts(seq, "\nTcp:"); 282 seq_puts(seq, "\nTcp:");
@@ -288,7 +288,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
288 /* MaxConn field is signed, RFC 2012 */ 288 /* MaxConn field is signed, RFC 2012 */
289 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) 289 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
290 seq_printf(seq, " %ld", 290 seq_printf(seq, " %ld",
291 fold_field((void **) tcp_statistics, 291 fold_field((void **) tcp_statistics,
292 snmp4_tcp_list[i].entry)); 292 snmp4_tcp_list[i].entry));
293 else 293 else
294 seq_printf(seq, " %lu", 294 seq_printf(seq, " %lu",
@@ -303,7 +303,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
303 seq_puts(seq, "\nUdp:"); 303 seq_puts(seq, "\nUdp:");
304 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 304 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
305 seq_printf(seq, " %lu", 305 seq_printf(seq, " %lu",
306 fold_field((void **) udp_statistics, 306 fold_field((void **) udp_statistics,
307 snmp4_udp_list[i].entry)); 307 snmp4_udp_list[i].entry));
308 308
309 /* the UDP and UDP-Lite MIBs are the same */ 309 /* the UDP and UDP-Lite MIBs are the same */
@@ -348,7 +348,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
348 seq_puts(seq, "\nTcpExt:"); 348 seq_puts(seq, "\nTcpExt:");
349 for (i = 0; snmp4_net_list[i].name != NULL; i++) 349 for (i = 0; snmp4_net_list[i].name != NULL; i++)
350 seq_printf(seq, " %lu", 350 seq_printf(seq, " %lu",
351 fold_field((void **) net_statistics, 351 fold_field((void **) net_statistics,
352 snmp4_net_list[i].entry)); 352 snmp4_net_list[i].entry));
353 353
354 seq_putc(seq, '\n'); 354 seq_putc(seq, '\n');
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 05f5114828ea..6cd6340de8bd 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -74,7 +74,7 @@ int inet_add_protocol(struct net_protocol *prot, unsigned char protocol)
74/* 74/*
75 * Remove a protocol from the hash tables. 75 * Remove a protocol from the hash tables.
76 */ 76 */
77 77
78int inet_del_protocol(struct net_protocol *prot, unsigned char protocol) 78int inet_del_protocol(struct net_protocol *prot, unsigned char protocol)
79{ 79{
80 int hash, ret; 80 int hash, ret;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index fed6a1e7af9e..931084bfb572 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -95,7 +95,7 @@ static void raw_v4_hash(struct sock *sk)
95 95
96static void raw_v4_unhash(struct sock *sk) 96static void raw_v4_unhash(struct sock *sk)
97{ 97{
98 write_lock_bh(&raw_v4_lock); 98 write_lock_bh(&raw_v4_lock);
99 if (sk_del_node_init(sk)) 99 if (sk_del_node_init(sk))
100 sock_prot_dec_use(sk->sk_prot); 100 sock_prot_dec_use(sk->sk_prot);
101 write_unlock_bh(&raw_v4_lock); 101 write_unlock_bh(&raw_v4_lock);
@@ -238,7 +238,7 @@ void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
238static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) 238static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
239{ 239{
240 /* Charge it to the socket. */ 240 /* Charge it to the socket. */
241 241
242 if (sock_queue_rcv_skb(sk, skb) < 0) { 242 if (sock_queue_rcv_skb(sk, skb) < 0) {
243 /* FIXME: increment a raw drops counter here */ 243 /* FIXME: increment a raw drops counter here */
244 kfree_skb(skb); 244 kfree_skb(skb);
@@ -263,7 +263,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
263} 263}
264 264
265static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, 265static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
266 struct rtable *rt, 266 struct rtable *rt,
267 unsigned int flags) 267 unsigned int flags)
268{ 268{
269 struct inet_sock *inet = inet_sk(sk); 269 struct inet_sock *inet = inet_sk(sk);
@@ -285,7 +285,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
285 skb = sock_alloc_send_skb(sk, length+hh_len+15, 285 skb = sock_alloc_send_skb(sk, length+hh_len+15,
286 flags&MSG_DONTWAIT, &err); 286 flags&MSG_DONTWAIT, &err);
287 if (skb == NULL) 287 if (skb == NULL)
288 goto error; 288 goto error;
289 skb_reserve(skb, hh_len); 289 skb_reserve(skb, hh_len);
290 290
291 skb->priority = sk->sk_priority; 291 skb->priority = sk->sk_priority;
@@ -326,7 +326,7 @@ error_fault:
326 kfree_skb(skb); 326 kfree_skb(skb);
327error: 327error:
328 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); 328 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
329 return err; 329 return err;
330} 330}
331 331
332static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg) 332static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
@@ -399,9 +399,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
399 err = -EOPNOTSUPP; 399 err = -EOPNOTSUPP;
400 if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */ 400 if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
401 goto out; /* compatibility */ 401 goto out; /* compatibility */
402 402
403 /* 403 /*
404 * Get and verify the address. 404 * Get and verify the address.
405 */ 405 */
406 406
407 if (msg->msg_namelen) { 407 if (msg->msg_namelen) {
@@ -426,7 +426,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
426 */ 426 */
427 } else { 427 } else {
428 err = -EDESTADDRREQ; 428 err = -EDESTADDRREQ;
429 if (sk->sk_state != TCP_ESTABLISHED) 429 if (sk->sk_state != TCP_ESTABLISHED)
430 goto out; 430 goto out;
431 daddr = inet->daddr; 431 daddr = inet->daddr;
432 } 432 }
@@ -480,7 +480,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
480 .saddr = saddr, 480 .saddr = saddr,
481 .tos = tos } }, 481 .tos = tos } },
482 .proto = inet->hdrincl ? IPPROTO_RAW : 482 .proto = inet->hdrincl ? IPPROTO_RAW :
483 sk->sk_protocol, 483 sk->sk_protocol,
484 }; 484 };
485 if (!inet->hdrincl) { 485 if (!inet->hdrincl) {
486 err = raw_probe_proto_opt(&fl, msg); 486 err = raw_probe_proto_opt(&fl, msg);
@@ -503,9 +503,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
503back_from_confirm: 503back_from_confirm:
504 504
505 if (inet->hdrincl) 505 if (inet->hdrincl)
506 err = raw_send_hdrinc(sk, msg->msg_iov, len, 506 err = raw_send_hdrinc(sk, msg->msg_iov, len,
507 rt, msg->msg_flags); 507 rt, msg->msg_flags);
508 508
509 else { 509 else {
510 if (!ipc.addr) 510 if (!ipc.addr)
511 ipc.addr = rt->rt_dst; 511 ipc.addr = rt->rt_dst;
@@ -538,7 +538,7 @@ do_confirm:
538 538
539static void raw_close(struct sock *sk, long timeout) 539static void raw_close(struct sock *sk, long timeout)
540{ 540{
541 /* 541 /*
542 * Raw sockets may have direct kernel refereneces. Kill them. 542 * Raw sockets may have direct kernel refereneces. Kill them.
543 */ 543 */
544 ip_ra_control(sk, 0, NULL); 544 ip_ra_control(sk, 0, NULL);
@@ -861,7 +861,7 @@ static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
861 861
862 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" 862 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
863 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", 863 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
864 i, src, srcp, dest, destp, sp->sk_state, 864 i, src, srcp, dest, destp, sp->sk_state,
865 atomic_read(&sp->sk_wmem_alloc), 865 atomic_read(&sp->sk_wmem_alloc),
866 atomic_read(&sp->sk_rmem_alloc), 866 atomic_read(&sp->sk_rmem_alloc),
867 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 867 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index baee304a3cb7..5b3834b38a2d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -20,7 +20,7 @@
20 * (rco@di.uminho.pt) Routing table insertion and update 20 * (rco@di.uminho.pt) Routing table insertion and update
21 * Linus Torvalds : Rewrote bits to be sensible 21 * Linus Torvalds : Rewrote bits to be sensible
22 * Alan Cox : Added BSD route gw semantics 22 * Alan Cox : Added BSD route gw semantics
23 * Alan Cox : Super /proc >4K 23 * Alan Cox : Super /proc >4K
24 * Alan Cox : MTU in route table 24 * Alan Cox : MTU in route table
25 * Alan Cox : MSS actually. Also added the window 25 * Alan Cox : MSS actually. Also added the window
26 * clamper. 26 * clamper.
@@ -38,7 +38,7 @@
38 * Alan Cox : Faster /proc handling 38 * Alan Cox : Faster /proc handling
39 * Alexey Kuznetsov : Massive rework to support tree based routing, 39 * Alexey Kuznetsov : Massive rework to support tree based routing,
40 * routing caches and better behaviour. 40 * routing caches and better behaviour.
41 * 41 *
42 * Olaf Erb : irtt wasn't being copied right. 42 * Olaf Erb : irtt wasn't being copied right.
43 * Bjorn Ekwall : Kerneld route support. 43 * Bjorn Ekwall : Kerneld route support.
44 * Alan Cox : Multicast fixed (I hope) 44 * Alan Cox : Multicast fixed (I hope)
@@ -289,7 +289,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
289{ 289{
290 struct rt_cache_iter_state *st = rcu_dereference(seq->private); 290 struct rt_cache_iter_state *st = rcu_dereference(seq->private);
291 291
292 r = r->u.rt_next; 292 r = r->u.dst.rt_next;
293 while (!r) { 293 while (!r) {
294 rcu_read_unlock_bh(); 294 rcu_read_unlock_bh();
295 if (--st->bucket < 0) 295 if (--st->bucket < 0)
@@ -361,8 +361,8 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
361 dev_queue_xmit) : 0, 361 dev_queue_xmit) : 0,
362 r->rt_spec_dst); 362 r->rt_spec_dst);
363 seq_printf(seq, "%-127s\n", temp); 363 seq_printf(seq, "%-127s\n", temp);
364 } 364 }
365 return 0; 365 return 0;
366} 366}
367 367
368static struct seq_operations rt_cache_seq_ops = { 368static struct seq_operations rt_cache_seq_ops = {
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
429 return &per_cpu(rt_cache_stat, cpu); 429 return &per_cpu(rt_cache_stat, cpu);
430 } 430 }
431 return NULL; 431 return NULL;
432 432
433} 433}
434 434
435static void rt_cpu_seq_stop(struct seq_file *seq, void *v) 435static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
@@ -445,7 +445,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
445 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); 445 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
446 return 0; 446 return 0;
447 } 447 }
448 448
449 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " 449 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
450 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", 450 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
451 atomic_read(&ipv4_dst_ops.entries), 451 atomic_read(&ipv4_dst_ops.entries),
@@ -459,7 +459,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
459 459
460 st->out_hit, 460 st->out_hit,
461 st->out_slow_tot, 461 st->out_slow_tot,
462 st->out_slow_mc, 462 st->out_slow_mc,
463 463
464 st->gc_total, 464 st->gc_total,
465 st->gc_ignored, 465 st->gc_ignored,
@@ -493,7 +493,7 @@ static struct file_operations rt_cpu_seq_fops = {
493}; 493};
494 494
495#endif /* CONFIG_PROC_FS */ 495#endif /* CONFIG_PROC_FS */
496 496
497static __inline__ void rt_free(struct rtable *rt) 497static __inline__ void rt_free(struct rtable *rt)
498{ 498{
499 multipath_remove(rt); 499 multipath_remove(rt);
@@ -512,7 +512,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
512 /* Kill broadcast/multicast entries very aggresively, if they 512 /* Kill broadcast/multicast entries very aggresively, if they
513 collide in hash table with more useful entries */ 513 collide in hash table with more useful entries */
514 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && 514 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
515 rth->fl.iif && rth->u.rt_next; 515 rth->fl.iif && rth->u.dst.rt_next;
516} 516}
517 517
518static __inline__ int rt_valuable(struct rtable *rth) 518static __inline__ int rt_valuable(struct rtable *rth)
@@ -595,10 +595,10 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
595 if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 && 595 if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
596 compare_keys(&(*rthp)->fl, &expentry->fl)) { 596 compare_keys(&(*rthp)->fl, &expentry->fl)) {
597 if (*rthp == expentry) { 597 if (*rthp == expentry) {
598 *rthp = rth->u.rt_next; 598 *rthp = rth->u.dst.rt_next;
599 continue; 599 continue;
600 } else { 600 } else {
601 *rthp = rth->u.rt_next; 601 *rthp = rth->u.dst.rt_next;
602 rt_free(rth); 602 rt_free(rth);
603 if (removed_count) 603 if (removed_count)
604 ++(*removed_count); 604 ++(*removed_count);
@@ -606,9 +606,9 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
606 } else { 606 } else {
607 if (!((*rthp)->u.dst.flags & DST_BALANCED) && 607 if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
608 passedexpired && !nextstep) 608 passedexpired && !nextstep)
609 nextstep = &rth->u.rt_next; 609 nextstep = &rth->u.dst.rt_next;
610 610
611 rthp = &rth->u.rt_next; 611 rthp = &rth->u.dst.rt_next;
612 } 612 }
613 } 613 }
614 614
@@ -649,12 +649,12 @@ static void rt_check_expire(unsigned long dummy)
649 /* Entry is expired even if it is in use */ 649 /* Entry is expired even if it is in use */
650 if (time_before_eq(now, rth->u.dst.expires)) { 650 if (time_before_eq(now, rth->u.dst.expires)) {
651 tmo >>= 1; 651 tmo >>= 1;
652 rthp = &rth->u.rt_next; 652 rthp = &rth->u.dst.rt_next;
653 continue; 653 continue;
654 } 654 }
655 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) { 655 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
656 tmo >>= 1; 656 tmo >>= 1;
657 rthp = &rth->u.rt_next; 657 rthp = &rth->u.dst.rt_next;
658 continue; 658 continue;
659 } 659 }
660 660
@@ -668,12 +668,12 @@ static void rt_check_expire(unsigned long dummy)
668 if (!rthp) 668 if (!rthp)
669 break; 669 break;
670 } else { 670 } else {
671 *rthp = rth->u.rt_next; 671 *rthp = rth->u.dst.rt_next;
672 rt_free(rth); 672 rt_free(rth);
673 } 673 }
674#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ 674#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
675 *rthp = rth->u.rt_next; 675 *rthp = rth->u.dst.rt_next;
676 rt_free(rth); 676 rt_free(rth);
677#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ 677#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
678 } 678 }
679 spin_unlock(rt_hash_lock_addr(i)); 679 spin_unlock(rt_hash_lock_addr(i));
@@ -706,7 +706,7 @@ static void rt_run_flush(unsigned long dummy)
706 spin_unlock_bh(rt_hash_lock_addr(i)); 706 spin_unlock_bh(rt_hash_lock_addr(i));
707 707
708 for (; rth; rth = next) { 708 for (; rth; rth = next) {
709 next = rth->u.rt_next; 709 next = rth->u.dst.rt_next;
710 rt_free(rth); 710 rt_free(rth);
711 } 711 }
712 } 712 }
@@ -739,7 +739,7 @@ void rt_cache_flush(int delay)
739 739
740 if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay) 740 if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
741 tmo = 0; 741 tmo = 0;
742 742
743 if (delay > tmo) 743 if (delay > tmo)
744 delay = tmo; 744 delay = tmo;
745 } 745 }
@@ -840,7 +840,7 @@ static int rt_garbage_collect(void)
840 while ((rth = *rthp) != NULL) { 840 while ((rth = *rthp) != NULL) {
841 if (!rt_may_expire(rth, tmo, expire)) { 841 if (!rt_may_expire(rth, tmo, expire)) {
842 tmo >>= 1; 842 tmo >>= 1;
843 rthp = &rth->u.rt_next; 843 rthp = &rth->u.dst.rt_next;
844 continue; 844 continue;
845 } 845 }
846#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED 846#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
@@ -858,12 +858,12 @@ static int rt_garbage_collect(void)
858 if (!rthp) 858 if (!rthp)
859 break; 859 break;
860 } else { 860 } else {
861 *rthp = rth->u.rt_next; 861 *rthp = rth->u.dst.rt_next;
862 rt_free(rth); 862 rt_free(rth);
863 goal--; 863 goal--;
864 } 864 }
865#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ 865#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
866 *rthp = rth->u.rt_next; 866 *rthp = rth->u.dst.rt_next;
867 rt_free(rth); 867 rt_free(rth);
868 goal--; 868 goal--;
869#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ 869#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
@@ -947,13 +947,13 @@ restart:
947 if (compare_keys(&rth->fl, &rt->fl)) { 947 if (compare_keys(&rth->fl, &rt->fl)) {
948#endif 948#endif
949 /* Put it first */ 949 /* Put it first */
950 *rthp = rth->u.rt_next; 950 *rthp = rth->u.dst.rt_next;
951 /* 951 /*
952 * Since lookup is lockfree, the deletion 952 * Since lookup is lockfree, the deletion
953 * must be visible to another weakly ordered CPU before 953 * must be visible to another weakly ordered CPU before
954 * the insertion at the start of the hash chain. 954 * the insertion at the start of the hash chain.
955 */ 955 */
956 rcu_assign_pointer(rth->u.rt_next, 956 rcu_assign_pointer(rth->u.dst.rt_next,
957 rt_hash_table[hash].chain); 957 rt_hash_table[hash].chain);
958 /* 958 /*
959 * Since lookup is lockfree, the update writes 959 * Since lookup is lockfree, the update writes
@@ -983,7 +983,7 @@ restart:
983 983
984 chain_length++; 984 chain_length++;
985 985
986 rthp = &rth->u.rt_next; 986 rthp = &rth->u.dst.rt_next;
987 } 987 }
988 988
989 if (cand) { 989 if (cand) {
@@ -994,7 +994,7 @@ restart:
994 * only 2 entries per bucket. We will see. 994 * only 2 entries per bucket. We will see.
995 */ 995 */
996 if (chain_length > ip_rt_gc_elasticity) { 996 if (chain_length > ip_rt_gc_elasticity) {
997 *candp = cand->u.rt_next; 997 *candp = cand->u.dst.rt_next;
998 rt_free(cand); 998 rt_free(cand);
999 } 999 }
1000 } 1000 }
@@ -1034,13 +1034,13 @@ restart:
1034 } 1034 }
1035 } 1035 }
1036 1036
1037 rt->u.rt_next = rt_hash_table[hash].chain; 1037 rt->u.dst.rt_next = rt_hash_table[hash].chain;
1038#if RT_CACHE_DEBUG >= 2 1038#if RT_CACHE_DEBUG >= 2
1039 if (rt->u.rt_next) { 1039 if (rt->u.dst.rt_next) {
1040 struct rtable *trt; 1040 struct rtable *trt;
1041 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash, 1041 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
1042 NIPQUAD(rt->rt_dst)); 1042 NIPQUAD(rt->rt_dst));
1043 for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next) 1043 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
1044 printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst)); 1044 printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
1045 printk("\n"); 1045 printk("\n");
1046 } 1046 }
@@ -1104,7 +1104,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1104 return; 1104 return;
1105 } 1105 }
1106 } else 1106 } else
1107 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", 1107 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1108 __builtin_return_address(0)); 1108 __builtin_return_address(0));
1109 1109
1110 ip_select_fb_ident(iph); 1110 ip_select_fb_ident(iph);
@@ -1117,9 +1117,9 @@ static void rt_del(unsigned hash, struct rtable *rt)
1117 spin_lock_bh(rt_hash_lock_addr(hash)); 1117 spin_lock_bh(rt_hash_lock_addr(hash));
1118 ip_rt_put(rt); 1118 ip_rt_put(rt);
1119 for (rthp = &rt_hash_table[hash].chain; *rthp; 1119 for (rthp = &rt_hash_table[hash].chain; *rthp;
1120 rthp = &(*rthp)->u.rt_next) 1120 rthp = &(*rthp)->u.dst.rt_next)
1121 if (*rthp == rt) { 1121 if (*rthp == rt) {
1122 *rthp = rt->u.rt_next; 1122 *rthp = rt->u.dst.rt_next;
1123 rt_free(rt); 1123 rt_free(rt);
1124 break; 1124 break;
1125 } 1125 }
@@ -1167,7 +1167,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1167 rth->fl.fl4_src != skeys[i] || 1167 rth->fl.fl4_src != skeys[i] ||
1168 rth->fl.oif != ikeys[k] || 1168 rth->fl.oif != ikeys[k] ||
1169 rth->fl.iif != 0) { 1169 rth->fl.iif != 0) {
1170 rthp = &rth->u.rt_next; 1170 rthp = &rth->u.dst.rt_next;
1171 continue; 1171 continue;
1172 } 1172 }
1173 1173
@@ -1190,7 +1190,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1190 1190
1191 /* Copy all the information. */ 1191 /* Copy all the information. */
1192 *rt = *rth; 1192 *rt = *rth;
1193 INIT_RCU_HEAD(&rt->u.dst.rcu_head); 1193 INIT_RCU_HEAD(&rt->u.dst.rcu_head);
1194 rt->u.dst.__use = 1; 1194 rt->u.dst.__use = 1;
1195 atomic_set(&rt->u.dst.__refcnt, 1); 1195 atomic_set(&rt->u.dst.__refcnt, 1);
1196 rt->u.dst.child = NULL; 1196 rt->u.dst.child = NULL;
@@ -1225,11 +1225,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1225 rt_drop(rt); 1225 rt_drop(rt);
1226 goto do_next; 1226 goto do_next;
1227 } 1227 }
1228 1228
1229 netevent.old = &rth->u.dst; 1229 netevent.old = &rth->u.dst;
1230 netevent.new = &rt->u.dst; 1230 netevent.new = &rt->u.dst;
1231 call_netevent_notifiers(NETEVENT_REDIRECT, 1231 call_netevent_notifiers(NETEVENT_REDIRECT,
1232 &netevent); 1232 &netevent);
1233 1233
1234 rt_del(hash, rth); 1234 rt_del(hash, rth);
1235 if (!rt_intern_hash(hash, rt, &rt)) 1235 if (!rt_intern_hash(hash, rt, &rt))
@@ -1343,7 +1343,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1343#endif 1343#endif
1344 } 1344 }
1345out: 1345out:
1346 in_dev_put(in_dev); 1346 in_dev_put(in_dev);
1347} 1347}
1348 1348
1349static int ip_error(struct sk_buff *skb) 1349static int ip_error(struct sk_buff *skb)
@@ -1379,7 +1379,7 @@ static int ip_error(struct sk_buff *skb)
1379 1379
1380out: kfree_skb(skb); 1380out: kfree_skb(skb);
1381 return 0; 1381 return 0;
1382} 1382}
1383 1383
1384/* 1384/*
1385 * The last two values are not from the RFC but 1385 * The last two values are not from the RFC but
@@ -1392,7 +1392,7 @@ static const unsigned short mtu_plateau[] =
1392static __inline__ unsigned short guess_mtu(unsigned short old_mtu) 1392static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
1393{ 1393{
1394 int i; 1394 int i;
1395 1395
1396 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++) 1396 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1397 if (old_mtu > mtu_plateau[i]) 1397 if (old_mtu > mtu_plateau[i])
1398 return mtu_plateau[i]; 1398 return mtu_plateau[i];
@@ -1416,7 +1416,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1416 1416
1417 rcu_read_lock(); 1417 rcu_read_lock();
1418 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 1418 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1419 rth = rcu_dereference(rth->u.rt_next)) { 1419 rth = rcu_dereference(rth->u.dst.rt_next)) {
1420 if (rth->fl.fl4_dst == daddr && 1420 if (rth->fl.fl4_dst == daddr &&
1421 rth->fl.fl4_src == skeys[i] && 1421 rth->fl.fl4_src == skeys[i] &&
1422 rth->rt_dst == daddr && 1422 rth->rt_dst == daddr &&
@@ -1436,7 +1436,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1436 mtu = guess_mtu(old_mtu); 1436 mtu = guess_mtu(old_mtu);
1437 } 1437 }
1438 if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) { 1438 if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
1439 if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) { 1439 if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
1440 dst_confirm(&rth->u.dst); 1440 dst_confirm(&rth->u.dst);
1441 if (mtu < ip_rt_min_pmtu) { 1441 if (mtu < ip_rt_min_pmtu) {
1442 mtu = ip_rt_min_pmtu; 1442 mtu = ip_rt_min_pmtu;
@@ -1600,7 +1600,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1600#endif 1600#endif
1601 set_class_tag(rt, itag); 1601 set_class_tag(rt, itag);
1602#endif 1602#endif
1603 rt->rt_type = res->type; 1603 rt->rt_type = res->type;
1604} 1604}
1605 1605
1606static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1606static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -1714,11 +1714,11 @@ static void ip_handle_martian_source(struct net_device *dev,
1714#endif 1714#endif
1715} 1715}
1716 1716
1717static inline int __mkroute_input(struct sk_buff *skb, 1717static inline int __mkroute_input(struct sk_buff *skb,
1718 struct fib_result* res, 1718 struct fib_result* res,
1719 struct in_device *in_dev, 1719 struct in_device *in_dev,
1720 __be32 daddr, __be32 saddr, u32 tos, 1720 __be32 daddr, __be32 saddr, u32 tos,
1721 struct rtable **result) 1721 struct rtable **result)
1722{ 1722{
1723 1723
1724 struct rtable *rth; 1724 struct rtable *rth;
@@ -1738,12 +1738,12 @@ static inline int __mkroute_input(struct sk_buff *skb,
1738 } 1738 }
1739 1739
1740 1740
1741 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 1741 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1742 in_dev->dev, &spec_dst, &itag); 1742 in_dev->dev, &spec_dst, &itag);
1743 if (err < 0) { 1743 if (err < 0) {
1744 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1744 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1745 saddr); 1745 saddr);
1746 1746
1747 err = -EINVAL; 1747 err = -EINVAL;
1748 goto cleanup; 1748 goto cleanup;
1749 } 1749 }
@@ -1811,10 +1811,10 @@ static inline int __mkroute_input(struct sk_buff *skb,
1811 /* release the working reference to the output device */ 1811 /* release the working reference to the output device */
1812 in_dev_put(out_dev); 1812 in_dev_put(out_dev);
1813 return err; 1813 return err;
1814} 1814}
1815 1815
1816static inline int ip_mkroute_input_def(struct sk_buff *skb, 1816static inline int ip_mkroute_input_def(struct sk_buff *skb,
1817 struct fib_result* res, 1817 struct fib_result* res,
1818 const struct flowi *fl, 1818 const struct flowi *fl,
1819 struct in_device *in_dev, 1819 struct in_device *in_dev,
1820 __be32 daddr, __be32 saddr, u32 tos) 1820 __be32 daddr, __be32 saddr, u32 tos)
@@ -1835,11 +1835,11 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
1835 1835
1836 /* put it into the cache */ 1836 /* put it into the cache */
1837 hash = rt_hash(daddr, saddr, fl->iif); 1837 hash = rt_hash(daddr, saddr, fl->iif);
1838 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 1838 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1839} 1839}
1840 1840
1841static inline int ip_mkroute_input(struct sk_buff *skb, 1841static inline int ip_mkroute_input(struct sk_buff *skb,
1842 struct fib_result* res, 1842 struct fib_result* res,
1843 const struct flowi *fl, 1843 const struct flowi *fl,
1844 struct in_device *in_dev, 1844 struct in_device *in_dev,
1845 __be32 daddr, __be32 saddr, u32 tos) 1845 __be32 daddr, __be32 saddr, u32 tos)
@@ -1859,7 +1859,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
1859 if (hopcount < 2) 1859 if (hopcount < 2)
1860 return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, 1860 return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
1861 saddr, tos); 1861 saddr, tos);
1862 1862
1863 /* add all alternatives to the routing cache */ 1863 /* add all alternatives to the routing cache */
1864 for (hop = 0; hop < hopcount; hop++) { 1864 for (hop = 0; hop < hopcount; hop++) {
1865 res->nh_sel = hop; 1865 res->nh_sel = hop;
@@ -1988,7 +1988,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1988 goto e_nobufs; 1988 goto e_nobufs;
1989 if (err == -EINVAL) 1989 if (err == -EINVAL)
1990 goto e_inval; 1990 goto e_inval;
1991 1991
1992done: 1992done:
1993 in_dev_put(in_dev); 1993 in_dev_put(in_dev);
1994 if (free_res) 1994 if (free_res)
@@ -2071,8 +2071,8 @@ martian_destination:
2071#endif 2071#endif
2072 2072
2073e_hostunreach: 2073e_hostunreach:
2074 err = -EHOSTUNREACH; 2074 err = -EHOSTUNREACH;
2075 goto done; 2075 goto done;
2076 2076
2077e_inval: 2077e_inval:
2078 err = -EINVAL; 2078 err = -EINVAL;
@@ -2099,7 +2099,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2099 2099
2100 rcu_read_lock(); 2100 rcu_read_lock();
2101 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2101 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2102 rth = rcu_dereference(rth->u.rt_next)) { 2102 rth = rcu_dereference(rth->u.dst.rt_next)) {
2103 if (rth->fl.fl4_dst == daddr && 2103 if (rth->fl.fl4_dst == daddr &&
2104 rth->fl.fl4_src == saddr && 2104 rth->fl.fl4_src == saddr &&
2105 rth->fl.iif == iif && 2105 rth->fl.iif == iif &&
@@ -2153,11 +2153,11 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2153} 2153}
2154 2154
2155static inline int __mkroute_output(struct rtable **result, 2155static inline int __mkroute_output(struct rtable **result,
2156 struct fib_result* res, 2156 struct fib_result* res,
2157 const struct flowi *fl, 2157 const struct flowi *fl,
2158 const struct flowi *oldflp, 2158 const struct flowi *oldflp,
2159 struct net_device *dev_out, 2159 struct net_device *dev_out,
2160 unsigned flags) 2160 unsigned flags)
2161{ 2161{
2162 struct rtable *rth; 2162 struct rtable *rth;
2163 struct in_device *in_dev; 2163 struct in_device *in_dev;
@@ -2190,7 +2190,7 @@ static inline int __mkroute_output(struct rtable **result,
2190 } 2190 }
2191 } else if (res->type == RTN_MULTICAST) { 2191 } else if (res->type == RTN_MULTICAST) {
2192 flags |= RTCF_MULTICAST|RTCF_LOCAL; 2192 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2193 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src, 2193 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2194 oldflp->proto)) 2194 oldflp->proto))
2195 flags &= ~RTCF_LOCAL; 2195 flags &= ~RTCF_LOCAL;
2196 /* If multicast route do not exist use 2196 /* If multicast route do not exist use
@@ -2208,7 +2208,7 @@ static inline int __mkroute_output(struct rtable **result,
2208 if (!rth) { 2208 if (!rth) {
2209 err = -ENOBUFS; 2209 err = -ENOBUFS;
2210 goto cleanup; 2210 goto cleanup;
2211 } 2211 }
2212 2212
2213 atomic_set(&rth->u.dst.__refcnt, 1); 2213 atomic_set(&rth->u.dst.__refcnt, 1);
2214 rth->u.dst.flags= DST_HOST; 2214 rth->u.dst.flags= DST_HOST;
@@ -2232,7 +2232,7 @@ static inline int __mkroute_output(struct rtable **result,
2232 rth->rt_dst = fl->fl4_dst; 2232 rth->rt_dst = fl->fl4_dst;
2233 rth->rt_src = fl->fl4_src; 2233 rth->rt_src = fl->fl4_src;
2234 rth->rt_iif = oldflp->oif ? : dev_out->ifindex; 2234 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
2235 /* get references to the devices that are to be hold by the routing 2235 /* get references to the devices that are to be hold by the routing
2236 cache entry */ 2236 cache entry */
2237 rth->u.dst.dev = dev_out; 2237 rth->u.dst.dev = dev_out;
2238 dev_hold(dev_out); 2238 dev_hold(dev_out);
@@ -2250,7 +2250,7 @@ static inline int __mkroute_output(struct rtable **result,
2250 } 2250 }
2251 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 2251 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2252 rth->rt_spec_dst = fl->fl4_src; 2252 rth->rt_spec_dst = fl->fl4_src;
2253 if (flags & RTCF_LOCAL && 2253 if (flags & RTCF_LOCAL &&
2254 !(dev_out->flags & IFF_LOOPBACK)) { 2254 !(dev_out->flags & IFF_LOOPBACK)) {
2255 rth->u.dst.output = ip_mc_output; 2255 rth->u.dst.output = ip_mc_output;
2256 RT_CACHE_STAT_INC(out_slow_mc); 2256 RT_CACHE_STAT_INC(out_slow_mc);
@@ -2292,7 +2292,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
2292 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif); 2292 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
2293 err = rt_intern_hash(hash, rth, rp); 2293 err = rt_intern_hash(hash, rth, rp);
2294 } 2294 }
2295 2295
2296 return err; 2296 return err;
2297} 2297}
2298 2298
@@ -2563,7 +2563,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2563 2563
2564 rcu_read_lock_bh(); 2564 rcu_read_lock_bh();
2565 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2565 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2566 rth = rcu_dereference(rth->u.rt_next)) { 2566 rth = rcu_dereference(rth->u.dst.rt_next)) {
2567 if (rth->fl.fl4_dst == flp->fl4_dst && 2567 if (rth->fl.fl4_dst == flp->fl4_dst &&
2568 rth->fl.fl4_src == flp->fl4_src && 2568 rth->fl.fl4_src == flp->fl4_src &&
2569 rth->fl.iif == 0 && 2569 rth->fl.iif == 0 &&
@@ -2825,12 +2825,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2825 s_idx = 0; 2825 s_idx = 0;
2826 rcu_read_lock_bh(); 2826 rcu_read_lock_bh();
2827 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 2827 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2828 rt = rcu_dereference(rt->u.rt_next), idx++) { 2828 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
2829 if (idx < s_idx) 2829 if (idx < s_idx)
2830 continue; 2830 continue;
2831 skb->dst = dst_clone(&rt->u.dst); 2831 skb->dst = dst_clone(&rt->u.dst);
2832 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 2832 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
2833 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 2833 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2834 1, NLM_F_MULTI) <= 0) { 2834 1, NLM_F_MULTI) <= 0) {
2835 dst_release(xchg(&skb->dst, NULL)); 2835 dst_release(xchg(&skb->dst, NULL));
2836 rcu_read_unlock_bh(); 2836 rcu_read_unlock_bh();
@@ -2863,7 +2863,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
2863 proc_dointvec(ctl, write, filp, buffer, lenp, ppos); 2863 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2864 rt_cache_flush(flush_delay); 2864 rt_cache_flush(flush_delay);
2865 return 0; 2865 return 0;
2866 } 2866 }
2867 2867
2868 return -EINVAL; 2868 return -EINVAL;
2869} 2869}
@@ -2880,13 +2880,13 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
2880 if (newlen != sizeof(int)) 2880 if (newlen != sizeof(int))
2881 return -EINVAL; 2881 return -EINVAL;
2882 if (get_user(delay, (int __user *)newval)) 2882 if (get_user(delay, (int __user *)newval))
2883 return -EFAULT; 2883 return -EFAULT;
2884 rt_cache_flush(delay); 2884 rt_cache_flush(delay);
2885 return 0; 2885 return 0;
2886} 2886}
2887 2887
2888ctl_table ipv4_route_table[] = { 2888ctl_table ipv4_route_table[] = {
2889 { 2889 {
2890 .ctl_name = NET_IPV4_ROUTE_FLUSH, 2890 .ctl_name = NET_IPV4_ROUTE_FLUSH,
2891 .procname = "flush", 2891 .procname = "flush",
2892 .data = &flush_delay, 2892 .data = &flush_delay,
@@ -2931,7 +2931,7 @@ ctl_table ipv4_route_table[] = {
2931 }, 2931 },
2932 { 2932 {
2933 /* Deprecated. Use gc_min_interval_ms */ 2933 /* Deprecated. Use gc_min_interval_ms */
2934 2934
2935 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL, 2935 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
2936 .procname = "gc_min_interval", 2936 .procname = "gc_min_interval",
2937 .data = &ip_rt_gc_min_interval, 2937 .data = &ip_rt_gc_min_interval,
@@ -3180,8 +3180,8 @@ int __init ip_rt_init(void)
3180 { 3180 {
3181 struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */ 3181 struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
3182 if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || 3182 if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
3183 !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 3183 !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
3184 proc_net_stat))) { 3184 proc_net_stat))) {
3185 return -ENOMEM; 3185 return -ENOMEM;
3186 } 3186 }
3187 rtstat_pde->proc_fops = &rt_cpu_seq_fops; 3187 rtstat_pde->proc_fops = &rt_cpu_seq_fops;
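
Annotation (not part of the patch): the route.c hunks above are almost entirely mechanical — every hash-chain walk that used to follow rtable's own rt_next pointer now goes through the 'next' union that lives inside dst_entry, hence the rth->u.rt_next → rth->u.dst.rt_next rewrites. A compilable sketch of the idea; the struct layouts are heavily simplified and only the field names and the access pattern follow the patch:

    /* sketch only: simplified layouts, real kernel structs are much larger */
    #include <stdio.h>

    struct rtable;

    struct dst_entry {
            union {
                    struct dst_entry *next;      /* generic chain pointer      */
                    struct rtable    *rt_next;   /* IPv4-typed view, same slot */
            };
            /* refcount, metrics, ops, ... omitted */
    };

    struct rtable {
            union {
                    struct dst_entry dst;        /* must stay the first member */
                    /* removed by the patch: struct rtable *rt_next; */
            } u;
            unsigned int rt_dst;                 /* stand-in for the real route fields */
    };

    int main(void)
    {
            struct rtable a = { .rt_dst = 1 }, b = { .rt_dst = 2 };

            /* chaining, as rt_intern_hash() now does */
            a.u.dst.rt_next = &b;

            /* walking, as rt_check_expire() / ip_route_input() now do */
            for (struct rtable *r = &a; r; r = r->u.dst.rt_next)
                    printf("rt_dst=%u\n", r->rt_dst);
            return 0;
    }

Because the union aliases the same slot as the generic dst_entry 'next' pointer, protocol-agnostic code can keep walking dst chains while IPv4 code keeps its typed rt_next view.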
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 6b19530905af..33016cc90f0b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -2,16 +2,16 @@
2 * Syncookies implementation for the Linux kernel 2 * Syncookies implementation for the Linux kernel
3 * 3 *
4 * Copyright (C) 1997 Andi Kleen 4 * Copyright (C) 1997 Andi Kleen
5 * Based on ideas by D.J.Bernstein and Eric Schenk. 5 * Based on ideas by D.J.Bernstein and Eric Schenk.
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ 12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $
13 * 13 *
14 * Missing: IPv6 support. 14 * Missing: IPv6 support.
15 */ 15 */
16 16
17#include <linux/tcp.h> 17#include <linux/tcp.h>
@@ -57,7 +57,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
57 /* 57 /*
58 * Compute the secure sequence number. 58 * Compute the secure sequence number.
59 * The output should be: 59 * The output should be:
60 * HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24) 60 * HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24)
61 * + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24). 61 * + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24).
62 * Where sseq is their sequence number and count increases every 62 * Where sseq is their sequence number and count increases every
63 * minute by 1. 63 * minute by 1.
@@ -99,17 +99,17 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
99 & COOKIEMASK; /* Leaving the data behind */ 99 & COOKIEMASK; /* Leaving the data behind */
100} 100}
101 101
102/* 102/*
103 * This table has to be sorted and terminated with (__u16)-1. 103 * This table has to be sorted and terminated with (__u16)-1.
104 * XXX generate a better table. 104 * XXX generate a better table.
105 * Unresolved Issues: HIPPI with a 64k MSS is not well supported. 105 * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
106 */ 106 */
107static __u16 const msstab[] = { 107static __u16 const msstab[] = {
108 64 - 1, 108 64 - 1,
109 256 - 1, 109 256 - 1,
110 512 - 1, 110 512 - 1,
111 536 - 1, 111 536 - 1,
112 1024 - 1, 112 1024 - 1,
113 1440 - 1, 113 1440 - 1,
114 1460 - 1, 114 1460 - 1,
115 4312 - 1, 115 4312 - 1,
@@ -128,7 +128,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
128 int mssind; 128 int mssind;
129 const __u16 mss = *mssp; 129 const __u16 mss = *mssp;
130 130
131 131
132 tp->last_synq_overflow = jiffies; 132 tp->last_synq_overflow = jiffies;
133 133
134 /* XXX sort msstab[] by probability? Binary search? */ 134 /* XXX sort msstab[] by probability? Binary search? */
@@ -144,23 +144,23 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
144 jiffies / (HZ * 60), mssind); 144 jiffies / (HZ * 60), mssind);
145} 145}
146 146
147/* 147/*
148 * This (misnamed) value is the age of syncookie which is permitted. 148 * This (misnamed) value is the age of syncookie which is permitted.
149 * Its ideal value should be dependent on TCP_TIMEOUT_INIT and 149 * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
150 * sysctl_tcp_retries1. It's a rather complicated formula (exponential 150 * sysctl_tcp_retries1. It's a rather complicated formula (exponential
151 * backoff) to compute at runtime so it's currently hardcoded here. 151 * backoff) to compute at runtime so it's currently hardcoded here.
152 */ 152 */
153#define COUNTER_TRIES 4 153#define COUNTER_TRIES 4
154/* 154/*
155 * Check if a ack sequence number is a valid syncookie. 155 * Check if a ack sequence number is a valid syncookie.
156 * Return the decoded mss if it is, or 0 if not. 156 * Return the decoded mss if it is, or 0 if not.
157 */ 157 */
158static inline int cookie_check(struct sk_buff *skb, __u32 cookie) 158static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
159{ 159{
160 __u32 seq; 160 __u32 seq;
161 __u32 mssind; 161 __u32 mssind;
162 162
163 seq = ntohl(skb->h.th->seq)-1; 163 seq = ntohl(skb->h.th->seq)-1;
164 mssind = check_tcp_syn_cookie(cookie, 164 mssind = check_tcp_syn_cookie(cookie,
165 skb->nh.iph->saddr, skb->nh.iph->daddr, 165 skb->nh.iph->saddr, skb->nh.iph->daddr,
166 skb->h.th->source, skb->h.th->dest, 166 skb->h.th->source, skb->h.th->dest,
@@ -191,19 +191,19 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
191 struct inet_request_sock *ireq; 191 struct inet_request_sock *ireq;
192 struct tcp_request_sock *treq; 192 struct tcp_request_sock *treq;
193 struct tcp_sock *tp = tcp_sk(sk); 193 struct tcp_sock *tp = tcp_sk(sk);
194 __u32 cookie = ntohl(skb->h.th->ack_seq) - 1; 194 __u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
195 struct sock *ret = sk; 195 struct sock *ret = sk;
196 struct request_sock *req; 196 struct request_sock *req;
197 int mss; 197 int mss;
198 struct rtable *rt; 198 struct rtable *rt;
199 __u8 rcv_wscale; 199 __u8 rcv_wscale;
200 200
201 if (!sysctl_tcp_syncookies || !skb->h.th->ack) 201 if (!sysctl_tcp_syncookies || !skb->h.th->ack)
202 goto out; 202 goto out;
203 203
204 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || 204 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
205 (mss = cookie_check(skb, cookie)) == 0) { 205 (mss = cookie_check(skb, cookie)) == 0) {
206 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED); 206 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
207 goto out; 207 goto out;
208 } 208 }
209 209
@@ -221,9 +221,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
221 ireq = inet_rsk(req); 221 ireq = inet_rsk(req);
222 treq = tcp_rsk(req); 222 treq = tcp_rsk(req);
223 treq->rcv_isn = ntohl(skb->h.th->seq) - 1; 223 treq->rcv_isn = ntohl(skb->h.th->seq) - 1;
224 treq->snt_isn = cookie; 224 treq->snt_isn = cookie;
225 req->mss = mss; 225 req->mss = mss;
226 ireq->rmt_port = skb->h.th->source; 226 ireq->rmt_port = skb->h.th->source;
227 ireq->loc_addr = skb->nh.iph->daddr; 227 ireq->loc_addr = skb->nh.iph->daddr;
228 ireq->rmt_addr = skb->nh.iph->saddr; 228 ireq->rmt_addr = skb->nh.iph->saddr;
229 ireq->opt = NULL; 229 ireq->opt = NULL;
@@ -242,15 +242,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
242 } 242 }
243 243
244 ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0; 244 ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0;
245 ireq->wscale_ok = ireq->sack_ok = 0; 245 ireq->wscale_ok = ireq->sack_ok = 0;
246 req->expires = 0UL; 246 req->expires = 0UL;
247 req->retrans = 0; 247 req->retrans = 0;
248 248
249 /* 249 /*
250 * We need to lookup the route here to get at the correct 250 * We need to lookup the route here to get at the correct
251 * window size. We should better make sure that the window size 251 * window size. We should better make sure that the window size
252 * hasn't changed since we received the original syn, but I see 252 * hasn't changed since we received the original syn, but I see
253 * no easy way to do this. 253 * no easy way to do this.
254 */ 254 */
255 { 255 {
256 struct flowi fl = { .nl_u = { .ip4_u = 256 struct flowi fl = { .nl_u = { .ip4_u =
@@ -266,17 +266,17 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
266 security_req_classify_flow(req, &fl); 266 security_req_classify_flow(req, &fl);
267 if (ip_route_output_key(&rt, &fl)) { 267 if (ip_route_output_key(&rt, &fl)) {
268 reqsk_free(req); 268 reqsk_free(req);
269 goto out; 269 goto out;
270 } 270 }
271 } 271 }
272 272
273 /* Try to redo what tcp_v4_send_synack did. */ 273 /* Try to redo what tcp_v4_send_synack did. */
274 req->window_clamp = dst_metric(&rt->u.dst, RTAX_WINDOW); 274 req->window_clamp = dst_metric(&rt->u.dst, RTAX_WINDOW);
275 tcp_select_initial_window(tcp_full_space(sk), req->mss, 275 tcp_select_initial_window(tcp_full_space(sk), req->mss,
276 &req->rcv_wnd, &req->window_clamp, 276 &req->rcv_wnd, &req->window_clamp,
277 0, &rcv_wscale); 277 0, &rcv_wscale);
278 /* BTW win scale with syncookies is 0 by definition */ 278 /* BTW win scale with syncookies is 0 by definition */
279 ireq->rcv_wscale = rcv_wscale; 279 ireq->rcv_wscale = rcv_wscale;
280 280
281 ret = get_cookie_sock(sk, skb, req, &rt->u.dst); 281 ret = get_cookie_sock(sk, skb, req, &rt->u.dst);
282out: return ret; 282out: return ret;
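
Annotation (not part of the patch): the comment in secure_tcp_syn_cookie() above spells out the cookie layout — HASH(sec1,...) + sseq + count*2^24 + (HASH(sec2,...,count) % 2^24). A toy userspace model of just that packing and the matching recovery; the two keyed hashes are replaced with fixed dummies and the age check is crude, so this is illustrative arithmetic, not the kernel algorithm:

    #include <stdio.h>
    #include <stdint.h>

    #define COOKIEBITS 24
    #define COOKIEMASK ((1u << COOKIEBITS) - 1)

    /* stand-ins for the keyed hashes over the 4-tuple */
    static uint32_t hash1(void)           { return 0x12345678u; }
    static uint32_t hash2(uint32_t count) { return 0x9abcdef0u ^ count; }

    static uint32_t make_cookie(uint32_t sseq, uint32_t count, uint32_t data)
    {
            return hash1() + sseq + (count << COOKIEBITS)
                 + ((hash2(count) + data) & COOKIEMASK);
    }

    static uint32_t check_cookie(uint32_t cookie, uint32_t sseq, uint32_t now)
    {
            uint32_t count;

            cookie -= hash1() + sseq;          /* strip the outer terms            */
            count = cookie >> COOKIEBITS;      /* minute the cookie was minted     */
            if (now - count > 4)               /* crude COUNTER_TRIES-style window */
                    return (uint32_t)-1;
            return (cookie - hash2(count)) & COOKIEMASK;  /* "leaving the data behind" */
    }

    int main(void)
    {
            /* encode sseq=1000 at minute 42 with mss index 5, decode a minute later */
            uint32_t cookie = make_cookie(1000, 42, 5);

            printf("recovered mss index: %u\n", check_cookie(cookie, 1000, 43));
            return 0;
    }

The low 24 bits carry the msstab index, the top 8 bits the minute counter, which is why cookie_check() only needs the index back to rebuild the request.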
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index fabf69a9108c..0aa304711a96 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -24,7 +24,7 @@ extern int sysctl_ip_nonlocal_bind;
24 24
25#ifdef CONFIG_SYSCTL 25#ifdef CONFIG_SYSCTL
26static int zero; 26static int zero;
27static int tcp_retr1_max = 255; 27static int tcp_retr1_max = 255;
28static int ip_local_port_range_min[] = { 1, 1 }; 28static int ip_local_port_range_min[] = { 1, 1 };
29static int ip_local_port_range_max[] = { 65535, 65535 }; 29static int ip_local_port_range_max[] = { 65535, 65535 };
30#endif 30#endif
@@ -187,7 +187,7 @@ static int strategy_allowed_congestion_control(ctl_table *table, int __user *nam
187} 187}
188 188
189ctl_table ipv4_table[] = { 189ctl_table ipv4_table[] = {
190 { 190 {
191 .ctl_name = NET_IPV4_TCP_TIMESTAMPS, 191 .ctl_name = NET_IPV4_TCP_TIMESTAMPS,
192 .procname = "tcp_timestamps", 192 .procname = "tcp_timestamps",
193 .data = &sysctl_tcp_timestamps, 193 .data = &sysctl_tcp_timestamps,
@@ -195,7 +195,7 @@ ctl_table ipv4_table[] = {
195 .mode = 0644, 195 .mode = 0644,
196 .proc_handler = &proc_dointvec 196 .proc_handler = &proc_dointvec
197 }, 197 },
198 { 198 {
199 .ctl_name = NET_IPV4_TCP_WINDOW_SCALING, 199 .ctl_name = NET_IPV4_TCP_WINDOW_SCALING,
200 .procname = "tcp_window_scaling", 200 .procname = "tcp_window_scaling",
201 .data = &sysctl_tcp_window_scaling, 201 .data = &sysctl_tcp_window_scaling,
@@ -203,7 +203,7 @@ ctl_table ipv4_table[] = {
203 .mode = 0644, 203 .mode = 0644,
204 .proc_handler = &proc_dointvec 204 .proc_handler = &proc_dointvec
205 }, 205 },
206 { 206 {
207 .ctl_name = NET_IPV4_TCP_SACK, 207 .ctl_name = NET_IPV4_TCP_SACK,
208 .procname = "tcp_sack", 208 .procname = "tcp_sack",
209 .data = &sysctl_tcp_sack, 209 .data = &sysctl_tcp_sack,
@@ -211,7 +211,7 @@ ctl_table ipv4_table[] = {
211 .mode = 0644, 211 .mode = 0644,
212 .proc_handler = &proc_dointvec 212 .proc_handler = &proc_dointvec
213 }, 213 },
214 { 214 {
215 .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE, 215 .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE,
216 .procname = "tcp_retrans_collapse", 216 .procname = "tcp_retrans_collapse",
217 .data = &sysctl_tcp_retrans_collapse, 217 .data = &sysctl_tcp_retrans_collapse,
@@ -219,7 +219,7 @@ ctl_table ipv4_table[] = {
219 .mode = 0644, 219 .mode = 0644,
220 .proc_handler = &proc_dointvec 220 .proc_handler = &proc_dointvec
221 }, 221 },
222 { 222 {
223 .ctl_name = NET_IPV4_FORWARD, 223 .ctl_name = NET_IPV4_FORWARD,
224 .procname = "ip_forward", 224 .procname = "ip_forward",
225 .data = &ipv4_devconf.forwarding, 225 .data = &ipv4_devconf.forwarding,
@@ -228,16 +228,16 @@ ctl_table ipv4_table[] = {
228 .proc_handler = &ipv4_sysctl_forward, 228 .proc_handler = &ipv4_sysctl_forward,
229 .strategy = &ipv4_sysctl_forward_strategy 229 .strategy = &ipv4_sysctl_forward_strategy
230 }, 230 },
231 { 231 {
232 .ctl_name = NET_IPV4_DEFAULT_TTL, 232 .ctl_name = NET_IPV4_DEFAULT_TTL,
233 .procname = "ip_default_ttl", 233 .procname = "ip_default_ttl",
234 .data = &sysctl_ip_default_ttl, 234 .data = &sysctl_ip_default_ttl,
235 .maxlen = sizeof(int), 235 .maxlen = sizeof(int),
236 .mode = 0644, 236 .mode = 0644,
237 .proc_handler = &ipv4_doint_and_flush, 237 .proc_handler = &ipv4_doint_and_flush,
238 .strategy = &ipv4_doint_and_flush_strategy, 238 .strategy = &ipv4_doint_and_flush_strategy,
239 }, 239 },
240 { 240 {
241 .ctl_name = NET_IPV4_NO_PMTU_DISC, 241 .ctl_name = NET_IPV4_NO_PMTU_DISC,
242 .procname = "ip_no_pmtu_disc", 242 .procname = "ip_no_pmtu_disc",
243 .data = &ipv4_config.no_pmtu_disc, 243 .data = &ipv4_config.no_pmtu_disc,
@@ -728,7 +728,7 @@ ctl_table ipv4_table[] = {
728 .mode = 0644, 728 .mode = 0644,
729 .proc_handler = &proc_dointvec, 729 .proc_handler = &proc_dointvec,
730 }, 730 },
731 { 731 {
732 .ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, 732 .ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS,
733 .procname = "tcp_workaround_signed_windows", 733 .procname = "tcp_workaround_signed_windows",
734 .data = &sysctl_tcp_workaround_signed_windows, 734 .data = &sysctl_tcp_workaround_signed_windows,
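
Annotation (not part of the patch): each ctl_table entry above surfaces as /proc/sys/net/ipv4/<procname> (equivalently, sysctl net.ipv4.<procname>). A trivial read-only userspace check of one of the procnames from this hunk, assuming the standard procfs layout:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/ip_default_ttl", "r");
            int ttl;

            if (f && fscanf(f, "%d", &ttl) == 1)
                    printf("ip_default_ttl = %d\n", ttl);
            if (f)
                    fclose(f);
            return 0;
    }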
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5bd43d7294fd..ac6516c642a1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -475,7 +475,7 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
475 if (!sk->sk_send_head) 475 if (!sk->sk_send_head)
476 sk->sk_send_head = skb; 476 sk->sk_send_head = skb;
477 if (tp->nonagle & TCP_NAGLE_PUSH) 477 if (tp->nonagle & TCP_NAGLE_PUSH)
478 tp->nonagle &= ~TCP_NAGLE_PUSH; 478 tp->nonagle &= ~TCP_NAGLE_PUSH;
479} 479}
480 480
481static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, 481static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
@@ -557,7 +557,7 @@ new_segment:
557 } 557 }
558 if (!sk_stream_wmem_schedule(sk, copy)) 558 if (!sk_stream_wmem_schedule(sk, copy))
559 goto wait_for_memory; 559 goto wait_for_memory;
560 560
561 if (can_coalesce) { 561 if (can_coalesce) {
562 skb_shinfo(skb)->frags[i - 1].size += copy; 562 skb_shinfo(skb)->frags[i - 1].size += copy;
563 } else { 563 } else {
@@ -1439,12 +1439,12 @@ skip_copy:
1439 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); 1439 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1440 1440
1441 while (dma_async_memcpy_complete(tp->ucopy.dma_chan, 1441 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1442 tp->ucopy.dma_cookie, &done, 1442 tp->ucopy.dma_cookie, &done,
1443 &used) == DMA_IN_PROGRESS) { 1443 &used) == DMA_IN_PROGRESS) {
1444 /* do partial cleanup of sk_async_wait_queue */ 1444 /* do partial cleanup of sk_async_wait_queue */
1445 while ((skb = skb_peek(&sk->sk_async_wait_queue)) && 1445 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1446 (dma_async_is_complete(skb->dma_cookie, done, 1446 (dma_async_is_complete(skb->dma_cookie, done,
1447 used) == DMA_SUCCESS)) { 1447 used) == DMA_SUCCESS)) {
1448 __skb_dequeue(&sk->sk_async_wait_queue); 1448 __skb_dequeue(&sk->sk_async_wait_queue);
1449 kfree_skb(skb); 1449 kfree_skb(skb);
1450 } 1450 }
@@ -2006,7 +2006,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2006 info->tcpi_options |= TCPI_OPT_WSCALE; 2006 info->tcpi_options |= TCPI_OPT_WSCALE;
2007 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 2007 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2008 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 2008 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2009 } 2009 }
2010 2010
2011 if (tp->ecn_flags&TCP_ECN_OK) 2011 if (tp->ecn_flags&TCP_ECN_OK)
2012 info->tcpi_options |= TCPI_OPT_ECN; 2012 info->tcpi_options |= TCPI_OPT_ECN;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 5ca7723d0798..c1b34f1edb32 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -313,28 +313,28 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
313 return; 313 return;
314 314
315 /* In "safe" area, increase. */ 315 /* In "safe" area, increase. */
316 if (tp->snd_cwnd <= tp->snd_ssthresh) 316 if (tp->snd_cwnd <= tp->snd_ssthresh)
317 tcp_slow_start(tp); 317 tcp_slow_start(tp);
318 318
319 /* In dangerous area, increase slowly. */ 319 /* In dangerous area, increase slowly. */
320 else if (sysctl_tcp_abc) { 320 else if (sysctl_tcp_abc) {
321 /* RFC3465: Appropriate Byte Count 321 /* RFC3465: Appropriate Byte Count
322 * increase once for each full cwnd acked 322 * increase once for each full cwnd acked
323 */ 323 */
324 if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) { 324 if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
325 tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache; 325 tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
326 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 326 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
327 tp->snd_cwnd++; 327 tp->snd_cwnd++;
328 } 328 }
329 } else { 329 } else {
330 /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */ 330 /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
331 if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 331 if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
332 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 332 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
333 tp->snd_cwnd++; 333 tp->snd_cwnd++;
334 tp->snd_cwnd_cnt = 0; 334 tp->snd_cwnd_cnt = 0;
335 } else 335 } else
336 tp->snd_cwnd_cnt++; 336 tp->snd_cwnd_cnt++;
337 } 337 }
338} 338}
339EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 339EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
340 340
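
Annotation (not part of the patch): tcp_reno_cong_avoid() above has three branches — slow start below ssthresh, the RFC3465 ABC variant, and the classic one-increment-per-cwnd-of-acks increase. A stripped-down userspace model of the first and last branch (no ABC, no cwnd clamp, one call per ack assumed):

    #include <stdio.h>

    struct toy_tp { unsigned cwnd, cwnd_cnt, ssthresh; };

    static void on_ack(struct toy_tp *tp)
    {
            if (tp->cwnd <= tp->ssthresh) {
                    tp->cwnd++;                 /* slow start: +1 per ack           */
            } else if (++tp->cwnd_cnt >= tp->cwnd) {
                    tp->cwnd++;                 /* cong. avoidance: +1 per cwnd acks */
                    tp->cwnd_cnt = 0;
            }
    }

    int main(void)
    {
            struct toy_tp tp = { .cwnd = 2, .cwnd_cnt = 0, .ssthresh = 8 };

            for (int ack = 1; ack <= 64; ack++)
                    on_ack(&tp);
            printf("cwnd after 64 acks: %u\n", tp.cwnd);
            return 0;
    }

Below ssthresh the window roughly doubles per RTT; above it the growth collapses to about one segment per RTT, which is the "in theory this is tp->snd_cwnd += 1 / tp->snd_cwnd" comment in the hunk.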
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 6ad184802266..5ce6cd85680b 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -175,42 +175,42 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
175 } 175 }
176 } 176 }
177 177
178 /* cubic function - calc*/ 178 /* cubic function - calc*/
179 /* calculate c * time^3 / rtt, 179 /* calculate c * time^3 / rtt,
180 * while considering overflow in calculation of time^3 180 * while considering overflow in calculation of time^3
181 * (so time^3 is done by using 64 bit) 181 * (so time^3 is done by using 64 bit)
182 * and without the support of division of 64bit numbers 182 * and without the support of division of 64bit numbers
183 * (so all divisions are done by using 32 bit) 183 * (so all divisions are done by using 32 bit)
184 * also NOTE the unit of those veriables 184 * also NOTE the unit of those veriables
185 * time = (t - K) / 2^bictcp_HZ 185 * time = (t - K) / 2^bictcp_HZ
186 * c = bic_scale >> 10 186 * c = bic_scale >> 10
187 * rtt = (srtt >> 3) / HZ 187 * rtt = (srtt >> 3) / HZ
188 * !!! The following code does not have overflow problems, 188 * !!! The following code does not have overflow problems,
189 * if the cwnd < 1 million packets !!! 189 * if the cwnd < 1 million packets !!!
190 */ 190 */
191 191
192 /* change the unit from HZ to bictcp_HZ */ 192 /* change the unit from HZ to bictcp_HZ */
193 t = ((tcp_time_stamp + (ca->delay_min>>3) - ca->epoch_start) 193 t = ((tcp_time_stamp + (ca->delay_min>>3) - ca->epoch_start)
194 << BICTCP_HZ) / HZ; 194 << BICTCP_HZ) / HZ;
195 195
196 if (t < ca->bic_K) /* t - K */ 196 if (t < ca->bic_K) /* t - K */
197 offs = ca->bic_K - t; 197 offs = ca->bic_K - t;
198 else 198 else
199 offs = t - ca->bic_K; 199 offs = t - ca->bic_K;
200 200
201 /* c/rtt * (t-K)^3 */ 201 /* c/rtt * (t-K)^3 */
202 delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); 202 delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
203 if (t < ca->bic_K) /* below origin*/ 203 if (t < ca->bic_K) /* below origin*/
204 bic_target = ca->bic_origin_point - delta; 204 bic_target = ca->bic_origin_point - delta;
205 else /* above origin*/ 205 else /* above origin*/
206 bic_target = ca->bic_origin_point + delta; 206 bic_target = ca->bic_origin_point + delta;
207 207
208 /* cubic function - calc bictcp_cnt*/ 208 /* cubic function - calc bictcp_cnt*/
209 if (bic_target > cwnd) { 209 if (bic_target > cwnd) {
210 ca->cnt = cwnd / (bic_target - cwnd); 210 ca->cnt = cwnd / (bic_target - cwnd);
211 } else { 211 } else {
212 ca->cnt = 100 * cwnd; /* very small increment*/ 212 ca->cnt = 100 * cwnd; /* very small increment*/
213 } 213 }
214 214
215 if (ca->delay_min > 0) { 215 if (ca->delay_min > 0) {
216 /* max increment = Smax * rtt / 0.1 */ 216 /* max increment = Smax * rtt / 0.1 */
@@ -219,7 +219,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
219 ca->cnt = min_cnt; 219 ca->cnt = min_cnt;
220 } 220 }
221 221
222 /* slow start and low utilization */ 222 /* slow start and low utilization */
223 if (ca->loss_cwnd == 0) /* could be aggressive in slow start */ 223 if (ca->loss_cwnd == 0) /* could be aggressive in slow start */
224 ca->cnt = 50; 224 ca->cnt = 50;
225 225
@@ -227,9 +227,9 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
227 if (tcp_friendliness) { 227 if (tcp_friendliness) {
228 u32 scale = beta_scale; 228 u32 scale = beta_scale;
229 delta = (cwnd * scale) >> 3; 229 delta = (cwnd * scale) >> 3;
230 while (ca->ack_cnt > delta) { /* update tcp cwnd */ 230 while (ca->ack_cnt > delta) { /* update tcp cwnd */
231 ca->ack_cnt -= delta; 231 ca->ack_cnt -= delta;
232 ca->tcp_cwnd++; 232 ca->tcp_cwnd++;
233 } 233 }
234 234
235 if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */ 235 if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */
@@ -238,7 +238,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
238 if (ca->cnt > max_cnt) 238 if (ca->cnt > max_cnt)
239 ca->cnt = max_cnt; 239 ca->cnt = max_cnt;
240 } 240 }
241 } 241 }
242 242
243 ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; 243 ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
244 if (ca->cnt == 0) /* cannot be zero */ 244 if (ca->cnt == 0) /* cannot be zero */
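
Annotation (not part of the patch): bictcp_update() above is a fixed-point evaluation of the cubic curve W(t) = C*(t-K)^3 + W_max — offs is |t-K| in BICTCP_HZ units and delta is C*offs^3 after the shifts, with the "below origin"/"above origin" branches picking the sign. A floating-point rendering with made-up constants; C, beta and W_max here are illustrative, not the module parameters the kernel derives from bic_scale:

    #include <stdio.h>
    #include <math.h>    /* link with -lm */

    int main(void)
    {
            double C    = 0.4;     /* illustrative cubic scaling constant           */
            double beta = 0.8;     /* illustrative fraction of cwnd kept after loss */
            double Wmax = 100.0;   /* cwnd (packets) at the last loss event         */
            double K    = cbrt(Wmax * (1.0 - beta) / C);  /* time to climb back to Wmax */

            for (double t = 0.0; t <= 2.0 * K; t += K / 4.0)
                    printf("t = %5.2f s   bic_target = %6.1f\n",
                           t, C * pow(t - K, 3.0) + Wmax);
            return 0;
    }

The curve is concave approaching W_max and convex past it; at t = K the target passes exactly through W_max, which is the origin-point logic in the hunk.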
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index c4fc811bf377..a291097fcc0a 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -14,8 +14,8 @@
14 * with fixed-point MD scaled <<8. 14 * with fixed-point MD scaled <<8.
15 */ 15 */
16static const struct hstcp_aimd_val { 16static const struct hstcp_aimd_val {
17 unsigned int cwnd; 17 unsigned int cwnd;
18 unsigned int md; 18 unsigned int md;
19} hstcp_aimd_vals[] = { 19} hstcp_aimd_vals[] = {
20 { 38, 128, /* 0.50 */ }, 20 { 38, 128, /* 0.50 */ },
21 { 118, 112, /* 0.44 */ }, 21 { 118, 112, /* 0.44 */ },
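
Annotation (not part of the patch): in hstcp_aimd_vals the md column is the multiplicative-decrease value in 8-bit fixed point, i.e. md/256, which is what the inline /* 0.50 */ and /* 0.44 */ comments spell out. A two-row check of the values quoted in the hunk:

    #include <stdio.h>

    int main(void)
    {
            struct { unsigned cwnd, md; } rows[] = { { 38, 128 }, { 118, 112 } };

            for (int i = 0; i < 2; i++)
                    printf("cwnd >= %3u -> MD factor %.2f\n",
                           rows[i].cwnd, rows[i].md / 256.0);
            return 0;
    }

128/256 = 0.50 and 112/256 ≈ 0.44, so larger windows back off by progressively less, which is the whole point of the HighSpeed TCP table.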
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 753987a1048f..63318b6e9d51 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -224,7 +224,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
224 if (!tcp_is_cwnd_limited(sk, in_flight)) 224 if (!tcp_is_cwnd_limited(sk, in_flight))
225 return; 225 return;
226 226
227 if (tp->snd_cwnd <= tp->snd_ssthresh) 227 if (tp->snd_cwnd <= tp->snd_ssthresh)
228 tcp_slow_start(tp); 228 tcp_slow_start(tp);
229 else { 229 else {
230 230
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c6109895bb5e..1a14191687ac 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -50,9 +50,9 @@
50 * Andi Kleen: Make sure we never ack data there is not 50 * Andi Kleen: Make sure we never ack data there is not
51 * enough room for. Also make this condition 51 * enough room for. Also make this condition
52 * a fatal error if it might still happen. 52 * a fatal error if it might still happen.
53 * Andi Kleen: Add tcp_measure_rcv_mss to make 53 * Andi Kleen: Add tcp_measure_rcv_mss to make
54 * connections with MSS<min(MTU,ann. MSS) 54 * connections with MSS<min(MTU,ann. MSS)
55 * work without delayed acks. 55 * work without delayed acks.
56 * Andi Kleen: Process packets with PSH set in the 56 * Andi Kleen: Process packets with PSH set in the
57 * fast path. 57 * fast path.
58 * J Hadi Salim: ECN support 58 * J Hadi Salim: ECN support
@@ -112,17 +112,17 @@ int sysctl_tcp_abc __read_mostly;
112 112
113#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) 113#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
114 114
115/* Adapt the MSS value used to make delayed ack decision to the 115/* Adapt the MSS value used to make delayed ack decision to the
116 * real world. 116 * real world.
117 */ 117 */
118static void tcp_measure_rcv_mss(struct sock *sk, 118static void tcp_measure_rcv_mss(struct sock *sk,
119 const struct sk_buff *skb) 119 const struct sk_buff *skb)
120{ 120{
121 struct inet_connection_sock *icsk = inet_csk(sk); 121 struct inet_connection_sock *icsk = inet_csk(sk);
122 const unsigned int lss = icsk->icsk_ack.last_seg_size; 122 const unsigned int lss = icsk->icsk_ack.last_seg_size;
123 unsigned int len; 123 unsigned int len;
124 124
125 icsk->icsk_ack.last_seg_size = 0; 125 icsk->icsk_ack.last_seg_size = 0;
126 126
127 /* skb->len may jitter because of SACKs, even if peer 127 /* skb->len may jitter because of SACKs, even if peer
128 * sends good full-sized frames. 128 * sends good full-sized frames.
@@ -440,15 +440,15 @@ void tcp_rcv_space_adjust(struct sock *sk)
440 struct tcp_sock *tp = tcp_sk(sk); 440 struct tcp_sock *tp = tcp_sk(sk);
441 int time; 441 int time;
442 int space; 442 int space;
443 443
444 if (tp->rcvq_space.time == 0) 444 if (tp->rcvq_space.time == 0)
445 goto new_measure; 445 goto new_measure;
446 446
447 time = tcp_time_stamp - tp->rcvq_space.time; 447 time = tcp_time_stamp - tp->rcvq_space.time;
448 if (time < (tp->rcv_rtt_est.rtt >> 3) || 448 if (time < (tp->rcv_rtt_est.rtt >> 3) ||
449 tp->rcv_rtt_est.rtt == 0) 449 tp->rcv_rtt_est.rtt == 0)
450 return; 450 return;
451 451
452 space = 2 * (tp->copied_seq - tp->rcvq_space.seq); 452 space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
453 453
454 space = max(tp->rcvq_space.space, space); 454 space = max(tp->rcvq_space.space, space);
@@ -483,7 +483,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
483 } 483 }
484 } 484 }
485 } 485 }
486 486
487new_measure: 487new_measure:
488 tp->rcvq_space.seq = tp->copied_seq; 488 tp->rcvq_space.seq = tp->copied_seq;
489 tp->rcvq_space.time = tcp_time_stamp; 489 tp->rcvq_space.time = tcp_time_stamp;
@@ -509,7 +509,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
509 tcp_measure_rcv_mss(sk, skb); 509 tcp_measure_rcv_mss(sk, skb);
510 510
511 tcp_rcv_rtt_measure(tp); 511 tcp_rcv_rtt_measure(tp);
512 512
513 now = tcp_time_stamp; 513 now = tcp_time_stamp;
514 514
515 if (!icsk->icsk_ack.ato) { 515 if (!icsk->icsk_ack.ato) {
@@ -561,7 +561,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
561 /* The following amusing code comes from Jacobson's 561 /* The following amusing code comes from Jacobson's
562 * article in SIGCOMM '88. Note that rtt and mdev 562 * article in SIGCOMM '88. Note that rtt and mdev
563 * are scaled versions of rtt and mean deviation. 563 * are scaled versions of rtt and mean deviation.
564 * This is designed to be as fast as possible 564 * This is designed to be as fast as possible
565 * m stands for "measurement". 565 * m stands for "measurement".
566 * 566 *
567 * On a 1990 paper the rto value is changed to: 567 * On a 1990 paper the rto value is changed to:
@@ -1249,8 +1249,8 @@ void tcp_enter_frto(struct sock *sk)
1249 tp->frto_counter = 1; 1249 tp->frto_counter = 1;
1250 1250
1251 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1251 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1252 tp->snd_una == tp->high_seq || 1252 tp->snd_una == tp->high_seq ||
1253 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1253 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1254 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1254 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1255 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1255 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1256 tcp_ca_event(sk, CA_EVENT_FRTO); 1256 tcp_ca_event(sk, CA_EVENT_FRTO);
@@ -1969,11 +1969,11 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1969 * 1. Reno does not count dupacks (sacked_out) automatically. */ 1969 * 1. Reno does not count dupacks (sacked_out) automatically. */
1970 if (!tp->packets_out) 1970 if (!tp->packets_out)
1971 tp->sacked_out = 0; 1971 tp->sacked_out = 0;
1972 /* 2. SACK counts snd_fack in packets inaccurately. */ 1972 /* 2. SACK counts snd_fack in packets inaccurately. */
1973 if (tp->sacked_out == 0) 1973 if (tp->sacked_out == 0)
1974 tp->fackets_out = 0; 1974 tp->fackets_out = 0;
1975 1975
1976 /* Now state machine starts. 1976 /* Now state machine starts.
1977 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 1977 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
1978 if (flag&FLAG_ECE) 1978 if (flag&FLAG_ECE)
1979 tp->prior_ssthresh = 0; 1979 tp->prior_ssthresh = 0;
@@ -2203,7 +2203,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
2203 __u32 now, __s32 *seq_rtt) 2203 __u32 now, __s32 *seq_rtt)
2204{ 2204{
2205 struct tcp_sock *tp = tcp_sk(sk); 2205 struct tcp_sock *tp = tcp_sk(sk);
2206 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 2206 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
2207 __u32 seq = tp->snd_una; 2207 __u32 seq = tp->snd_una;
2208 __u32 packets_acked; 2208 __u32 packets_acked;
2209 int acked = 0; 2209 int acked = 0;
@@ -2279,7 +2279,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2279 2279
2280 while ((skb = skb_peek(&sk->sk_write_queue)) && 2280 while ((skb = skb_peek(&sk->sk_write_queue)) &&
2281 skb != sk->sk_send_head) { 2281 skb != sk->sk_send_head) {
2282 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 2282 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
2283 __u8 sacked = scb->sacked; 2283 __u8 sacked = scb->sacked;
2284 2284
2285 /* If our packet is before the ack sequence we can 2285 /* If our packet is before the ack sequence we can
@@ -2470,9 +2470,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
2470static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) 2470static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
2471{ 2471{
2472 struct tcp_sock *tp = tcp_sk(sk); 2472 struct tcp_sock *tp = tcp_sk(sk);
2473 2473
2474 tcp_sync_left_out(tp); 2474 tcp_sync_left_out(tp);
2475 2475
2476 if (tp->snd_una == prior_snd_una || 2476 if (tp->snd_una == prior_snd_una ||
2477 !before(tp->snd_una, tp->frto_highmark)) { 2477 !before(tp->snd_una, tp->frto_highmark)) {
2478 /* RTO was caused by loss, start retransmitting in 2478 /* RTO was caused by loss, start retransmitting in
@@ -2627,7 +2627,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
2627 opt_rx->saw_tstamp = 0; 2627 opt_rx->saw_tstamp = 0;
2628 2628
2629 while(length>0) { 2629 while(length>0) {
2630 int opcode=*ptr++; 2630 int opcode=*ptr++;
2631 int opsize; 2631 int opsize;
2632 2632
2633 switch (opcode) { 2633 switch (opcode) {
@@ -2642,7 +2642,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
2642 return; 2642 return;
2643 if (opsize > length) 2643 if (opsize > length)
2644 return; /* don't parse partial options */ 2644 return; /* don't parse partial options */
2645 switch(opcode) { 2645 switch(opcode) {
2646 case TCPOPT_MSS: 2646 case TCPOPT_MSS:
2647 if(opsize==TCPOLEN_MSS && th->syn && !estab) { 2647 if(opsize==TCPOLEN_MSS && th->syn && !estab) {
2648 u16 in_mss = ntohs(get_unaligned((__be16 *)ptr)); 2648 u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
@@ -2701,10 +2701,10 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
2701 */ 2701 */
2702 break; 2702 break;
2703#endif 2703#endif
2704 }; 2704 };
2705 ptr+=opsize-2; 2705 ptr+=opsize-2;
2706 length-=opsize; 2706 length-=opsize;
2707 }; 2707 };
2708 } 2708 }
2709} 2709}
2710 2710
@@ -3263,7 +3263,7 @@ drop:
3263 TCP_SKB_CB(skb)->end_seq); 3263 TCP_SKB_CB(skb)->end_seq);
3264 3264
3265 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 3265 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
3266 3266
3267 /* If window is closed, drop tail of packet. But after 3267 /* If window is closed, drop tail of packet. But after
3268 * remembering D-SACK for its head made in previous line. 3268 * remembering D-SACK for its head made in previous line.
3269 */ 3269 */
@@ -3342,7 +3342,7 @@ drop:
3342 } 3342 }
3343 } 3343 }
3344 __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); 3344 __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
3345 3345
3346 /* And clean segments covered by new one as whole. */ 3346 /* And clean segments covered by new one as whole. */
3347 while ((skb1 = skb->next) != 3347 while ((skb1 = skb->next) !=
3348 (struct sk_buff*)&tp->out_of_order_queue && 3348 (struct sk_buff*)&tp->out_of_order_queue &&
@@ -3507,7 +3507,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
3507 */ 3507 */
3508static int tcp_prune_queue(struct sock *sk) 3508static int tcp_prune_queue(struct sock *sk)
3509{ 3509{
3510 struct tcp_sock *tp = tcp_sk(sk); 3510 struct tcp_sock *tp = tcp_sk(sk);
3511 3511
3512 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 3512 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
3513 3513
@@ -3617,7 +3617,7 @@ static void tcp_new_space(struct sock *sk)
3617 struct tcp_sock *tp = tcp_sk(sk); 3617 struct tcp_sock *tp = tcp_sk(sk);
3618 3618
3619 if (tcp_should_expand_sndbuf(sk, tp)) { 3619 if (tcp_should_expand_sndbuf(sk, tp)) {
3620 int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + 3620 int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
3621 MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), 3621 MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
3622 demanded = max_t(unsigned int, tp->snd_cwnd, 3622 demanded = max_t(unsigned int, tp->snd_cwnd,
3623 tp->reordering + 1); 3623 tp->reordering + 1);
@@ -3690,7 +3690,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
3690 * For 1003.1g we should support a new option TCP_STDURG to permit 3690 * For 1003.1g we should support a new option TCP_STDURG to permit
3691 * either form (or just set the sysctl tcp_stdurg). 3691 * either form (or just set the sysctl tcp_stdurg).
3692 */ 3692 */
3693 3693
3694static void tcp_check_urg(struct sock * sk, struct tcphdr * th) 3694static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3695{ 3695{
3696 struct tcp_sock *tp = tcp_sk(sk); 3696 struct tcp_sock *tp = tcp_sk(sk);
@@ -3771,7 +3771,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
3771 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 3771 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
3772 th->syn; 3772 th->syn;
3773 3773
3774 /* Is the urgent pointer pointing into this packet? */ 3774 /* Is the urgent pointer pointing into this packet? */
3775 if (ptr < skb->len) { 3775 if (ptr < skb->len) {
3776 u8 tmp; 3776 u8 tmp;
3777 if (skb_copy_bits(skb, ptr, &tmp, 1)) 3777 if (skb_copy_bits(skb, ptr, &tmp, 1))
@@ -3835,7 +3835,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
3835 int copied_early = 0; 3835 int copied_early = 0;
3836 3836
3837 if (tp->ucopy.wakeup) 3837 if (tp->ucopy.wakeup)
3838 return 0; 3838 return 0;
3839 3839
3840 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 3840 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
3841 tp->ucopy.dma_chan = get_softnet_dma(); 3841 tp->ucopy.dma_chan = get_softnet_dma();
@@ -3871,26 +3871,26 @@ out:
3871#endif /* CONFIG_NET_DMA */ 3871#endif /* CONFIG_NET_DMA */
3872 3872
3873/* 3873/*
3874 * TCP receive function for the ESTABLISHED state. 3874 * TCP receive function for the ESTABLISHED state.
3875 * 3875 *
3876 * It is split into a fast path and a slow path. The fast path is 3876 * It is split into a fast path and a slow path. The fast path is
3877 * disabled when: 3877 * disabled when:
3878 * - A zero window was announced from us - zero window probing 3878 * - A zero window was announced from us - zero window probing
3879 * is only handled properly in the slow path. 3879 * is only handled properly in the slow path.
3880 * - Out of order segments arrived. 3880 * - Out of order segments arrived.
3881 * - Urgent data is expected. 3881 * - Urgent data is expected.
3882 * - There is no buffer space left 3882 * - There is no buffer space left
3883 * - Unexpected TCP flags/window values/header lengths are received 3883 * - Unexpected TCP flags/window values/header lengths are received
3884 * (detected by checking the TCP header against pred_flags) 3884 * (detected by checking the TCP header against pred_flags)
3885 * - Data is sent in both directions. Fast path only supports pure senders 3885 * - Data is sent in both directions. Fast path only supports pure senders
3886 * or pure receivers (this means either the sequence number or the ack 3886 * or pure receivers (this means either the sequence number or the ack
3887 * value must stay constant) 3887 * value must stay constant)
3888 * - Unexpected TCP option. 3888 * - Unexpected TCP option.
3889 * 3889 *
3890 * When these conditions are not satisfied it drops into a standard 3890 * When these conditions are not satisfied it drops into a standard
3891 * receive procedure patterned after RFC793 to handle all cases. 3891 * receive procedure patterned after RFC793 to handle all cases.
3892 * The first three cases are guaranteed by proper pred_flags setting, 3892 * The first three cases are guaranteed by proper pred_flags setting,
3893 * the rest is checked inline. Fast processing is turned on in 3893 * the rest is checked inline. Fast processing is turned on in
3894 * tcp_data_queue when everything is OK. 3894 * tcp_data_queue when everything is OK.
3895 */ 3895 */
3896int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 3896int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
@@ -3900,15 +3900,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3900 3900
3901 /* 3901 /*
3902 * Header prediction. 3902 * Header prediction.
3903 * The code loosely follows the one in the famous 3903 * The code loosely follows the one in the famous
3904 * "30 instruction TCP receive" Van Jacobson mail. 3904 * "30 instruction TCP receive" Van Jacobson mail.
3905 * 3905 *
3906 * Van's trick is to deposit buffers into socket queue 3906 * Van's trick is to deposit buffers into socket queue
3907 * on a device interrupt, to call tcp_recv function 3907 * on a device interrupt, to call tcp_recv function
3908 * on the receive process context and checksum and copy 3908 * on the receive process context and checksum and copy
3909 * the buffer to user space. smart... 3909 * the buffer to user space. smart...
3910 * 3910 *
3911 * Our current scheme is not silly either but we take the 3911 * Our current scheme is not silly either but we take the
3912 * extra cost of the net_bh soft interrupt processing... 3912 * extra cost of the net_bh soft interrupt processing...
3913 * We do checksum and copy also but from device to kernel. 3913 * We do checksum and copy also but from device to kernel.
3914 */ 3914 */
@@ -3919,7 +3919,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3919 * if header_prediction is to be made 3919 * if header_prediction is to be made
3920 * 'S' will always be tp->tcp_header_len >> 2 3920 * 'S' will always be tp->tcp_header_len >> 2
3921 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 3921 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
3922 * turn it off (when there are holes in the receive 3922 * turn it off (when there are holes in the receive
3923 * space for instance) 3923 * space for instance)
3924 * PSH flag is ignored. 3924 * PSH flag is ignored.
3925 */ 3925 */
@@ -3943,7 +3943,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3943 goto slow_path; 3943 goto slow_path;
3944 3944
3945 tp->rx_opt.saw_tstamp = 1; 3945 tp->rx_opt.saw_tstamp = 1;
3946 ++ptr; 3946 ++ptr;
3947 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3947 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3948 ++ptr; 3948 ++ptr;
3949 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 3949 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
@@ -3975,7 +3975,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3975 * on entry. 3975 * on entry.
3976 */ 3976 */
3977 tcp_ack(sk, skb, 0); 3977 tcp_ack(sk, skb, 0);
3978 __kfree_skb(skb); 3978 __kfree_skb(skb);
3979 tcp_data_snd_check(sk, tp); 3979 tcp_data_snd_check(sk, tp);
3980 return 0; 3980 return 0;
3981 } else { /* Header too small */ 3981 } else { /* Header too small */
@@ -4393,11 +4393,11 @@ reset_and_undo:
4393 4393
4394/* 4394/*
4395 * This function implements the receiving procedure of RFC 793 for 4395 * This function implements the receiving procedure of RFC 793 for
4396 * all states except ESTABLISHED and TIME_WAIT. 4396 * all states except ESTABLISHED and TIME_WAIT.
4397 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be 4397 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
4398 * address independent. 4398 * address independent.
4399 */ 4399 */
4400 4400
4401int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 4401int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4402 struct tcphdr *th, unsigned len) 4402 struct tcphdr *th, unsigned len)
4403{ 4403{
@@ -4422,19 +4422,19 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4422 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 4422 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
4423 return 1; 4423 return 1;
4424 4424
4425 /* Now we have several options: In theory there is 4425 /* Now we have several options: In theory there is
4426 * nothing else in the frame. KA9Q has an option to 4426 * nothing else in the frame. KA9Q has an option to
4427 * send data with the syn, BSD accepts data with the 4427 * send data with the syn, BSD accepts data with the
4428 * syn up to the [to be] advertised window and 4428 * syn up to the [to be] advertised window and
4429 * Solaris 2.1 gives you a protocol error. For now 4429 * Solaris 2.1 gives you a protocol error. For now
4430 * we just ignore it, that fits the spec precisely 4430 * we just ignore it, that fits the spec precisely
4431 * and avoids incompatibilities. It would be nice in 4431 * and avoids incompatibilities. It would be nice in
4432 * future to drop through and process the data. 4432 * future to drop through and process the data.
4433 * 4433 *
4434 * Now that TTCP is starting to be used we ought to 4434 * Now that TTCP is starting to be used we ought to
4435 * queue this data. 4435 * queue this data.
4436 * But, this leaves one open to an easy denial of 4436 * But, this leaves one open to an easy denial of
4437 * service attack, and SYN cookies can't defend 4437 * service attack, and SYN cookies can't defend
4438 * against this problem. So, we drop the data 4438 * against this problem. So, we drop the data
4439 * in the interest of security over speed unless 4439 * in the interest of security over speed unless
4440 * it's still in use. 4440 * it's still in use.
@@ -4624,7 +4624,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4624 case TCP_FIN_WAIT1: 4624 case TCP_FIN_WAIT1:
4625 case TCP_FIN_WAIT2: 4625 case TCP_FIN_WAIT2:
4626 /* RFC 793 says to queue data in these states, 4626 /* RFC 793 says to queue data in these states,
4627 * RFC 1122 says we MUST send a reset. 4627 * RFC 1122 says we MUST send a reset.
4628 * BSD 4.4 also does reset. 4628 * BSD 4.4 also does reset.
4629 */ 4629 */
4630 if (sk->sk_shutdown & RCV_SHUTDOWN) { 4630 if (sk->sk_shutdown & RCV_SHUTDOWN) {
@@ -4636,7 +4636,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4636 } 4636 }
4637 } 4637 }
4638 /* Fall through */ 4638 /* Fall through */
4639 case TCP_ESTABLISHED: 4639 case TCP_ESTABLISHED:
4640 tcp_data_queue(sk, skb); 4640 tcp_data_queue(sk, skb);
4641 queued = 1; 4641 queued = 1;
4642 break; 4642 break;
@@ -4648,7 +4648,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4648 tcp_ack_snd_check(sk); 4648 tcp_ack_snd_check(sk);
4649 } 4649 }
4650 4650
4651 if (!queued) { 4651 if (!queued) {
4652discard: 4652discard:
4653 __kfree_skb(skb); 4653 __kfree_skb(skb);
4654 } 4654 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f51d6404c61c..0ba74bbe7d30 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -303,7 +303,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
303 /* We don't check in the destentry if pmtu discovery is forbidden 303 /* We don't check in the destentry if pmtu discovery is forbidden
304 * on this route. We just assume that no packet_to_big packets 304 * on this route. We just assume that no packet_to_big packets
305 * are send back when pmtu discovery is not active. 305 * are send back when pmtu discovery is not active.
306 * There is a small race when the user changes this flag in the 306 * There is a small race when the user changes this flag in the
307 * route, but I think that's acceptable. 307 * route, but I think that's acceptable.
308 */ 308 */
309 if ((dst = __sk_dst_check(sk, 0)) == NULL) 309 if ((dst = __sk_dst_check(sk, 0)) == NULL)
@@ -880,7 +880,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
880 880
881 if (md5sig->alloced4 == md5sig->entries4) { 881 if (md5sig->alloced4 == md5sig->entries4) {
882 keys = kmalloc((sizeof(*keys) * 882 keys = kmalloc((sizeof(*keys) *
883 (md5sig->entries4 + 1)), GFP_ATOMIC); 883 (md5sig->entries4 + 1)), GFP_ATOMIC);
884 if (!keys) { 884 if (!keys) {
885 kfree(newkey); 885 kfree(newkey);
886 tcp_free_md5sig_pool(); 886 tcp_free_md5sig_pool();
@@ -934,7 +934,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
934 memcpy(&tp->md5sig_info->keys4[i], 934 memcpy(&tp->md5sig_info->keys4[i],
935 &tp->md5sig_info->keys4[i+1], 935 &tp->md5sig_info->keys4[i+1],
936 (tp->md5sig_info->entries4 - i) * 936 (tp->md5sig_info->entries4 - i) *
937 sizeof(struct tcp4_md5sig_key)); 937 sizeof(struct tcp4_md5sig_key));
938 } 938 }
939 tcp_free_md5sig_pool(); 939 tcp_free_md5sig_pool();
940 return 0; 940 return 0;
@@ -1388,7 +1388,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1388 goto drop_and_free; 1388 goto drop_and_free;
1389 1389
1390 if (want_cookie) { 1390 if (want_cookie) {
1391 reqsk_free(req); 1391 reqsk_free(req);
1392 } else { 1392 } else {
1393 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1393 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1394 } 1394 }
@@ -1704,7 +1704,7 @@ bad_packet:
1704discard_it: 1704discard_it:
1705 /* Discard frame. */ 1705 /* Discard frame. */
1706 kfree_skb(skb); 1706 kfree_skb(skb);
1707 return 0; 1707 return 0;
1708 1708
1709discard_and_relse: 1709discard_and_relse:
1710 sock_put(sk); 1710 sock_put(sk);
@@ -1890,10 +1890,10 @@ int tcp_v4_destroy_sock(struct sock *sk)
1890 tcp_cleanup_congestion_control(sk); 1890 tcp_cleanup_congestion_control(sk);
1891 1891
1892 /* Cleanup up the write buffer. */ 1892 /* Cleanup up the write buffer. */
1893 sk_stream_writequeue_purge(sk); 1893 sk_stream_writequeue_purge(sk);
1894 1894
1895 /* Cleans up our, hopefully empty, out_of_order_queue. */ 1895 /* Cleans up our, hopefully empty, out_of_order_queue. */
1896 __skb_queue_purge(&tp->out_of_order_queue); 1896 __skb_queue_purge(&tp->out_of_order_queue);
1897 1897
1898#ifdef CONFIG_TCP_MD5SIG 1898#ifdef CONFIG_TCP_MD5SIG
1899 /* Clean up the MD5 key list, if any */ 1899 /* Clean up the MD5 key list, if any */
@@ -1906,7 +1906,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
1906 1906
1907#ifdef CONFIG_NET_DMA 1907#ifdef CONFIG_NET_DMA
1908 /* Cleans up our sk_async_wait_queue */ 1908 /* Cleans up our sk_async_wait_queue */
1909 __skb_queue_purge(&sk->sk_async_wait_queue); 1909 __skb_queue_purge(&sk->sk_async_wait_queue);
1910#endif 1910#endif
1911 1911
1912 /* Clean prequeue, it must be empty really */ 1912 /* Clean prequeue, it must be empty really */
@@ -1983,7 +1983,7 @@ get_req:
1983 st->state = TCP_SEQ_STATE_LISTENING; 1983 st->state = TCP_SEQ_STATE_LISTENING;
1984 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1984 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1985 } else { 1985 } else {
1986 icsk = inet_csk(sk); 1986 icsk = inet_csk(sk);
1987 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1987 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1988 if (reqsk_queue_len(&icsk->icsk_accept_queue)) 1988 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1989 goto start_req; 1989 goto start_req;
@@ -1996,7 +1996,7 @@ get_sk:
1996 cur = sk; 1996 cur = sk;
1997 goto out; 1997 goto out;
1998 } 1998 }
1999 icsk = inet_csk(sk); 1999 icsk = inet_csk(sk);
2000 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 2000 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2001 if (reqsk_queue_len(&icsk->icsk_accept_queue)) { 2001 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2002start_req: 2002start_req:
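
For illustration only — the tcp_v4_md5_do_add/do_del hunks above grow a per-peer key array by one slot when it is full and close the hole on delete. A self-contained sketch of the same grow-by-one pattern; struct key_list and its fields are hypothetical stand-ins, not the kernel structures.

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdint.h>

    struct key_entry {
            uint32_t addr;
            char     key[16];
    };

    struct key_list {
            struct key_entry *keys;
            unsigned int      entries;
            unsigned int      alloced;
    };

    static int key_add(struct key_list *l, uint32_t addr, const char *key)
    {
            if (l->alloced == l->entries) {
                    /* grow by one, copy the old entries, free the old block */
                    struct key_entry *bigger = malloc((l->entries + 1) * sizeof(*bigger));
                    if (!bigger)
                            return -1;
                    if (l->keys)
                            memcpy(bigger, l->keys, l->entries * sizeof(*bigger));
                    free(l->keys);
                    l->keys = bigger;
                    l->alloced = l->entries + 1;
            }
            l->keys[l->entries].addr = addr;
            strncpy(l->keys[l->entries].key, key, sizeof(l->keys[l->entries].key) - 1);
            l->keys[l->entries].key[sizeof(l->keys[l->entries].key) - 1] = '\0';
            l->entries++;
            return 0;
    }

    static int key_del(struct key_list *l, uint32_t addr)
    {
            for (unsigned int i = 0; i < l->entries; i++) {
                    if (l->keys[i].addr != addr)
                            continue;
                    /* close the gap by shifting the tail down one slot */
                    memmove(&l->keys[i], &l->keys[i + 1],
                            (l->entries - i - 1) * sizeof(*l->keys));
                    l->entries--;
                    return 0;
            }
            return -1;
    }

    int main(void)
    {
            struct key_list l = { 0 };
            key_add(&l, 0x0a000001, "alpha");
            key_add(&l, 0x0a000002, "beta");
            key_del(&l, 0x0a000001);
            printf("entries left: %u\n", l.entries);
            free(l.keys);
            return 0;
    }
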
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4a3889dd1943..30b1e520ad94 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -64,7 +64,7 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
64 return (seq == e_win && seq == end_seq); 64 return (seq == e_win && seq == end_seq);
65} 65}
66 66
67/* 67/*
68 * * Main purpose of TIME-WAIT state is to close connection gracefully, 68 * * Main purpose of TIME-WAIT state is to close connection gracefully,
69 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN 69 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
70 * (and, probably, tail of data) and one or more our ACKs are lost. 70 * (and, probably, tail of data) and one or more our ACKs are lost.
@@ -176,13 +176,13 @@ kill_with_rst:
176 * "When a connection is [...] on TIME-WAIT state [...] 176 * "When a connection is [...] on TIME-WAIT state [...]
177 * [a TCP] MAY accept a new SYN from the remote TCP to 177 * [a TCP] MAY accept a new SYN from the remote TCP to
178 * reopen the connection directly, if it: 178 * reopen the connection directly, if it:
179 * 179 *
180 * (1) assigns its initial sequence number for the new 180 * (1) assigns its initial sequence number for the new
181 * connection to be larger than the largest sequence 181 * connection to be larger than the largest sequence
182 * number it used on the previous connection incarnation, 182 * number it used on the previous connection incarnation,
183 * and 183 * and
184 * 184 *
185 * (2) returns to TIME-WAIT state if the SYN turns out 185 * (2) returns to TIME-WAIT state if the SYN turns out
186 * to be an old duplicate". 186 * to be an old duplicate".
187 */ 187 */
188 188
@@ -266,9 +266,9 @@ kill:
266 return TCP_TW_SUCCESS; 266 return TCP_TW_SUCCESS;
267} 267}
268 268
269/* 269/*
270 * Move a socket to time-wait or dead fin-wait-2 state. 270 * Move a socket to time-wait or dead fin-wait-2 state.
271 */ 271 */
272void tcp_time_wait(struct sock *sk, int state, int timeo) 272void tcp_time_wait(struct sock *sk, int state, int timeo)
273{ 273{
274 struct inet_timewait_sock *tw = NULL; 274 struct inet_timewait_sock *tw = NULL;
@@ -481,7 +481,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
481 return newsk; 481 return newsk;
482} 482}
483 483
484/* 484/*
485 * Process an incoming packet for SYN_RECV sockets represented 485 * Process an incoming packet for SYN_RECV sockets represented
486 * as a request_sock. 486 * as a request_sock.
487 */ 487 */
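
For illustration only — a sketch of the RFC 1122 rule quoted in the tcp_minisocks.c hunk above: a SYN arriving in TIME-WAIT may reopen the connection only if its ISN is beyond the last sequence number of the old incarnation, judged with a wrap-around-safe comparison. seq_after() mimics the kernel's after() helper but is written here from scratch.

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "sequence a comes after sequence b". */
    static int seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    static int timewait_accept_syn(uint32_t syn_isn, uint32_t last_rcv_nxt)
    {
            /* accept only if the new ISN advances past the old incarnation;
             * anything else is treated as an old duplicate */
            return seq_after(syn_isn, last_rcv_nxt);
    }

    int main(void)
    {
            uint32_t last = 0xfffffff0u;          /* close to sequence wrap */

            printf("ISN 0x00000010: %s\n",
                   timewait_accept_syn(0x10u, last) ? "accept" : "old duplicate");
            printf("ISN 0xffffff00: %s\n",
                   timewait_accept_syn(0xffffff00u, last) ? "accept" : "old duplicate");
            return 0;
    }
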
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 58b7111523f4..cebe9aa918a3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -198,7 +198,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
198 (*rcv_wscale) = 0; 198 (*rcv_wscale) = 0;
199 if (wscale_ok) { 199 if (wscale_ok) {
200 /* Set window scaling on max possible window 200 /* Set window scaling on max possible window
201 * See RFC1323 for an explanation of the limit to 14 201 * See RFC1323 for an explanation of the limit to 14
202 */ 202 */
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp); 204 space = min_t(u32, space, *window_clamp);
@@ -451,7 +451,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
451 (tp->rx_opt.eff_sacks * 451 (tp->rx_opt.eff_sacks *
452 TCPOLEN_SACK_PERBLOCK)); 452 TCPOLEN_SACK_PERBLOCK));
453 } 453 }
454 454
455 if (tcp_packets_in_flight(tp) == 0) 455 if (tcp_packets_in_flight(tp) == 0)
456 tcp_ca_event(sk, CA_EVENT_TX_START); 456 tcp_ca_event(sk, CA_EVENT_TX_START);
457 457
@@ -555,7 +555,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
555} 555}
556 556
557 557
558/* This routine just queue's the buffer 558/* This routine just queue's the buffer
559 * 559 *
560 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 560 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
561 * otherwise socket can stall. 561 * otherwise socket can stall.
@@ -597,7 +597,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
597 597
598/* Function to create two new TCP segments. Shrinks the given segment 598/* Function to create two new TCP segments. Shrinks the given segment
599 * to the specified size and appends a new segment with the rest of the 599 * to the specified size and appends a new segment with the rest of the
600 * packet to the list. This won't be called frequently, I hope. 600 * packet to the list. This won't be called frequently, I hope.
601 * Remember, these are still headerless SKBs at this point. 601 * Remember, these are still headerless SKBs at this point.
602 */ 602 */
603int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) 603int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
@@ -610,7 +610,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
610 610
611 BUG_ON(len > skb->len); 611 BUG_ON(len > skb->len);
612 612
613 clear_all_retrans_hints(tp); 613 clear_all_retrans_hints(tp);
614 nsize = skb_headlen(skb) - len; 614 nsize = skb_headlen(skb) - len;
615 if (nsize < 0) 615 if (nsize < 0)
616 nsize = 0; 616 nsize = 0;
@@ -821,7 +821,7 @@ void tcp_mtup_init(struct sock *sk)
821 821
822 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; 822 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
823 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 823 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
824 icsk->icsk_af_ops->net_header_len; 824 icsk->icsk_af_ops->net_header_len;
825 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 825 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
826 icsk->icsk_mtup.probe_size = 0; 826 icsk->icsk_mtup.probe_size = 0;
827} 827}
@@ -1008,7 +1008,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
1008 */ 1008 */
1009 1009
1010static inline int tcp_nagle_check(const struct tcp_sock *tp, 1010static inline int tcp_nagle_check(const struct tcp_sock *tp,
1011 const struct sk_buff *skb, 1011 const struct sk_buff *skb,
1012 unsigned mss_now, int nonagle) 1012 unsigned mss_now, int nonagle)
1013{ 1013{
1014 return (skb->len < mss_now && 1014 return (skb->len < mss_now &&
@@ -1078,7 +1078,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1078 return cwnd_quota; 1078 return cwnd_quota;
1079} 1079}
1080 1080
1081static inline int tcp_skb_is_last(const struct sock *sk, 1081static inline int tcp_skb_is_last(const struct sock *sk,
1082 const struct sk_buff *skb) 1082 const struct sk_buff *skb)
1083{ 1083{
1084 return skb->next == (struct sk_buff *)&sk->sk_write_queue; 1084 return skb->next == (struct sk_buff *)&sk->sk_write_queue;
@@ -1298,7 +1298,7 @@ static int tcp_mtu_probe(struct sock *sk)
1298 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1298 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1299 else 1299 else
1300 nskb->csum = skb_copy_and_csum_bits(skb, 0, 1300 nskb->csum = skb_copy_and_csum_bits(skb, 0,
1301 skb_put(nskb, copy), copy, nskb->csum); 1301 skb_put(nskb, copy), copy, nskb->csum);
1302 1302
1303 if (skb->len <= copy) { 1303 if (skb->len <= copy) {
1304 /* We've eaten all the data from this skb. 1304 /* We've eaten all the data from this skb.
@@ -1308,7 +1308,7 @@ static int tcp_mtu_probe(struct sock *sk)
1308 sk_stream_free_skb(sk, skb); 1308 sk_stream_free_skb(sk, skb);
1309 } else { 1309 } else {
1310 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & 1310 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1311 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 1311 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1312 if (!skb_shinfo(skb)->nr_frags) { 1312 if (!skb_shinfo(skb)->nr_frags) {
1313 skb_pull(skb, copy); 1313 skb_pull(skb, copy);
1314 if (skb->ip_summed != CHECKSUM_PARTIAL) 1314 if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1501,7 +1501,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
1501 1501
1502/* This function returns the amount that we can raise the 1502/* This function returns the amount that we can raise the
1503 * usable window based on the following constraints 1503 * usable window based on the following constraints
1504 * 1504 *
1505 * 1. The window can never be shrunk once it is offered (RFC 793) 1505 * 1. The window can never be shrunk once it is offered (RFC 793)
1506 * 2. We limit memory per socket 1506 * 2. We limit memory per socket
1507 * 1507 *
@@ -1520,12 +1520,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
1520 * side SWS prevention criteria. The problem is that under this rule 1520 * side SWS prevention criteria. The problem is that under this rule
1521 * a stream of single byte packets will cause the right side of the 1521 * a stream of single byte packets will cause the right side of the
1522 * window to always advance by a single byte. 1522 * window to always advance by a single byte.
1523 * 1523 *
1524 * Of course, if the sender implements sender side SWS prevention 1524 * Of course, if the sender implements sender side SWS prevention
1525 * then this will not be a problem. 1525 * then this will not be a problem.
1526 * 1526 *
1527 * BSD seems to make the following compromise: 1527 * BSD seems to make the following compromise:
1528 * 1528 *
1529 * If the free space is less than the 1/4 of the maximum 1529 * If the free space is less than the 1/4 of the maximum
1530 * space available and the free space is less than 1/2 mss, 1530 * space available and the free space is less than 1/2 mss,
1531 * then set the window to 0. 1531 * then set the window to 0.
@@ -1567,7 +1567,7 @@ u32 __tcp_select_window(struct sock *sk)
1567 int window; 1567 int window;
1568 1568
1569 if (mss > full_space) 1569 if (mss > full_space)
1570 mss = full_space; 1570 mss = full_space;
1571 1571
1572 if (free_space < full_space/2) { 1572 if (free_space < full_space/2) {
1573 icsk->icsk_ack.quick = 0; 1573 icsk->icsk_ack.quick = 0;
@@ -1691,9 +1691,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
1691} 1691}
1692 1692
1693/* Do a simple retransmit without using the backoff mechanisms in 1693/* Do a simple retransmit without using the backoff mechanisms in
1694 * tcp_timer. This is used for path mtu discovery. 1694 * tcp_timer. This is used for path mtu discovery.
1695 * The socket is already locked here. 1695 * The socket is already locked here.
1696 */ 1696 */
1697void tcp_simple_retransmit(struct sock *sk) 1697void tcp_simple_retransmit(struct sock *sk)
1698{ 1698{
1699 const struct inet_connection_sock *icsk = inet_csk(sk); 1699 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1703,7 +1703,7 @@ void tcp_simple_retransmit(struct sock *sk)
1703 int lost = 0; 1703 int lost = 0;
1704 1704
1705 sk_stream_for_retrans_queue(skb, sk) { 1705 sk_stream_for_retrans_queue(skb, sk) {
1706 if (skb->len > mss && 1706 if (skb->len > mss &&
1707 !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { 1707 !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1708 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 1708 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1709 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1709 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
@@ -1724,7 +1724,7 @@ void tcp_simple_retransmit(struct sock *sk)
1724 1724
1725 tcp_sync_left_out(tp); 1725 tcp_sync_left_out(tp);
1726 1726
1727 /* Don't muck with the congestion window here. 1727 /* Don't muck with the congestion window here.
1728 * Reason is that we do not increase amount of _data_ 1728 * Reason is that we do not increase amount of _data_
1729 * in network, but units changed and effective 1729 * in network, but units changed and effective
1730 * cwnd/ssthresh really reduced now. 1730 * cwnd/ssthresh really reduced now.
@@ -1747,7 +1747,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1747{ 1747{
1748 struct tcp_sock *tp = tcp_sk(sk); 1748 struct tcp_sock *tp = tcp_sk(sk);
1749 struct inet_connection_sock *icsk = inet_csk(sk); 1749 struct inet_connection_sock *icsk = inet_csk(sk);
1750 unsigned int cur_mss = tcp_current_mss(sk, 0); 1750 unsigned int cur_mss = tcp_current_mss(sk, 0);
1751 int err; 1751 int err;
1752 1752
1753 /* Inconslusive MTU probe */ 1753 /* Inconslusive MTU probe */
@@ -1984,10 +1984,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1984 */ 1984 */
1985void tcp_send_fin(struct sock *sk) 1985void tcp_send_fin(struct sock *sk)
1986{ 1986{
1987 struct tcp_sock *tp = tcp_sk(sk); 1987 struct tcp_sock *tp = tcp_sk(sk);
1988 struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); 1988 struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
1989 int mss_now; 1989 int mss_now;
1990 1990
1991 /* Optimization, tack on the FIN if we have a queue of 1991 /* Optimization, tack on the FIN if we have a queue of
1992 * unsent frames. But be careful about outgoing SACKS 1992 * unsent frames. But be careful about outgoing SACKS
1993 * and IP options. 1993 * and IP options.
@@ -2146,17 +2146,17 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2146 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2146 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2147 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2147 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2148 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2148 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2149 __u8 rcv_wscale; 2149 __u8 rcv_wscale;
2150 /* Set this up on the first call only */ 2150 /* Set this up on the first call only */
2151 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2151 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2152 /* tcp_full_space because it is guaranteed to be the first packet */ 2152 /* tcp_full_space because it is guaranteed to be the first packet */
2153 tcp_select_initial_window(tcp_full_space(sk), 2153 tcp_select_initial_window(tcp_full_space(sk),
2154 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2154 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2155 &req->rcv_wnd, 2155 &req->rcv_wnd,
2156 &req->window_clamp, 2156 &req->window_clamp,
2157 ireq->wscale_ok, 2157 ireq->wscale_ok,
2158 &rcv_wscale); 2158 &rcv_wscale);
2159 ireq->rcv_wscale = rcv_wscale; 2159 ireq->rcv_wscale = rcv_wscale;
2160 } 2160 }
2161 2161
2162 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2162 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
@@ -2192,9 +2192,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2192 return skb; 2192 return skb;
2193} 2193}
2194 2194
2195/* 2195/*
2196 * Do all connect socket setups that can be done AF independent. 2196 * Do all connect socket setups that can be done AF independent.
2197 */ 2197 */
2198static void tcp_connect_init(struct sock *sk) 2198static void tcp_connect_init(struct sock *sk)
2199{ 2199{
2200 struct dst_entry *dst = __sk_dst_get(sk); 2200 struct dst_entry *dst = __sk_dst_get(sk);
@@ -2251,7 +2251,7 @@ static void tcp_connect_init(struct sock *sk)
2251 2251
2252/* 2252/*
2253 * Build a SYN and send it off. 2253 * Build a SYN and send it off.
2254 */ 2254 */
2255int tcp_connect(struct sock *sk) 2255int tcp_connect(struct sock *sk)
2256{ 2256{
2257 struct tcp_sock *tp = tcp_sk(sk); 2257 struct tcp_sock *tp = tcp_sk(sk);
@@ -2409,7 +2409,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2409 2409
2410 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 2410 /* We don't queue it, tcp_transmit_skb() sets ownership. */
2411 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2411 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2412 if (skb == NULL) 2412 if (skb == NULL)
2413 return -1; 2413 return -1;
2414 2414
2415 /* Reserve space for headers and set control bits. */ 2415 /* Reserve space for headers and set control bits. */
@@ -2498,7 +2498,7 @@ void tcp_send_probe0(struct sock *sk)
2498 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2498 if (icsk->icsk_backoff < sysctl_tcp_retries2)
2499 icsk->icsk_backoff++; 2499 icsk->icsk_backoff++;
2500 icsk->icsk_probes_out++; 2500 icsk->icsk_probes_out++;
2501 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2501 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2502 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2502 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2503 TCP_RTO_MAX); 2503 TCP_RTO_MAX);
2504 } else { 2504 } else {
@@ -2510,7 +2510,7 @@ void tcp_send_probe0(struct sock *sk)
2510 */ 2510 */
2511 if (!icsk->icsk_probes_out) 2511 if (!icsk->icsk_probes_out)
2512 icsk->icsk_probes_out = 1; 2512 icsk->icsk_probes_out = 1;
2513 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2513 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2514 min(icsk->icsk_rto << icsk->icsk_backoff, 2514 min(icsk->icsk_rto << icsk->icsk_backoff,
2515 TCP_RESOURCE_PROBE_INTERVAL), 2515 TCP_RESOURCE_PROBE_INTERVAL),
2516 TCP_RTO_MAX); 2516 TCP_RTO_MAX);
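
For illustration only — the __tcp_select_window comments above describe the BSD-style compromise for receiver-side SWS avoidance. A simplified sketch of that rule; it models only the zero-window and round-to-MSS parts of the comment, not the full kernel routine (the "never shrink an offered window" constraint is left out).

    #include <stdio.h>

    /* If free space has fallen below a quarter of the total receive space and
     * below half an MSS, offer a zero window rather than dribbling out tiny
     * advances; otherwise round the offer down to whole segments. */
    static unsigned int select_window(unsigned int free_space,
                                      unsigned int full_space,
                                      unsigned int mss)
    {
            if (free_space < full_space / 4 && free_space < mss / 2)
                    return 0;
            return (free_space / mss) * mss;
    }

    int main(void)
    {
            printf("%u\n", select_window(300, 65535, 1460));   /* tiny -> 0 */
            printf("%u\n", select_window(20000, 65535, 1460)); /* 13 segments */
            return 0;
    }
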
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 3355c276b611..a9243cfc1bea 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -69,7 +69,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
69 struct tcp_sock *tp = tcp_sk(sk); 69 struct tcp_sock *tp = tcp_sk(sk);
70 int orphans = atomic_read(&tcp_orphan_count); 70 int orphans = atomic_read(&tcp_orphan_count);
71 71
72 /* If peer does not open window for long time, or did not transmit 72 /* If peer does not open window for long time, or did not transmit
73 * anything for long time, penalize it. */ 73 * anything for long time, penalize it. */
74 if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) 74 if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
75 orphans <<= 1; 75 orphans <<= 1;
@@ -137,7 +137,7 @@ static int tcp_write_timeout(struct sock *sk)
137 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 137 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
138 } else { 138 } else {
139 mss = min(sysctl_tcp_base_mss, 139 mss = min(sysctl_tcp_base_mss,
140 tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)/2); 140 tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)/2);
141 mss = max(mss, 68 - tp->tcp_header_len); 141 mss = max(mss, 68 - tp->tcp_header_len);
142 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 142 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
143 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 143 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -150,7 +150,7 @@ static int tcp_write_timeout(struct sock *sk)
150 retry_until = sysctl_tcp_retries2; 150 retry_until = sysctl_tcp_retries2;
151 if (sock_flag(sk, SOCK_DEAD)) { 151 if (sock_flag(sk, SOCK_DEAD)) {
152 const int alive = (icsk->icsk_rto < TCP_RTO_MAX); 152 const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
153 153
154 retry_until = tcp_orphan_retries(sk, alive); 154 retry_until = tcp_orphan_retries(sk, alive);
155 155
156 if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) 156 if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
@@ -257,7 +257,7 @@ static void tcp_probe_timer(struct sock *sk)
257 257
258 if (sock_flag(sk, SOCK_DEAD)) { 258 if (sock_flag(sk, SOCK_DEAD)) {
259 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); 259 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
260 260
261 max_probes = tcp_orphan_retries(sk, alive); 261 max_probes = tcp_orphan_retries(sk, alive);
262 262
263 if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes)) 263 if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
@@ -453,7 +453,7 @@ static void tcp_keepalive_timer (unsigned long data)
453 /* Only process if socket is not in use. */ 453 /* Only process if socket is not in use. */
454 bh_lock_sock(sk); 454 bh_lock_sock(sk);
455 if (sock_owned_by_user(sk)) { 455 if (sock_owned_by_user(sk)) {
456 /* Try again later. */ 456 /* Try again later. */
457 inet_csk_reset_keepalive_timer (sk, HZ/20); 457 inet_csk_reset_keepalive_timer (sk, HZ/20);
458 goto out; 458 goto out;
459 } 459 }
@@ -515,7 +515,7 @@ resched:
515 inet_csk_reset_keepalive_timer (sk, elapsed); 515 inet_csk_reset_keepalive_timer (sk, elapsed);
516 goto out; 516 goto out;
517 517
518death: 518death:
519 tcp_done(sk); 519 tcp_done(sk);
520 520
521out: 521out:
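
For illustration only — the probe0 and retransmit rearming shown above keeps doubling the timeout (rto shifted by the backoff count) but clamps it at TCP_RTO_MAX, so an unresponsive peer costs a bounded amount of timer work. A tiny sketch of that arithmetic with made-up millisecond values:

    #include <stdio.h>

    #define RTO_MAX_MS 120000u   /* stand-in for TCP_RTO_MAX */

    static unsigned int next_timeout(unsigned int rto_ms, unsigned int backoff)
    {
            unsigned long t = (unsigned long)rto_ms << backoff;

            return t > RTO_MAX_MS ? RTO_MAX_MS : (unsigned int)t;
    }

    int main(void)
    {
            for (unsigned int b = 0; b < 12; b++)
                    printf("backoff %u -> %u ms\n", b, next_timeout(200, b));
            return 0;
    }

The clamp is what keeps the series 200, 400, 800, ... from growing without bound once the backoff counter passes the point where a shift would exceed the maximum RTO.
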
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index ddc4bcc5785e..5c484dceb967 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -330,9 +330,9 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
330 vegas->minRTT = 0x7fffffff; 330 vegas->minRTT = 0x7fffffff;
331 } 331 }
332 /* Use normal slow start */ 332 /* Use normal slow start */
333 else if (tp->snd_cwnd <= tp->snd_ssthresh) 333 else if (tp->snd_cwnd <= tp->snd_ssthresh)
334 tcp_slow_start(tp); 334 tcp_slow_start(tp);
335 335
336} 336}
337 337
338/* Extract info for Tcp socket info provided via netlink. */ 338/* Extract info for Tcp socket info provided via netlink. */
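
For illustration only — the tcp_vegas.c hunk above falls back to tcp_slow_start() while snd_cwnd <= snd_ssthresh. A generic Reno-style sketch of that threshold check (grow one segment per ACK below the threshold, roughly one per RTT above it); this is not Vegas's RTT-based adjustment, only the surrounding control flow.

    #include <stdio.h>

    struct cc_state {
            unsigned int cwnd;      /* congestion window, in segments */
            unsigned int ssthresh;  /* slow start threshold, in segments */
            unsigned int acked;     /* ACK counter for congestion avoidance */
    };

    static void on_ack(struct cc_state *s)
    {
            if (s->cwnd <= s->ssthresh) {
                    s->cwnd++;                 /* slow start: per ACK */
            } else if (++s->acked >= s->cwnd) {
                    s->acked = 0;
                    s->cwnd++;                 /* avoidance: ~per RTT */
            }
    }

    int main(void)
    {
            struct cc_state s = { .cwnd = 1, .ssthresh = 8, .acked = 0 };

            for (int i = 0; i < 40; i++)
                    on_ack(&s);
            printf("cwnd after 40 ACKs: %u\n", s.cwnd);
            return 0;
    }
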
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 4f42a86c77f3..4e1b61032a9c 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -63,10 +63,10 @@ static void tcp_westwood_init(struct sock *sk)
63 struct westwood *w = inet_csk_ca(sk); 63 struct westwood *w = inet_csk_ca(sk);
64 64
65 w->bk = 0; 65 w->bk = 0;
66 w->bw_ns_est = 0; 66 w->bw_ns_est = 0;
67 w->bw_est = 0; 67 w->bw_est = 0;
68 w->accounted = 0; 68 w->accounted = 0;
69 w->cumul_ack = 0; 69 w->cumul_ack = 0;
70 w->reset_rtt_min = 1; 70 w->reset_rtt_min = 1;
71 w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT; 71 w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
72 w->rtt_win_sx = tcp_time_stamp; 72 w->rtt_win_sx = tcp_time_stamp;
@@ -121,7 +121,7 @@ static void westwood_update_window(struct sock *sk)
121 * to fix mismatch between tp->snd_una and w->snd_una for the first 121 * to fix mismatch between tp->snd_una and w->snd_una for the first
122 * bandwidth sample 122 * bandwidth sample
123 */ 123 */
124 if (w->first_ack) { 124 if (w->first_ack) {
125 w->snd_una = tcp_sk(sk)->snd_una; 125 w->snd_una = tcp_sk(sk)->snd_una;
126 w->first_ack = 0; 126 w->first_ack = 0;
127 } 127 }
@@ -147,7 +147,7 @@ static inline void update_rtt_min(struct westwood *w)
147{ 147{
148 if (w->reset_rtt_min) { 148 if (w->reset_rtt_min) {
149 w->rtt_min = w->rtt; 149 w->rtt_min = w->rtt;
150 w->reset_rtt_min = 0; 150 w->reset_rtt_min = 0;
151 } else 151 } else
152 w->rtt_min = min(w->rtt, w->rtt_min); 152 w->rtt_min = min(w->rtt, w->rtt_min);
153} 153}
@@ -183,15 +183,15 @@ static inline u32 westwood_acked_count(struct sock *sk)
183 183
184 w->cumul_ack = tp->snd_una - w->snd_una; 184 w->cumul_ack = tp->snd_una - w->snd_una;
185 185
186 /* If cumul_ack is 0 this is a dupack since it's not moving 186 /* If cumul_ack is 0 this is a dupack since it's not moving
187 * tp->snd_una. 187 * tp->snd_una.
188 */ 188 */
189 if (!w->cumul_ack) { 189 if (!w->cumul_ack) {
190 w->accounted += tp->mss_cache; 190 w->accounted += tp->mss_cache;
191 w->cumul_ack = tp->mss_cache; 191 w->cumul_ack = tp->mss_cache;
192 } 192 }
193 193
194 if (w->cumul_ack > tp->mss_cache) { 194 if (w->cumul_ack > tp->mss_cache) {
195 /* Partial or delayed ack */ 195 /* Partial or delayed ack */
196 if (w->accounted >= w->cumul_ack) { 196 if (w->accounted >= w->cumul_ack) {
197 w->accounted -= w->cumul_ack; 197 w->accounted -= w->cumul_ack;
@@ -237,7 +237,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
237 237
238 case CA_EVENT_FRTO: 238 case CA_EVENT_FRTO:
239 tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); 239 tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
240 /* Update RTT_min when next ack arrives */ 240 /* Update RTT_min when next ack arrives */
241 w->reset_rtt_min = 1; 241 w->reset_rtt_min = 1;
242 break; 242 break;
243 243
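
For illustration only — a sketch of the accounting described in westwood_acked_count above: an ACK that does not move snd_una is a dupack credited as one MSS of delivered data, and a jump larger than one MSS (a partial or delayed ACK) is corrected by whatever was already credited to dupacks. Field names are stand-ins for the kernel's struct westwood.

    #include <stdio.h>
    #include <stdint.h>

    struct ww {
            uint32_t snd_una;    /* last snd_una seen by the estimator */
            uint32_t accounted;  /* bytes already credited via dupacks */
            uint32_t mss;
    };

    static uint32_t acked_count(struct ww *w, uint32_t snd_una_now)
    {
            uint32_t cumul_ack = snd_una_now - w->snd_una;

            if (!cumul_ack) {                      /* dupack */
                    w->accounted += w->mss;
                    cumul_ack = w->mss;
            } else if (cumul_ack > w->mss) {       /* partial or delayed ack */
                    if (w->accounted >= cumul_ack) {
                            w->accounted -= cumul_ack;
                            cumul_ack = w->mss;
                    } else if (w->accounted) {
                            cumul_ack -= w->accounted;
                            w->accounted = 0;
                    }
            }
            w->snd_una = snd_una_now;
            return cumul_ack;
    }

    int main(void)
    {
            struct ww w = { .snd_una = 1000, .accounted = 0, .mss = 100 };

            printf("dupack credited as: %u\n", acked_count(&w, 1000));
            printf("delayed ack of 300 credited as: %u\n", acked_count(&w, 1300));
            return 0;
    }
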
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8b54c68a0d12..ce6c46034314 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -20,8 +20,8 @@
20 * for udp at least is 'valid'. 20 * for udp at least is 'valid'.
21 * Alan Cox : Fixed icmp handling properly 21 * Alan Cox : Fixed icmp handling properly
22 * Alan Cox : Correct error for oversized datagrams 22 * Alan Cox : Correct error for oversized datagrams
23 * Alan Cox : Tidied select() semantics. 23 * Alan Cox : Tidied select() semantics.
24 * Alan Cox : udp_err() fixed properly, also now 24 * Alan Cox : udp_err() fixed properly, also now
25 * select and read wake correctly on errors 25 * select and read wake correctly on errors
26 * Alan Cox : udp_send verify_area moved to avoid mem leak 26 * Alan Cox : udp_send verify_area moved to avoid mem leak
27 * Alan Cox : UDP can count its memory 27 * Alan Cox : UDP can count its memory
@@ -56,7 +56,7 @@
56 * does have a high hit rate. 56 * does have a high hit rate.
57 * Olaf Kirch : Don't linearise iovec on sendmsg. 57 * Olaf Kirch : Don't linearise iovec on sendmsg.
58 * Andi Kleen : Some cleanups, cache destination entry 58 * Andi Kleen : Some cleanups, cache destination entry
59 * for connect. 59 * for connect.
60 * Vitaly E. Lavrov : Transparent proxy revived after year coma. 60 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
61 * Melvin Smith : Check msg_name not msg_namelen in sendto(), 61 * Melvin Smith : Check msg_name not msg_namelen in sendto(),
62 * return ENOTCONN for unconnected sockets (POSIX) 62 * return ENOTCONN for unconnected sockets (POSIX)
@@ -77,7 +77,7 @@
77 * as published by the Free Software Foundation; either version 77 * as published by the Free Software Foundation; either version
78 * 2 of the License, or (at your option) any later version. 78 * 2 of the License, or (at your option) any later version.
79 */ 79 */
80 80
81#include <asm/system.h> 81#include <asm/system.h>
82#include <asm/uaccess.h> 82#include <asm/uaccess.h>
83#include <asm/ioctls.h> 83#include <asm/ioctls.h>
@@ -120,7 +120,7 @@ static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
120 struct hlist_node *node; 120 struct hlist_node *node;
121 121
122 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) 122 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
123 if (inet_sk(sk)->num == num) 123 if (sk->sk_hash == num)
124 return 1; 124 return 1;
125 return 0; 125 return 0;
126} 126}
@@ -191,7 +191,7 @@ gotit:
191 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; 191 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
192 192
193 sk_for_each(sk2, node, head) 193 sk_for_each(sk2, node, head)
194 if (inet_sk(sk2)->num == snum && 194 if (sk2->sk_hash == snum &&
195 sk2 != sk && 195 sk2 != sk &&
196 (!sk2->sk_reuse || !sk->sk_reuse) && 196 (!sk2->sk_reuse || !sk->sk_reuse) &&
197 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if 197 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
@@ -200,6 +200,7 @@ gotit:
200 goto fail; 200 goto fail;
201 } 201 }
202 inet_sk(sk)->num = snum; 202 inet_sk(sk)->num = snum;
203 sk->sk_hash = snum;
203 if (sk_unhashed(sk)) { 204 if (sk_unhashed(sk)) {
204 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; 205 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
205 sk_add_node(sk, head); 206 sk_add_node(sk, head);
@@ -247,7 +248,7 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
247 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 248 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
248 struct inet_sock *inet = inet_sk(sk); 249 struct inet_sock *inet = inet_sk(sk);
249 250
250 if (inet->num == hnum && !ipv6_only_sock(sk)) { 251 if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
251 int score = (sk->sk_family == PF_INET ? 1 : 0); 252 int score = (sk->sk_family == PF_INET ? 1 : 0);
252 if (inet->rcv_saddr) { 253 if (inet->rcv_saddr) {
253 if (inet->rcv_saddr != daddr) 254 if (inet->rcv_saddr != daddr)
@@ -296,7 +297,7 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
296 sk_for_each_from(s, node) { 297 sk_for_each_from(s, node) {
297 struct inet_sock *inet = inet_sk(s); 298 struct inet_sock *inet = inet_sk(s);
298 299
299 if (inet->num != hnum || 300 if (s->sk_hash != hnum ||
300 (inet->daddr && inet->daddr != rmt_addr) || 301 (inet->daddr && inet->daddr != rmt_addr) ||
301 (inet->dport != rmt_port && inet->dport) || 302 (inet->dport != rmt_port && inet->dport) ||
302 (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || 303 (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
@@ -306,17 +307,17 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
306 if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) 307 if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
307 continue; 308 continue;
308 goto found; 309 goto found;
309 } 310 }
310 s = NULL; 311 s = NULL;
311found: 312found:
312 return s; 313 return s;
313} 314}
314 315
315/* 316/*
316 * This routine is called by the ICMP module when it gets some 317 * This routine is called by the ICMP module when it gets some
317 * sort of error condition. If err < 0 then the socket should 318 * sort of error condition. If err < 0 then the socket should
318 * be closed and the error returned to the user. If err > 0 319 * be closed and the error returned to the user. If err > 0
319 * it's just the icmp type << 8 | icmp code. 320 * it's just the icmp type << 8 | icmp code.
320 * Header points to the ip header of the error packet. We move 321 * Header points to the ip header of the error packet. We move
321 * on past this. Then (as it used to claim before adjustment) 322 * on past this. Then (as it used to claim before adjustment)
322 * header points to the first 8 bytes of the udp header. We need 323 * header points to the first 8 bytes of the udp header. We need
@@ -338,7 +339,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
338 skb->dev->ifindex, udptable ); 339 skb->dev->ifindex, udptable );
339 if (sk == NULL) { 340 if (sk == NULL) {
340 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 341 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
341 return; /* No socket for error */ 342 return; /* No socket for error */
342 } 343 }
343 344
344 err = 0; 345 err = 0;
@@ -374,7 +375,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
374 } 375 }
375 376
376 /* 377 /*
377 * RFC1122: OK. Passes ICMP errors back to application, as per 378 * RFC1122: OK. Passes ICMP errors back to application, as per
378 * 4.1.3.3. 379 * 4.1.3.3.
379 */ 380 */
380 if (!inet->recverr) { 381 if (!inet->recverr) {
@@ -524,7 +525,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
524 if (len > 0xFFFF) 525 if (len > 0xFFFF)
525 return -EMSGSIZE; 526 return -EMSGSIZE;
526 527
527 /* 528 /*
528 * Check the flags. 529 * Check the flags.
529 */ 530 */
530 531
@@ -536,7 +537,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
536 if (up->pending) { 537 if (up->pending) {
537 /* 538 /*
538 * There are pending frames. 539 * There are pending frames.
539 * The socket lock must be held while it's corked. 540 * The socket lock must be held while it's corked.
540 */ 541 */
541 lock_sock(sk); 542 lock_sock(sk);
542 if (likely(up->pending)) { 543 if (likely(up->pending)) {
@@ -544,14 +545,14 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
544 release_sock(sk); 545 release_sock(sk);
545 return -EINVAL; 546 return -EINVAL;
546 } 547 }
547 goto do_append_data; 548 goto do_append_data;
548 } 549 }
549 release_sock(sk); 550 release_sock(sk);
550 } 551 }
551 ulen += sizeof(struct udphdr); 552 ulen += sizeof(struct udphdr);
552 553
553 /* 554 /*
554 * Get and verify the address. 555 * Get and verify the address.
555 */ 556 */
556 if (msg->msg_name) { 557 if (msg->msg_name) {
557 struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name; 558 struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
@@ -575,7 +576,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
575 Route will not be used, if at least one option is set. 576 Route will not be used, if at least one option is set.
576 */ 577 */
577 connected = 1; 578 connected = 1;
578 } 579 }
579 ipc.addr = inet->saddr; 580 ipc.addr = inet->saddr;
580 581
581 ipc.oif = sk->sk_bound_dev_if; 582 ipc.oif = sk->sk_bound_dev_if;
@@ -601,7 +602,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
601 } 602 }
602 tos = RT_TOS(inet->tos); 603 tos = RT_TOS(inet->tos);
603 if (sock_flag(sk, SOCK_LOCALROUTE) || 604 if (sock_flag(sk, SOCK_LOCALROUTE) ||
604 (msg->msg_flags & MSG_DONTROUTE) || 605 (msg->msg_flags & MSG_DONTROUTE) ||
605 (ipc.opt && ipc.opt->is_strictroute)) { 606 (ipc.opt && ipc.opt->is_strictroute)) {
606 tos |= RTO_ONLINK; 607 tos |= RTO_ONLINK;
607 connected = 0; 608 connected = 0;
@@ -761,10 +762,10 @@ out:
761/* 762/*
762 * IOCTL requests applicable to the UDP protocol 763 * IOCTL requests applicable to the UDP protocol
763 */ 764 */
764 765
765int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) 766int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
766{ 767{
767 switch(cmd) 768 switch(cmd)
768 { 769 {
769 case SIOCOUTQ: 770 case SIOCOUTQ:
770 { 771 {
@@ -804,11 +805,11 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
804 */ 805 */
805 806
806int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 807int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
807 size_t len, int noblock, int flags, int *addr_len) 808 size_t len, int noblock, int flags, int *addr_len)
808{ 809{
809 struct inet_sock *inet = inet_sk(sk); 810 struct inet_sock *inet = inet_sk(sk);
810 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 811 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
811 struct sk_buff *skb; 812 struct sk_buff *skb;
812 int copied, err, copy_only, is_udplite = IS_UDPLITE(sk); 813 int copied, err, copy_only, is_udplite = IS_UDPLITE(sk);
813 814
814 /* 815 /*
@@ -824,8 +825,8 @@ try_again:
824 skb = skb_recv_datagram(sk, flags, noblock, &err); 825 skb = skb_recv_datagram(sk, flags, noblock, &err);
825 if (!skb) 826 if (!skb)
826 goto out; 827 goto out;
827 828
828 copied = skb->len - sizeof(struct udphdr); 829 copied = skb->len - sizeof(struct udphdr);
829 if (copied > len) { 830 if (copied > len) {
830 copied = len; 831 copied = len;
831 msg->msg_flags |= MSG_TRUNC; 832 msg->msg_flags |= MSG_TRUNC;
@@ -868,18 +869,18 @@ try_again:
868 sin->sin_port = skb->h.uh->source; 869 sin->sin_port = skb->h.uh->source;
869 sin->sin_addr.s_addr = skb->nh.iph->saddr; 870 sin->sin_addr.s_addr = skb->nh.iph->saddr;
870 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 871 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
871 } 872 }
872 if (inet->cmsg_flags) 873 if (inet->cmsg_flags)
873 ip_cmsg_recv(msg, skb); 874 ip_cmsg_recv(msg, skb);
874 875
875 err = copied; 876 err = copied;
876 if (flags & MSG_TRUNC) 877 if (flags & MSG_TRUNC)
877 err = skb->len - sizeof(struct udphdr); 878 err = skb->len - sizeof(struct udphdr);
878 879
879out_free: 880out_free:
880 skb_free_datagram(sk, skb); 881 skb_free_datagram(sk, skb);
881out: 882out:
882 return err; 883 return err;
883 884
884csum_copy_err: 885csum_copy_err:
885 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); 886 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
@@ -887,7 +888,7 @@ csum_copy_err:
887 skb_kill_datagram(sk, skb, flags); 888 skb_kill_datagram(sk, skb, flags);
888 889
889 if (noblock) 890 if (noblock)
890 return -EAGAIN; 891 return -EAGAIN;
891 goto try_again; 892 goto try_again;
892} 893}
893 894
@@ -898,7 +899,7 @@ int udp_disconnect(struct sock *sk, int flags)
898 /* 899 /*
899 * 1003.1g - break association. 900 * 1003.1g - break association.
900 */ 901 */
901 902
902 sk->sk_state = TCP_CLOSE; 903 sk->sk_state = TCP_CLOSE;
903 inet->daddr = 0; 904 inet->daddr = 0;
904 inet->dport = 0; 905 inet->dport = 0;
@@ -922,13 +923,13 @@ int udp_disconnect(struct sock *sk, int flags)
922static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) 923static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
923{ 924{
924#ifndef CONFIG_XFRM 925#ifndef CONFIG_XFRM
925 return 1; 926 return 1;
926#else 927#else
927 struct udp_sock *up = udp_sk(sk); 928 struct udp_sock *up = udp_sk(sk);
928 struct udphdr *uh; 929 struct udphdr *uh;
929 struct iphdr *iph; 930 struct iphdr *iph;
930 int iphlen, len; 931 int iphlen, len;
931 932
932 __u8 *udpdata; 933 __u8 *udpdata;
933 __be32 *udpdata32; 934 __be32 *udpdata32;
934 __u16 encap_type = up->encap_type; 935 __u16 encap_type = up->encap_type;
@@ -971,7 +972,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
971 return 0; 972 return 0;
972 } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && 973 } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
973 udpdata32[0] == 0 && udpdata32[1] == 0) { 974 udpdata32[0] == 0 && udpdata32[1] == 0) {
974 975
975 /* ESP Packet with Non-IKE marker */ 976 /* ESP Packet with Non-IKE marker */
976 len = sizeof(struct udphdr) + 2 * sizeof(u32); 977 len = sizeof(struct udphdr) + 2 * sizeof(u32);
977 } else 978 } else
@@ -1187,14 +1188,14 @@ static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh)
1187} 1188}
1188 1189
1189/* 1190/*
1190 * All we need to do is get the socket, and then do a checksum. 1191 * All we need to do is get the socket, and then do a checksum.
1191 */ 1192 */
1192 1193
1193int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], 1194int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1194 int is_udplite) 1195 int is_udplite)
1195{ 1196{
1196 struct sock *sk; 1197 struct sock *sk;
1197 struct udphdr *uh = skb->h.uh; 1198 struct udphdr *uh = skb->h.uh;
1198 unsigned short ulen; 1199 unsigned short ulen;
1199 struct rtable *rt = (struct rtable*)skb->dst; 1200 struct rtable *rt = (struct rtable*)skb->dst;
1200 __be32 saddr = skb->nh.iph->saddr; 1201 __be32 saddr = skb->nh.iph->saddr;
@@ -1270,9 +1271,9 @@ short_packet:
1270 goto drop; 1271 goto drop;
1271 1272
1272csum_error: 1273csum_error:
1273 /* 1274 /*
1274 * RFC1122: OK. Discards the bad packet silently (as far as 1275 * RFC1122: OK. Discards the bad packet silently (as far as
1275 * the network is concerned, anyway) as per 4.1.3.4 (MUST). 1276 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1276 */ 1277 */
1277 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", 1278 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
1278 is_udplite? "-Lite" : "", 1279 is_udplite? "-Lite" : "",
@@ -1328,7 +1329,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1328 release_sock(sk); 1329 release_sock(sk);
1329 } 1330 }
1330 break; 1331 break;
1331 1332
1332 case UDP_ENCAP: 1333 case UDP_ENCAP:
1333 switch (val) { 1334 switch (val) {
1334 case 0: 1335 case 0:
@@ -1356,8 +1357,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1356 up->pcflag |= UDPLITE_SEND_CC; 1357 up->pcflag |= UDPLITE_SEND_CC;
1357 break; 1358 break;
1358 1359
1359 /* The receiver specifies a minimum checksum coverage value. To make 1360 /* The receiver specifies a minimum checksum coverage value. To make
1360 * sense, this should be set to at least 8 (as done below). If zero is 1361 * sense, this should be set to at least 8 (as done below). If zero is
1361 * used, this again means full checksum coverage. */ 1362 * used, this again means full checksum coverage. */
1362 case UDPLITE_RECV_CSCOV: 1363 case UDPLITE_RECV_CSCOV:
1363 if (!up->pcflag) /* Disable the option on UDP sockets */ 1364 if (!up->pcflag) /* Disable the option on UDP sockets */
@@ -1406,7 +1407,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
1406 return -EFAULT; 1407 return -EFAULT;
1407 1408
1408 len = min_t(unsigned int, len, sizeof(int)); 1409 len = min_t(unsigned int, len, sizeof(int));
1409 1410
1410 if(len < 0) 1411 if(len < 0)
1411 return -EINVAL; 1412 return -EINVAL;
1412 1413
@@ -1433,11 +1434,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
1433 return -ENOPROTOOPT; 1434 return -ENOPROTOOPT;
1434 }; 1435 };
1435 1436
1436 if(put_user(len, optlen)) 1437 if(put_user(len, optlen))
1437 return -EFAULT; 1438 return -EFAULT;
1438 if(copy_to_user(optval, &val,len)) 1439 if(copy_to_user(optval, &val,len))
1439 return -EFAULT; 1440 return -EFAULT;
1440 return 0; 1441 return 0;
1441} 1442}
1442 1443
1443int udp_getsockopt(struct sock *sk, int level, int optname, 1444int udp_getsockopt(struct sock *sk, int level, int optname,
@@ -1463,7 +1464,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
1463 * @sock - socket 1464 * @sock - socket
1464 * @wait - poll table 1465 * @wait - poll table
1465 * 1466 *
1466 * This is same as datagram poll, except for the special case of 1467 * This is same as datagram poll, except for the special case of
1467 * blocking sockets. If application is using a blocking fd 1468 * blocking sockets. If application is using a blocking fd
1468 * and a packet with checksum error is in the queue; 1469 * and a packet with checksum error is in the queue;
1469 * then it could get return from select indicating data available 1470 * then it could get return from select indicating data available
@@ -1502,11 +1503,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1502 } 1503 }
1503 1504
1504 return mask; 1505 return mask;
1505 1506
1506} 1507}
1507 1508
1508struct proto udp_prot = { 1509struct proto udp_prot = {
1509 .name = "UDP", 1510 .name = "UDP",
1510 .owner = THIS_MODULE, 1511 .owner = THIS_MODULE,
1511 .close = udp_lib_close, 1512 .close = udp_lib_close,
1512 .connect = ip4_datagram_connect, 1513 .connect = ip4_datagram_connect,
@@ -1670,7 +1671,7 @@ static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
1670 1671
1671 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" 1672 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1672 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", 1673 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
1673 bucket, src, srcp, dest, destp, sp->sk_state, 1674 bucket, src, srcp, dest, destp, sp->sk_state,
1674 atomic_read(&sp->sk_wmem_alloc), 1675 atomic_read(&sp->sk_wmem_alloc),
1675 atomic_read(&sp->sk_rmem_alloc), 1676 atomic_read(&sp->sk_rmem_alloc),
1676 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 1677 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
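
For illustration only — the functional change in the udp.c hunks above is keying the bucket walks on the cached sk->sk_hash (the bound port) instead of reaching into inet_sk(sk)->num, so the hot lookup loop only touches the generic socket. A toy hash-table version of that idea; all types and names here are hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    #define HTABLE_SIZE 128

    struct toy_sock {
            uint16_t         hash;   /* cached local port, the lookup key */
            uint32_t         daddr;  /* extra state, not needed for the fast check */
            struct toy_sock *next;
    };

    static struct toy_sock *buckets[HTABLE_SIZE];

    static void toy_bind(struct toy_sock *sk, uint16_t port)
    {
            sk->hash = port;
            sk->next = buckets[port & (HTABLE_SIZE - 1)];
            buckets[port & (HTABLE_SIZE - 1)] = sk;
    }

    static struct toy_sock *toy_lookup(uint16_t port)
    {
            struct toy_sock *sk;

            for (sk = buckets[port & (HTABLE_SIZE - 1)]; sk; sk = sk->next)
                    if (sk->hash == port)   /* cheap first-level check */
                            return sk;
            return NULL;
    }

    int main(void)
    {
            struct toy_sock a = { 0 }, b = { 0 };

            toy_bind(&a, 53);
            toy_bind(&b, 53 + HTABLE_SIZE);     /* same bucket, different port */
            printf("lookup 53:  %s\n", toy_lookup(53) == &a ? "a" : "miss");
            printf("lookup 181: %s\n", toy_lookup(53 + HTABLE_SIZE) == &b ? "b" : "miss");
            return 0;
    }

Keeping the key in the structure the loop already has in cache is the whole point: collisions still have to be resolved against the full address tuple, but the common miss case is rejected without an extra dereference.
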
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index f6f4277ba6dc..820a477cfaa6 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -10,7 +10,7 @@ extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []);
10 10
11extern int __udp_lib_get_port(struct sock *sk, unsigned short snum, 11extern int __udp_lib_get_port(struct sock *sk, unsigned short snum,
12 struct hlist_head udptable[], int *port_rover, 12 struct hlist_head udptable[], int *port_rover,
13 int (*)(const struct sock*,const struct sock*)); 13 int (*)(const struct sock*,const struct sock*));
14extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *); 14extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
15 15
16 16
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 8655d038364c..289146bdb8b0 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -6,7 +6,7 @@
6 * Split up af-specific portion 6 * Split up af-specific portion
7 * Derek Atkins <derek@ihtfp.com> 7 * Derek Atkins <derek@ihtfp.com>
8 * Add Encapsulation support 8 * Add Encapsulation support
9 * 9 *
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
@@ -42,7 +42,7 @@ static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
42 42
43 if (skb->dst == NULL) { 43 if (skb->dst == NULL) {
44 if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, 44 if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
45 skb->dev)) 45 skb->dev))
46 goto drop; 46 goto drop;
47 } 47 }
48 return dst_input(skb); 48 return dst_input(skb);
@@ -149,7 +149,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
149 ip_send_check(skb->nh.iph); 149 ip_send_check(skb->nh.iph);
150 150
151 NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL, 151 NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
152 xfrm4_rcv_encap_finish); 152 xfrm4_rcv_encap_finish);
153 return 0; 153 return 0;
154#else 154#else
155 return -skb->nh.iph->protocol; 155 return -skb->nh.iph->protocol;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 04403fb01a58..038ca160fe2c 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -1,7 +1,7 @@
/*
 * xfrm4_output.c - Common IPsec encapsulation code for IPv4.
 * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
@@ -28,7 +28,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
		goto out;

	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;

	if (!(iph->frag_off & htons(IP_DF)) || skb->local_df)
		goto out;

@@ -47,7 +47,7 @@ static int xfrm4_output_one(struct sk_buff *skb)
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err)
@@ -78,7 +78,7 @@ static int xfrm4_output_one(struct sk_buff *skb)
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		if (!(skb->dst = dst_pop(dst))) {
			err = -EHOSTUNREACH;
			goto error_nolock;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 699f27ce62ad..fef19c6bcb98 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -1,11 +1,11 @@
/*
 * xfrm4_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/compiler.h>
@@ -50,8 +50,8 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
		struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
		if (xdst->u.rt.fl.oif == fl->oif &&	/*XXX*/
		    xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
		    xdst->u.rt.fl.fl4_src == fl->fl4_src &&
		    xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
		    xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
			dst_clone(dst);
			break;
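
Note: the bundle lookup above keys each cached xfrm_dst on the IPv4 flow: output interface, destination, source and TOS. Below is a standalone sketch of that matching step; struct flow_key and its field names are a simplified stand-in for the kernel's struct flowi, not the real structure.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct flow_key {
	int      oif;     /* output interface index */
	uint32_t daddr;   /* fl4_dst, network byte order */
	uint32_t saddr;   /* fl4_src, network byte order */
	uint8_t  tos;     /* fl4_tos */
};

/* Mirrors the comparison in __xfrm4_find_bundle(): a cached bundle is reused
 * only if every component of the flow key matches. */
static bool bundle_matches(const struct flow_key *cached, const struct flow_key *fl)
{
	return cached->oif == fl->oif &&
	       cached->daddr == fl->daddr &&
	       cached->saddr == fl->saddr &&
	       cached->tos == fl->tos;
}

int main(void)
{
	struct flow_key cached = { 2, 0x0100000a, 0x0200000a, 0 };
	struct flow_key fl     = { 2, 0x0100000a, 0x0200000a, 0 };
	printf("match: %d\n", bundle_matches(&cached, &fl));
	return 0;
}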
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index f110af5b1319..1be6762b2d47 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -13,7 +13,7 @@
static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;

	iph = skb->nh.iph;
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index fe5e1d833871..77e56f2b1af2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3,7 +3,7 @@
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: addrconf.c,v 1.69 2001/10/31 21:55:54 davem Exp $ 9 * $Id: addrconf.c,v 1.69 2001/10/31 21:55:54 davem Exp $
@@ -105,7 +105,7 @@ static void addrconf_sysctl_unregister(struct ipv6_devconf *p);
105 105
106#ifdef CONFIG_IPV6_PRIVACY 106#ifdef CONFIG_IPV6_PRIVACY
107static int __ipv6_regen_rndid(struct inet6_dev *idev); 107static int __ipv6_regen_rndid(struct inet6_dev *idev);
108static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); 108static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
109static void ipv6_regen_rndid(unsigned long data); 109static void ipv6_regen_rndid(unsigned long data);
110 110
111static int desync_factor = MAX_DESYNC_FACTOR * HZ; 111static int desync_factor = MAX_DESYNC_FACTOR * HZ;
@@ -137,7 +137,7 @@ static void addrconf_rs_timer(unsigned long data);
137static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 137static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
138static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 138static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
139 139
140static void inet6_prefix_notify(int event, struct inet6_dev *idev, 140static void inet6_prefix_notify(int event, struct inet6_dev *idev,
141 struct prefix_info *pinfo); 141 struct prefix_info *pinfo);
142static int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev); 142static int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev);
143 143
@@ -241,7 +241,7 @@ int __ipv6_addr_type(const struct in6_addr *addr)
	 */
	if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&
	    (st & htonl(0xE0000000)) != htonl(0xE0000000))
		return (IPV6_ADDR_UNICAST |
			IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL));

	if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {
@@ -252,7 +252,7 @@ int __ipv6_addr_type(const struct in6_addr *addr)
	}

	if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
		return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST |
			IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL));	/* addr-select 3.1 */
	if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
		return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST |
@@ -272,11 +272,11 @@ int __ipv6_addr_type(const struct in6_addr *addr)
		}

		if (addr->s6_addr32[2] == htonl(0x0000ffff))
			return (IPV6_ADDR_MAPPED |
				IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL));	/* addr-select 3.3 */
	}

	return (IPV6_ADDR_RESERVED |
		IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL));	/* addr-select 3.4 */
}

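
Note: the masks in the __ipv6_addr_type() hunks above encode the usual classification rules: ff00::/8 is multicast, fe80::/10 link-local unicast, fec0::/10 site-local unicast, and ::ffff:0:0/96 carries IPv4-mapped addresses. Below is a userspace sketch of those first-word checks; it is intentionally much simpler than the kernel function and only covers the cases visible in this diff.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static const char *ipv6_kind(const struct in6_addr *a)
{
	uint32_t w0, w1, w2;

	memcpy(&w0, &a->s6_addr[0], 4);   /* 32-bit words, network byte order */
	memcpy(&w1, &a->s6_addr[4], 4);
	memcpy(&w2, &a->s6_addr[8], 4);

	if ((w0 & htonl(0xFF000000)) == htonl(0xFF000000))
		return "multicast";
	if ((w0 & htonl(0xFFC00000)) == htonl(0xFE800000))
		return "link-local unicast";
	if ((w0 & htonl(0xFFC00000)) == htonl(0xFEC00000))
		return "site-local unicast";
	if (w0 == 0 && w1 == 0 && w2 == htonl(0x0000ffff))
		return "IPv4-mapped";
	return "other (global or reserved)";
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
	printf("%s\n", ipv6_kind(&a));   /* IPv4-mapped */
	return 0;
}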
@@ -348,10 +348,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
348 if (dev->mtu < IPV6_MIN_MTU) 348 if (dev->mtu < IPV6_MIN_MTU)
349 return NULL; 349 return NULL;
350 350
351 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL); 351 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
352 352
353 if (ndev == NULL) 353 if (ndev == NULL)
354 return NULL; 354 return NULL;
355 355
356 rwlock_init(&ndev->lock); 356 rwlock_init(&ndev->lock);
357 ndev->dev = dev; 357 ndev->dev = dev;
@@ -462,7 +462,7 @@ static void dev_forward_change(struct inet6_dev *idev)
462 dev = idev->dev; 462 dev = idev->dev;
463 if (dev && (dev->flags & IFF_MULTICAST)) { 463 if (dev && (dev->flags & IFF_MULTICAST)) {
464 ipv6_addr_all_routers(&addr); 464 ipv6_addr_all_routers(&addr);
465 465
466 if (idev->cnf.forwarding) 466 if (idev->cnf.forwarding)
467 ipv6_dev_mc_inc(dev, &addr); 467 ipv6_dev_mc_inc(dev, &addr);
468 else 468 else
@@ -817,8 +817,8 @@ retry:
	tmp_valid_lft = min_t(__u32,
			      ifp->valid_lft,
			      idev->cnf.temp_valid_lft);
	tmp_prefered_lft = min_t(__u32,
				 ifp->prefered_lft,
				 idev->cnf.temp_prefered_lft - desync_factor / HZ);
	tmp_plen = ifp->prefix_len;
	max_addresses = idev->cnf.max_addresses;
@@ -828,7 +828,7 @@ retry:

	write_unlock(&idev->lock);
	ift = !max_addresses ||
	      ipv6_count_addresses(idev) < max_addresses ?
		ipv6_add_addr(idev, &addr, tmp_plen,
			ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK, IFA_F_TEMPORARY) : NULL;
	if (!ift || IS_ERR(ift)) {
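
Note: the lifetime computation above clamps the temporary (privacy) address lifetimes to the public address lifetimes and the per-device temp_* settings, with the preferred lifetime further shortened by the desync factor (which is kept in jiffies, hence the division by HZ). Below is a plain C restatement of that arithmetic with illustrative values; the kernel takes them from ifp and idev->cnf.

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Illustrative values, in seconds. */
	unsigned int valid_lft = 86400, prefered_lft = 14400;            /* public address */
	unsigned int temp_valid_lft = 604800, temp_prefered_lft = 86400; /* per-device settings */
	unsigned int desync_factor = 300;                                /* already divided by HZ */

	unsigned int tmp_valid_lft = min_u32(valid_lft, temp_valid_lft);
	unsigned int tmp_prefered_lft = min_u32(prefered_lft,
						temp_prefered_lft - desync_factor);

	printf("temporary address: valid %u s, preferred %u s\n",
	       tmp_valid_lft, tmp_prefered_lft);
	return 0;
}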
@@ -1160,7 +1160,7 @@ record_it:
1160 1160
1161 if (!ifa_result) 1161 if (!ifa_result)
1162 return -EADDRNOTAVAIL; 1162 return -EADDRNOTAVAIL;
1163 1163
1164 ipv6_addr_copy(saddr, &ifa_result->addr); 1164 ipv6_addr_copy(saddr, &ifa_result->addr);
1165 in6_ifa_put(ifa_result); 1165 in6_ifa_put(ifa_result);
1166 return 0; 1166 return 0;
@@ -1475,7 +1475,7 @@ regen:
1475 * - value 0 1475 * - value 0
1476 * - XXX: already assigned to an address on the device 1476 * - XXX: already assigned to an address on the device
1477 */ 1477 */
1478 if (idev->rndid[0] == 0xfd && 1478 if (idev->rndid[0] == 0xfd &&
1479 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff && 1479 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
1480 (idev->rndid[7]&0x80)) 1480 (idev->rndid[7]&0x80))
1481 goto regen; 1481 goto regen;
@@ -1502,9 +1502,9 @@ static void ipv6_regen_rndid(unsigned long data)
1502 1502
1503 if (__ipv6_regen_rndid(idev) < 0) 1503 if (__ipv6_regen_rndid(idev) < 0)
1504 goto out; 1504 goto out;
1505 1505
1506 expires = jiffies + 1506 expires = jiffies +
1507 idev->cnf.temp_prefered_lft * HZ - 1507 idev->cnf.temp_prefered_lft * HZ -
1508 idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - desync_factor; 1508 idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - desync_factor;
1509 if (time_before(expires, jiffies)) { 1509 if (time_before(expires, jiffies)) {
1510 printk(KERN_WARNING 1510 printk(KERN_WARNING
@@ -1630,12 +1630,12 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1630 struct inet6_dev *in6_dev; 1630 struct inet6_dev *in6_dev;
1631 1631
1632 pinfo = (struct prefix_info *) opt; 1632 pinfo = (struct prefix_info *) opt;
1633 1633
1634 if (len < sizeof(struct prefix_info)) { 1634 if (len < sizeof(struct prefix_info)) {
1635 ADBG(("addrconf: prefix option too short\n")); 1635 ADBG(("addrconf: prefix option too short\n"));
1636 return; 1636 return;
1637 } 1637 }
1638 1638
1639 /* 1639 /*
1640 * Validation checks ([ADDRCONF], page 19) 1640 * Validation checks ([ADDRCONF], page 19)
1641 */ 1641 */
@@ -1827,7 +1827,7 @@ ok:
1827 * When a new public address is created as described in [ADDRCONF], 1827 * When a new public address is created as described in [ADDRCONF],
1828 * also create a new temporary address. 1828 * also create a new temporary address.
1829 */ 1829 */
1830 read_unlock_bh(&in6_dev->lock); 1830 read_unlock_bh(&in6_dev->lock);
1831 ipv6_create_tempaddr(ifp, NULL); 1831 ipv6_create_tempaddr(ifp, NULL);
1832 } else { 1832 } else {
1833 read_unlock_bh(&in6_dev->lock); 1833 read_unlock_bh(&in6_dev->lock);
@@ -1913,14 +1913,14 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
1913 int scope; 1913 int scope;
1914 1914
1915 ASSERT_RTNL(); 1915 ASSERT_RTNL();
1916 1916
1917 /* check the lifetime */ 1917 /* check the lifetime */
1918 if (!valid_lft || prefered_lft > valid_lft) 1918 if (!valid_lft || prefered_lft > valid_lft)
1919 return -EINVAL; 1919 return -EINVAL;
1920 1920
1921 if ((dev = __dev_get_by_index(ifindex)) == NULL) 1921 if ((dev = __dev_get_by_index(ifindex)) == NULL)
1922 return -ENODEV; 1922 return -ENODEV;
1923 1923
1924 if ((idev = addrconf_add_dev(dev)) == NULL) 1924 if ((idev = addrconf_add_dev(dev)) == NULL)
1925 return -ENOBUFS; 1925 return -ENOBUFS;
1926 1926
@@ -1960,7 +1960,7 @@ static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen)
1960 struct inet6_ifaddr *ifp; 1960 struct inet6_ifaddr *ifp;
1961 struct inet6_dev *idev; 1961 struct inet6_dev *idev;
1962 struct net_device *dev; 1962 struct net_device *dev;
1963 1963
1964 if ((dev = __dev_get_by_index(ifindex)) == NULL) 1964 if ((dev = __dev_get_by_index(ifindex)) == NULL)
1965 return -ENODEV; 1965 return -ENODEV;
1966 1966
@@ -1973,7 +1973,7 @@ static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen)
1973 ipv6_addr_equal(pfx, &ifp->addr)) { 1973 ipv6_addr_equal(pfx, &ifp->addr)) {
1974 in6_ifa_hold(ifp); 1974 in6_ifa_hold(ifp);
1975 read_unlock_bh(&idev->lock); 1975 read_unlock_bh(&idev->lock);
1976 1976
1977 ipv6_del_addr(ifp); 1977 ipv6_del_addr(ifp);
1978 1978
1979 /* If the last address is deleted administratively, 1979 /* If the last address is deleted administratively,
@@ -1993,10 +1993,10 @@ int addrconf_add_ifaddr(void __user *arg)
1993{ 1993{
1994 struct in6_ifreq ireq; 1994 struct in6_ifreq ireq;
1995 int err; 1995 int err;
1996 1996
1997 if (!capable(CAP_NET_ADMIN)) 1997 if (!capable(CAP_NET_ADMIN))
1998 return -EPERM; 1998 return -EPERM;
1999 1999
2000 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 2000 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2001 return -EFAULT; 2001 return -EFAULT;
2002 2002
@@ -2011,7 +2011,7 @@ int addrconf_del_ifaddr(void __user *arg)
2011{ 2011{
2012 struct in6_ifreq ireq; 2012 struct in6_ifreq ireq;
2013 int err; 2013 int err;
2014 2014
2015 if (!capable(CAP_NET_ADMIN)) 2015 if (!capable(CAP_NET_ADMIN))
2016 return -EPERM; 2016 return -EPERM;
2017 2017
@@ -2056,7 +2056,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2056 return; 2056 return;
2057 } 2057 }
2058 2058
2059 for (dev = dev_base; dev != NULL; dev = dev->next) { 2059 for (dev = dev_base; dev != NULL; dev = dev->next) {
2060 struct in_device * in_dev = __in_dev_get_rtnl(dev); 2060 struct in_device * in_dev = __in_dev_get_rtnl(dev);
2061 if (in_dev && (dev->flags & IFF_UP)) { 2061 if (in_dev && (dev->flags & IFF_UP)) {
2062 struct in_ifaddr * ifa; 2062 struct in_ifaddr * ifa;
@@ -2091,7 +2091,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2091 } 2091 }
2092 } 2092 }
2093 } 2093 }
2094 } 2094 }
2095} 2095}
2096#endif 2096#endif
2097 2097
@@ -2137,7 +2137,7 @@ static void addrconf_dev_config(struct net_device *dev)
2137 2137
2138 ASSERT_RTNL(); 2138 ASSERT_RTNL();
2139 2139
2140 if ((dev->type != ARPHRD_ETHER) && 2140 if ((dev->type != ARPHRD_ETHER) &&
2141 (dev->type != ARPHRD_FDDI) && 2141 (dev->type != ARPHRD_FDDI) &&
2142 (dev->type != ARPHRD_IEEE802_TR) && 2142 (dev->type != ARPHRD_IEEE802_TR) &&
2143 (dev->type != ARPHRD_ARCNET) && 2143 (dev->type != ARPHRD_ARCNET) &&
@@ -2164,9 +2164,9 @@ static void addrconf_sit_config(struct net_device *dev)
2164 2164
2165 ASSERT_RTNL(); 2165 ASSERT_RTNL();
2166 2166
2167 /* 2167 /*
2168 * Configure the tunnel with one of our IPv4 2168 * Configure the tunnel with one of our IPv4
2169 * addresses... we should configure all of 2169 * addresses... we should configure all of
2170 * our v4 addrs in the tunnel 2170 * our v4 addrs in the tunnel
2171 */ 2171 */
2172 2172
@@ -2233,7 +2233,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
2233 ip6_tnl_add_linklocal(idev); 2233 ip6_tnl_add_linklocal(idev);
2234} 2234}
2235 2235
2236static int addrconf_notify(struct notifier_block *this, unsigned long event, 2236static int addrconf_notify(struct notifier_block *this, unsigned long event,
2237 void * data) 2237 void * data)
2238{ 2238{
2239 struct net_device *dev = (struct net_device *) data; 2239 struct net_device *dev = (struct net_device *) data;
@@ -2378,7 +2378,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2378 return -ENODEV; 2378 return -ENODEV;
2379 2379
2380 /* Step 1: remove reference to ipv6 device from parent device. 2380 /* Step 1: remove reference to ipv6 device from parent device.
2381 Do not dev_put! 2381 Do not dev_put!
2382 */ 2382 */
2383 if (how == 1) { 2383 if (how == 1) {
2384 idev->dead = 1; 2384 idev->dead = 1;
@@ -2461,7 +2461,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2461 /* Step 5: netlink notification of this interface */ 2461 /* Step 5: netlink notification of this interface */
2462 idev->tstamp = jiffies; 2462 idev->tstamp = jiffies;
2463 inet6_ifinfo_notify(RTM_DELLINK, idev); 2463 inet6_ifinfo_notify(RTM_DELLINK, idev);
2464 2464
2465 /* Shot the device (if unregistered) */ 2465 /* Shot the device (if unregistered) */
2466 2466
2467 if (how == 1) { 2467 if (how == 1) {
@@ -2850,8 +2850,8 @@ restart:
2850 age = (now - ifp->tstamp) / HZ; 2850 age = (now - ifp->tstamp) / HZ;
2851 2851
2852#ifdef CONFIG_IPV6_PRIVACY 2852#ifdef CONFIG_IPV6_PRIVACY
2853 regen_advance = ifp->idev->cnf.regen_max_retry * 2853 regen_advance = ifp->idev->cnf.regen_max_retry *
2854 ifp->idev->cnf.dad_transmits * 2854 ifp->idev->cnf.dad_transmits *
2855 ifp->idev->nd_parms->retrans_time / HZ; 2855 ifp->idev->nd_parms->retrans_time / HZ;
2856#endif 2856#endif
2857 2857
@@ -3217,7 +3217,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3217 s_idx = cb->args[0]; 3217 s_idx = cb->args[0];
3218 s_ip_idx = ip_idx = cb->args[1]; 3218 s_ip_idx = ip_idx = cb->args[1];
3219 read_lock(&dev_base_lock); 3219 read_lock(&dev_base_lock);
3220 3220
3221 for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { 3221 for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
3222 if (idx < s_idx) 3222 if (idx < s_idx)
3223 continue; 3223 continue;
@@ -3234,8 +3234,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3234 ifa = ifa->if_next, ip_idx++) { 3234 ifa = ifa->if_next, ip_idx++) {
3235 if (ip_idx < s_ip_idx) 3235 if (ip_idx < s_ip_idx)
3236 continue; 3236 continue;
3237 if ((err = inet6_fill_ifaddr(skb, ifa, 3237 if ((err = inet6_fill_ifaddr(skb, ifa,
3238 NETLINK_CB(cb->skb).pid, 3238 NETLINK_CB(cb->skb).pid,
3239 cb->nlh->nlmsg_seq, RTM_NEWADDR, 3239 cb->nlh->nlmsg_seq, RTM_NEWADDR,
3240 NLM_F_MULTI)) <= 0) 3240 NLM_F_MULTI)) <= 0)
3241 goto done; 3241 goto done;
@@ -3243,12 +3243,12 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3243 break; 3243 break;
3244 case MULTICAST_ADDR: 3244 case MULTICAST_ADDR:
3245 /* multicast address */ 3245 /* multicast address */
3246 for (ifmca = idev->mc_list; ifmca; 3246 for (ifmca = idev->mc_list; ifmca;
3247 ifmca = ifmca->next, ip_idx++) { 3247 ifmca = ifmca->next, ip_idx++) {
3248 if (ip_idx < s_ip_idx) 3248 if (ip_idx < s_ip_idx)
3249 continue; 3249 continue;
3250 if ((err = inet6_fill_ifmcaddr(skb, ifmca, 3250 if ((err = inet6_fill_ifmcaddr(skb, ifmca,
3251 NETLINK_CB(cb->skb).pid, 3251 NETLINK_CB(cb->skb).pid,
3252 cb->nlh->nlmsg_seq, RTM_GETMULTICAST, 3252 cb->nlh->nlmsg_seq, RTM_GETMULTICAST,
3253 NLM_F_MULTI)) <= 0) 3253 NLM_F_MULTI)) <= 0)
3254 goto done; 3254 goto done;
@@ -3260,10 +3260,10 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3260 ifaca = ifaca->aca_next, ip_idx++) { 3260 ifaca = ifaca->aca_next, ip_idx++) {
3261 if (ip_idx < s_ip_idx) 3261 if (ip_idx < s_ip_idx)
3262 continue; 3262 continue;
3263 if ((err = inet6_fill_ifacaddr(skb, ifaca, 3263 if ((err = inet6_fill_ifacaddr(skb, ifaca,
3264 NETLINK_CB(cb->skb).pid, 3264 NETLINK_CB(cb->skb).pid,
3265 cb->nlh->nlmsg_seq, RTM_GETANYCAST, 3265 cb->nlh->nlmsg_seq, RTM_GETANYCAST,
3266 NLM_F_MULTI)) <= 0) 3266 NLM_F_MULTI)) <= 0)
3267 goto done; 3267 goto done;
3268 } 3268 }
3269 break; 3269 break;
@@ -3426,7 +3426,7 @@ static inline size_t inet6_if_nlmsg_size(void)
3426 ); 3426 );
3427} 3427}
3428 3428
3429static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, 3429static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3430 u32 pid, u32 seq, int event, unsigned int flags) 3430 u32 pid, u32 seq, int event, unsigned int flags)
3431{ 3431{
3432 struct net_device *dev = idev->dev; 3432 struct net_device *dev = idev->dev;
@@ -3498,7 +3498,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3498 continue; 3498 continue;
3499 if ((idev = in6_dev_get(dev)) == NULL) 3499 if ((idev = in6_dev_get(dev)) == NULL)
3500 continue; 3500 continue;
3501 err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid, 3501 err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid,
3502 cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); 3502 cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI);
3503 in6_dev_put(idev); 3503 in6_dev_put(idev);
3504 if (err <= 0) 3504 if (err <= 0)
@@ -3514,7 +3514,7 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
3514{ 3514{
3515 struct sk_buff *skb; 3515 struct sk_buff *skb;
3516 int err = -ENOBUFS; 3516 int err = -ENOBUFS;
3517 3517
3518 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); 3518 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
3519 if (skb == NULL) 3519 if (skb == NULL)
3520 goto errout; 3520 goto errout;
@@ -3578,7 +3578,7 @@ nla_put_failure:
3578 return -EMSGSIZE; 3578 return -EMSGSIZE;
3579} 3579}
3580 3580
3581static void inet6_prefix_notify(int event, struct inet6_dev *idev, 3581static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3582 struct prefix_info *pinfo) 3582 struct prefix_info *pinfo)
3583{ 3583{
3584 struct sk_buff *skb; 3584 struct sk_buff *skb;
@@ -3675,10 +3675,10 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
3675 rt6_purge_dflt_routers(); 3675 rt6_purge_dflt_routers();
3676 } 3676 }
3677 3677
3678 return ret; 3678 return ret;
3679} 3679}
3680 3680
3681static int addrconf_sysctl_forward_strategy(ctl_table *table, 3681static int addrconf_sysctl_forward_strategy(ctl_table *table,
3682 int __user *name, int nlen, 3682 int __user *name, int nlen,
3683 void __user *oldval, 3683 void __user *oldval,
3684 size_t __user *oldlenp, 3684 size_t __user *oldlenp,
@@ -3743,19 +3743,19 @@ static struct addrconf_sysctl_table
3743} addrconf_sysctl __read_mostly = { 3743} addrconf_sysctl __read_mostly = {
3744 .sysctl_header = NULL, 3744 .sysctl_header = NULL,
3745 .addrconf_vars = { 3745 .addrconf_vars = {
3746 { 3746 {
3747 .ctl_name = NET_IPV6_FORWARDING, 3747 .ctl_name = NET_IPV6_FORWARDING,
3748 .procname = "forwarding", 3748 .procname = "forwarding",
3749 .data = &ipv6_devconf.forwarding, 3749 .data = &ipv6_devconf.forwarding,
3750 .maxlen = sizeof(int), 3750 .maxlen = sizeof(int),
3751 .mode = 0644, 3751 .mode = 0644,
3752 .proc_handler = &addrconf_sysctl_forward, 3752 .proc_handler = &addrconf_sysctl_forward,
3753 .strategy = &addrconf_sysctl_forward_strategy, 3753 .strategy = &addrconf_sysctl_forward_strategy,
3754 }, 3754 },
3755 { 3755 {
3756 .ctl_name = NET_IPV6_HOP_LIMIT, 3756 .ctl_name = NET_IPV6_HOP_LIMIT,
3757 .procname = "hop_limit", 3757 .procname = "hop_limit",
3758 .data = &ipv6_devconf.hop_limit, 3758 .data = &ipv6_devconf.hop_limit,
3759 .maxlen = sizeof(int), 3759 .maxlen = sizeof(int),
3760 .mode = 0644, 3760 .mode = 0644,
3761 .proc_handler = proc_dointvec, 3761 .proc_handler = proc_dointvec,
@@ -3764,116 +3764,116 @@ static struct addrconf_sysctl_table
3764 .ctl_name = NET_IPV6_MTU, 3764 .ctl_name = NET_IPV6_MTU,
3765 .procname = "mtu", 3765 .procname = "mtu",
3766 .data = &ipv6_devconf.mtu6, 3766 .data = &ipv6_devconf.mtu6,
3767 .maxlen = sizeof(int), 3767 .maxlen = sizeof(int),
3768 .mode = 0644, 3768 .mode = 0644,
3769 .proc_handler = &proc_dointvec, 3769 .proc_handler = &proc_dointvec,
3770 }, 3770 },
3771 { 3771 {
3772 .ctl_name = NET_IPV6_ACCEPT_RA, 3772 .ctl_name = NET_IPV6_ACCEPT_RA,
3773 .procname = "accept_ra", 3773 .procname = "accept_ra",
3774 .data = &ipv6_devconf.accept_ra, 3774 .data = &ipv6_devconf.accept_ra,
3775 .maxlen = sizeof(int), 3775 .maxlen = sizeof(int),
3776 .mode = 0644, 3776 .mode = 0644,
3777 .proc_handler = &proc_dointvec, 3777 .proc_handler = &proc_dointvec,
3778 }, 3778 },
3779 { 3779 {
3780 .ctl_name = NET_IPV6_ACCEPT_REDIRECTS, 3780 .ctl_name = NET_IPV6_ACCEPT_REDIRECTS,
3781 .procname = "accept_redirects", 3781 .procname = "accept_redirects",
3782 .data = &ipv6_devconf.accept_redirects, 3782 .data = &ipv6_devconf.accept_redirects,
3783 .maxlen = sizeof(int), 3783 .maxlen = sizeof(int),
3784 .mode = 0644, 3784 .mode = 0644,
3785 .proc_handler = &proc_dointvec, 3785 .proc_handler = &proc_dointvec,
3786 }, 3786 },
3787 { 3787 {
3788 .ctl_name = NET_IPV6_AUTOCONF, 3788 .ctl_name = NET_IPV6_AUTOCONF,
3789 .procname = "autoconf", 3789 .procname = "autoconf",
3790 .data = &ipv6_devconf.autoconf, 3790 .data = &ipv6_devconf.autoconf,
3791 .maxlen = sizeof(int), 3791 .maxlen = sizeof(int),
3792 .mode = 0644, 3792 .mode = 0644,
3793 .proc_handler = &proc_dointvec, 3793 .proc_handler = &proc_dointvec,
3794 }, 3794 },
3795 { 3795 {
3796 .ctl_name = NET_IPV6_DAD_TRANSMITS, 3796 .ctl_name = NET_IPV6_DAD_TRANSMITS,
3797 .procname = "dad_transmits", 3797 .procname = "dad_transmits",
3798 .data = &ipv6_devconf.dad_transmits, 3798 .data = &ipv6_devconf.dad_transmits,
3799 .maxlen = sizeof(int), 3799 .maxlen = sizeof(int),
3800 .mode = 0644, 3800 .mode = 0644,
3801 .proc_handler = &proc_dointvec, 3801 .proc_handler = &proc_dointvec,
3802 }, 3802 },
3803 { 3803 {
3804 .ctl_name = NET_IPV6_RTR_SOLICITS, 3804 .ctl_name = NET_IPV6_RTR_SOLICITS,
3805 .procname = "router_solicitations", 3805 .procname = "router_solicitations",
3806 .data = &ipv6_devconf.rtr_solicits, 3806 .data = &ipv6_devconf.rtr_solicits,
3807 .maxlen = sizeof(int), 3807 .maxlen = sizeof(int),
3808 .mode = 0644, 3808 .mode = 0644,
3809 .proc_handler = &proc_dointvec, 3809 .proc_handler = &proc_dointvec,
3810 }, 3810 },
3811 { 3811 {
3812 .ctl_name = NET_IPV6_RTR_SOLICIT_INTERVAL, 3812 .ctl_name = NET_IPV6_RTR_SOLICIT_INTERVAL,
3813 .procname = "router_solicitation_interval", 3813 .procname = "router_solicitation_interval",
3814 .data = &ipv6_devconf.rtr_solicit_interval, 3814 .data = &ipv6_devconf.rtr_solicit_interval,
3815 .maxlen = sizeof(int), 3815 .maxlen = sizeof(int),
3816 .mode = 0644, 3816 .mode = 0644,
3817 .proc_handler = &proc_dointvec_jiffies, 3817 .proc_handler = &proc_dointvec_jiffies,
3818 .strategy = &sysctl_jiffies, 3818 .strategy = &sysctl_jiffies,
3819 }, 3819 },
3820 { 3820 {
3821 .ctl_name = NET_IPV6_RTR_SOLICIT_DELAY, 3821 .ctl_name = NET_IPV6_RTR_SOLICIT_DELAY,
3822 .procname = "router_solicitation_delay", 3822 .procname = "router_solicitation_delay",
3823 .data = &ipv6_devconf.rtr_solicit_delay, 3823 .data = &ipv6_devconf.rtr_solicit_delay,
3824 .maxlen = sizeof(int), 3824 .maxlen = sizeof(int),
3825 .mode = 0644, 3825 .mode = 0644,
3826 .proc_handler = &proc_dointvec_jiffies, 3826 .proc_handler = &proc_dointvec_jiffies,
3827 .strategy = &sysctl_jiffies, 3827 .strategy = &sysctl_jiffies,
3828 }, 3828 },
3829 { 3829 {
3830 .ctl_name = NET_IPV6_FORCE_MLD_VERSION, 3830 .ctl_name = NET_IPV6_FORCE_MLD_VERSION,
3831 .procname = "force_mld_version", 3831 .procname = "force_mld_version",
3832 .data = &ipv6_devconf.force_mld_version, 3832 .data = &ipv6_devconf.force_mld_version,
3833 .maxlen = sizeof(int), 3833 .maxlen = sizeof(int),
3834 .mode = 0644, 3834 .mode = 0644,
3835 .proc_handler = &proc_dointvec, 3835 .proc_handler = &proc_dointvec,
3836 }, 3836 },
3837#ifdef CONFIG_IPV6_PRIVACY 3837#ifdef CONFIG_IPV6_PRIVACY
3838 { 3838 {
3839 .ctl_name = NET_IPV6_USE_TEMPADDR, 3839 .ctl_name = NET_IPV6_USE_TEMPADDR,
3840 .procname = "use_tempaddr", 3840 .procname = "use_tempaddr",
3841 .data = &ipv6_devconf.use_tempaddr, 3841 .data = &ipv6_devconf.use_tempaddr,
3842 .maxlen = sizeof(int), 3842 .maxlen = sizeof(int),
3843 .mode = 0644, 3843 .mode = 0644,
3844 .proc_handler = &proc_dointvec, 3844 .proc_handler = &proc_dointvec,
3845 }, 3845 },
3846 { 3846 {
3847 .ctl_name = NET_IPV6_TEMP_VALID_LFT, 3847 .ctl_name = NET_IPV6_TEMP_VALID_LFT,
3848 .procname = "temp_valid_lft", 3848 .procname = "temp_valid_lft",
3849 .data = &ipv6_devconf.temp_valid_lft, 3849 .data = &ipv6_devconf.temp_valid_lft,
3850 .maxlen = sizeof(int), 3850 .maxlen = sizeof(int),
3851 .mode = 0644, 3851 .mode = 0644,
3852 .proc_handler = &proc_dointvec, 3852 .proc_handler = &proc_dointvec,
3853 }, 3853 },
3854 { 3854 {
3855 .ctl_name = NET_IPV6_TEMP_PREFERED_LFT, 3855 .ctl_name = NET_IPV6_TEMP_PREFERED_LFT,
3856 .procname = "temp_prefered_lft", 3856 .procname = "temp_prefered_lft",
3857 .data = &ipv6_devconf.temp_prefered_lft, 3857 .data = &ipv6_devconf.temp_prefered_lft,
3858 .maxlen = sizeof(int), 3858 .maxlen = sizeof(int),
3859 .mode = 0644, 3859 .mode = 0644,
3860 .proc_handler = &proc_dointvec, 3860 .proc_handler = &proc_dointvec,
3861 }, 3861 },
3862 { 3862 {
3863 .ctl_name = NET_IPV6_REGEN_MAX_RETRY, 3863 .ctl_name = NET_IPV6_REGEN_MAX_RETRY,
3864 .procname = "regen_max_retry", 3864 .procname = "regen_max_retry",
3865 .data = &ipv6_devconf.regen_max_retry, 3865 .data = &ipv6_devconf.regen_max_retry,
3866 .maxlen = sizeof(int), 3866 .maxlen = sizeof(int),
3867 .mode = 0644, 3867 .mode = 0644,
3868 .proc_handler = &proc_dointvec, 3868 .proc_handler = &proc_dointvec,
3869 }, 3869 },
3870 { 3870 {
3871 .ctl_name = NET_IPV6_MAX_DESYNC_FACTOR, 3871 .ctl_name = NET_IPV6_MAX_DESYNC_FACTOR,
3872 .procname = "max_desync_factor", 3872 .procname = "max_desync_factor",
3873 .data = &ipv6_devconf.max_desync_factor, 3873 .data = &ipv6_devconf.max_desync_factor,
3874 .maxlen = sizeof(int), 3874 .maxlen = sizeof(int),
3875 .mode = 0644, 3875 .mode = 0644,
3876 .proc_handler = &proc_dointvec, 3876 .proc_handler = &proc_dointvec,
3877 }, 3877 },
3878#endif 3878#endif
3879 { 3879 {
@@ -3887,18 +3887,18 @@ static struct addrconf_sysctl_table
3887 { 3887 {
3888 .ctl_name = NET_IPV6_ACCEPT_RA_DEFRTR, 3888 .ctl_name = NET_IPV6_ACCEPT_RA_DEFRTR,
3889 .procname = "accept_ra_defrtr", 3889 .procname = "accept_ra_defrtr",
3890 .data = &ipv6_devconf.accept_ra_defrtr, 3890 .data = &ipv6_devconf.accept_ra_defrtr,
3891 .maxlen = sizeof(int), 3891 .maxlen = sizeof(int),
3892 .mode = 0644, 3892 .mode = 0644,
3893 .proc_handler = &proc_dointvec, 3893 .proc_handler = &proc_dointvec,
3894 }, 3894 },
3895 { 3895 {
3896 .ctl_name = NET_IPV6_ACCEPT_RA_PINFO, 3896 .ctl_name = NET_IPV6_ACCEPT_RA_PINFO,
3897 .procname = "accept_ra_pinfo", 3897 .procname = "accept_ra_pinfo",
3898 .data = &ipv6_devconf.accept_ra_pinfo, 3898 .data = &ipv6_devconf.accept_ra_pinfo,
3899 .maxlen = sizeof(int), 3899 .maxlen = sizeof(int),
3900 .mode = 0644, 3900 .mode = 0644,
3901 .proc_handler = &proc_dointvec, 3901 .proc_handler = &proc_dointvec,
3902 }, 3902 },
3903#ifdef CONFIG_IPV6_ROUTER_PREF 3903#ifdef CONFIG_IPV6_ROUTER_PREF
3904 { 3904 {
@@ -4003,18 +4003,18 @@ static void addrconf_sysctl_register(struct inet6_dev *idev, struct ipv6_devconf
4003 t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ 4003 t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
4004 } 4004 }
4005 if (dev) { 4005 if (dev) {
4006 dev_name = dev->name; 4006 dev_name = dev->name;
4007 t->addrconf_dev[0].ctl_name = dev->ifindex; 4007 t->addrconf_dev[0].ctl_name = dev->ifindex;
4008 } else { 4008 } else {
4009 dev_name = "default"; 4009 dev_name = "default";
4010 t->addrconf_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT; 4010 t->addrconf_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT;
4011 } 4011 }
4012 4012
4013 /* 4013 /*
4014 * Make a copy of dev_name, because '.procname' is regarded as const 4014 * Make a copy of dev_name, because '.procname' is regarded as const
4015 * by sysctl and we wouldn't want anyone to change it under our feet 4015 * by sysctl and we wouldn't want anyone to change it under our feet
4016 * (see SIOCSIFNAME). 4016 * (see SIOCSIFNAME).
4017 */ 4017 */
4018 dev_name = kstrdup(dev_name, GFP_KERNEL); 4018 dev_name = kstrdup(dev_name, GFP_KERNEL);
4019 if (!dev_name) 4019 if (!dev_name)
4020 goto free; 4020 goto free;
@@ -4066,12 +4066,12 @@ static void addrconf_sysctl_unregister(struct ipv6_devconf *p)
4066 4066
4067int register_inet6addr_notifier(struct notifier_block *nb) 4067int register_inet6addr_notifier(struct notifier_block *nb)
4068{ 4068{
4069 return atomic_notifier_chain_register(&inet6addr_chain, nb); 4069 return atomic_notifier_chain_register(&inet6addr_chain, nb);
4070} 4070}
4071 4071
4072int unregister_inet6addr_notifier(struct notifier_block *nb) 4072int unregister_inet6addr_notifier(struct notifier_block *nb)
4073{ 4073{
4074 return atomic_notifier_chain_unregister(&inet6addr_chain,nb); 4074 return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
4075} 4075}
4076 4076
4077/* 4077/*
@@ -4124,9 +4124,9 @@ int __init addrconf_init(void)
4124 4124
4125void __exit addrconf_cleanup(void) 4125void __exit addrconf_cleanup(void)
4126{ 4126{
4127 struct net_device *dev; 4127 struct net_device *dev;
4128 struct inet6_dev *idev; 4128 struct inet6_dev *idev;
4129 struct inet6_ifaddr *ifa; 4129 struct inet6_ifaddr *ifa;
4130 int i; 4130 int i;
4131 4131
4132 unregister_netdevice_notifier(&ipv6_dev_notf); 4132 unregister_netdevice_notifier(&ipv6_dev_notf);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0e0e4262f4dc..a006d242be76 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * PF_INET6 socket protocol family 2 * PF_INET6 socket protocol family
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * Adapted from linux/net/ipv4/af_inet.c 8 * Adapted from linux/net/ipv4/af_inet.c
9 * 9 *
@@ -191,7 +191,7 @@ lookup_protocol:
191 np->mc_loop = 1; 191 np->mc_loop = 1;
192 np->pmtudisc = IPV6_PMTUDISC_WANT; 192 np->pmtudisc = IPV6_PMTUDISC_WANT;
193 np->ipv6only = sysctl_ipv6_bindv6only; 193 np->ipv6only = sysctl_ipv6_bindv6only;
194 194
195 /* Init the ipv4 part of the socket since we can have sockets 195 /* Init the ipv4 part of the socket since we can have sockets
196 * using v6 API for ipv4. 196 * using v6 API for ipv4.
197 */ 197 */
@@ -206,7 +206,7 @@ lookup_protocol:
206 inet->pmtudisc = IP_PMTUDISC_DONT; 206 inet->pmtudisc = IP_PMTUDISC_DONT;
207 else 207 else
208 inet->pmtudisc = IP_PMTUDISC_WANT; 208 inet->pmtudisc = IP_PMTUDISC_WANT;
209 /* 209 /*
210 * Increment only the relevant sk_prot->socks debug field, this changes 210 * Increment only the relevant sk_prot->socks debug field, this changes
211 * the previous behaviour of incrementing both the equivalent to 211 * the previous behaviour of incrementing both the equivalent to
212 * answer->prot->socks (inet6_sock_nr) and inet_sock_nr. 212 * answer->prot->socks (inet6_sock_nr) and inet_sock_nr.
@@ -293,7 +293,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
293 */ 293 */
294 sk->sk_bound_dev_if = addr->sin6_scope_id; 294 sk->sk_bound_dev_if = addr->sin6_scope_id;
295 } 295 }
296 296
297 /* Binding to link-local address requires an interface */ 297 /* Binding to link-local address requires an interface */
298 if (!sk->sk_bound_dev_if) { 298 if (!sk->sk_bound_dev_if) {
299 err = -EINVAL; 299 err = -EINVAL;
@@ -327,7 +327,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
327 inet->saddr = v4addr; 327 inet->saddr = v4addr;
328 328
329 ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); 329 ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
330 330
331 if (!(addr_type & IPV6_ADDR_MULTICAST)) 331 if (!(addr_type & IPV6_ADDR_MULTICAST))
332 ipv6_addr_copy(&np->saddr, &addr->sin6_addr); 332 ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
333 333
@@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(inet6_destroy_sock);
393/* 393/*
394 * This does both peername and sockname. 394 * This does both peername and sockname.
395 */ 395 */
396 396
397int inet6_getname(struct socket *sock, struct sockaddr *uaddr, 397int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
398 int *uaddr_len, int peer) 398 int *uaddr_len, int peer)
399{ 399{
@@ -401,7 +401,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
401 struct sock *sk = sock->sk; 401 struct sock *sk = sock->sk;
402 struct inet_sock *inet = inet_sk(sk); 402 struct inet_sock *inet = inet_sk(sk);
403 struct ipv6_pinfo *np = inet6_sk(sk); 403 struct ipv6_pinfo *np = inet6_sk(sk);
404 404
405 sin->sin6_family = AF_INET6; 405 sin->sin6_family = AF_INET6;
406 sin->sin6_flowinfo = 0; 406 sin->sin6_flowinfo = 0;
407 sin->sin6_scope_id = 0; 407 sin->sin6_scope_id = 0;
@@ -433,14 +433,14 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
433{ 433{
434 struct sock *sk = sock->sk; 434 struct sock *sk = sock->sk;
435 435
436 switch(cmd) 436 switch(cmd)
437 { 437 {
438 case SIOCGSTAMP: 438 case SIOCGSTAMP:
439 return sock_get_timestamp(sk, (struct timeval __user *)arg); 439 return sock_get_timestamp(sk, (struct timeval __user *)arg);
440 440
441 case SIOCADDRT: 441 case SIOCADDRT:
442 case SIOCDELRT: 442 case SIOCDELRT:
443 443
444 return(ipv6_route_ioctl(cmd,(void __user *)arg)); 444 return(ipv6_route_ioctl(cmd,(void __user *)arg));
445 445
446 case SIOCSIFADDR: 446 case SIOCSIFADDR:
@@ -584,7 +584,7 @@ inet6_register_protosw(struct inet_protosw *p)
584 /* Add the new entry after the last permanent entry if any, so that 584 /* Add the new entry after the last permanent entry if any, so that
585 * the new entry does not override a permanent entry when matched with 585 * the new entry does not override a permanent entry when matched with
586 * a wild-card protocol. But it is allowed to override any existing 586 * a wild-card protocol. But it is allowed to override any existing
587 * non-permanent entry. This means that when we remove this entry, the 587 * non-permanent entry. This means that when we remove this entry, the
588 * system automatically returns to the old behavior. 588 * system automatically returns to the old behavior.
589 */ 589 */
590 list_add_rcu(&p->list, last_perm); 590 list_add_rcu(&p->list, last_perm);
@@ -749,7 +749,7 @@ err_icmp_mib:
749 snmp6_mib_free((void **)ipv6_statistics); 749 snmp6_mib_free((void **)ipv6_statistics);
750err_ip_mib: 750err_ip_mib:
751 return -ENOMEM; 751 return -ENOMEM;
752 752
753} 753}
754 754
755static void cleanup_ipv6_mibs(void) 755static void cleanup_ipv6_mibs(void)
@@ -763,7 +763,7 @@ static void cleanup_ipv6_mibs(void)
763static int __init inet6_init(void) 763static int __init inet6_init(void)
764{ 764{
765 struct sk_buff *dummy_skb; 765 struct sk_buff *dummy_skb;
766 struct list_head *r; 766 struct list_head *r;
767 int err; 767 int err;
768 768
769 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); 769 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
@@ -814,7 +814,7 @@ static int __init inet6_init(void)
814 err = init_ipv6_mibs(); 814 err = init_ipv6_mibs();
815 if (err) 815 if (err)
816 goto out_unregister_sock; 816 goto out_unregister_sock;
817 817
818 /* 818 /*
819 * ipngwg API draft makes clear that the correct semantics 819 * ipngwg API draft makes clear that the correct semantics
820 * for TCP and UDP is to consider one TCP and UDP instance 820 * for TCP and UDP is to consider one TCP and UDP instance
@@ -933,11 +933,11 @@ static void __exit inet6_exit(void)
933#ifdef CONFIG_PROC_FS 933#ifdef CONFIG_PROC_FS
934 if6_proc_exit(); 934 if6_proc_exit();
935 ac6_proc_exit(); 935 ac6_proc_exit();
936 ipv6_misc_proc_exit(); 936 ipv6_misc_proc_exit();
937 udp6_proc_exit(); 937 udp6_proc_exit();
938 udplite6_proc_exit(); 938 udplite6_proc_exit();
939 tcp6_proc_exit(); 939 tcp6_proc_exit();
940 raw6_proc_exit(); 940 raw6_proc_exit();
941#endif 941#endif
942#ifdef CONFIG_IPV6_MIP6 942#ifdef CONFIG_IPV6_MIP6
943 mip6_fini(); 943 mip6_fini();
@@ -952,7 +952,7 @@ static void __exit inet6_exit(void)
952 ndisc_cleanup(); 952 ndisc_cleanup();
953 icmpv6_cleanup(); 953 icmpv6_cleanup();
954#ifdef CONFIG_SYSCTL 954#ifdef CONFIG_SYSCTL
955 ipv6_sysctl_unregister(); 955 ipv6_sysctl_unregister();
956#endif 956#endif
957 cleanup_ipv6_mibs(); 957 cleanup_ipv6_mibs();
958 proto_unregister(&rawv6_prot); 958 proto_unregister(&rawv6_prot);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 12c5a4dec09e..dc68b7269c3c 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -1,26 +1,26 @@
1/* 1/*
2 * Copyright (C)2002 USAGI/WIDE Project 2 * Copyright (C)2002 USAGI/WIDE Project
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * 17 *
18 * Authors 18 * Authors
19 * 19 *
20 * Mitsuru KANDA @USAGI : IPv6 Support 20 * Mitsuru KANDA @USAGI : IPv6 Support
21 * Kazunori MIYAZAWA @USAGI : 21 * Kazunori MIYAZAWA @USAGI :
22 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 22 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
23 * 23 *
24 * This file is derived from net/ipv4/ah.c. 24 * This file is derived from net/ipv4/ah.c.
25 */ 25 */
26 26
@@ -54,7 +54,7 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
54 optlen = 1; 54 optlen = 1;
55 break; 55 break;
56 default: 56 default:
57 if (len < 2) 57 if (len < 2)
58 goto bad; 58 goto bad;
59 optlen = opt[off+1]+2; 59 optlen = opt[off+1]+2;
60 if (len < optlen) 60 if (len < optlen)
@@ -152,7 +152,7 @@ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
152 segments_left = rthdr->segments_left; 152 segments_left = rthdr->segments_left;
153 if (segments_left == 0) 153 if (segments_left == 0)
154 return; 154 return;
155 rthdr->segments_left = 0; 155 rthdr->segments_left = 0;
156 156
157 /* The value of rthdr->hdrlen has been verified either by the system 157 /* The value of rthdr->hdrlen has been verified either by the system
158 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming 158 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
@@ -278,7 +278,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
278 top_iph->hop_limit = 0; 278 top_iph->hop_limit = 0;
279 279
280 ahp = x->data; 280 ahp = x->data;
281 ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + 281 ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) +
282 ahp->icv_trunc_len) >> 2) - 2; 282 ahp->icv_trunc_len) >> 2) - 2;
283 283
284 ah->reserved = 0; 284 ah->reserved = 0;
@@ -319,7 +319,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
319 * Moving the pointer of skb->nh.raw by using skb_pull as long as AH 319 * Moving the pointer of skb->nh.raw by using skb_pull as long as AH
320 * header length. Then copy back the copy as long as hdr_len 320 * header length. Then copy back the copy as long as hdr_len
321 * If destination header following AH exists, copy it into after [Ext2]. 321 * If destination header following AH exists, copy it into after [Ext2].
322 * 322 *
323 * |<>|[IPv6][Ext1][Ext2][Dest][Payload] 323 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
324 * There is offset of AH before IPv6 header after the process. 324 * There is offset of AH before IPv6 header after the process.
325 */ 325 */
@@ -347,9 +347,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;
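
Note: the check above relates the AH hdrlen field (length in 32-bit words, minus two) to the 8-byte-aligned header size implied by the negotiated ICV. Below is a small sketch of that arithmetic; the 12-byte fixed header size and the ALIGN8 macro are assumptions standing in for sizeof(struct ipv6_auth_hdr) and XFRM_ALIGN8.

#include <stdio.h>

#define ALIGN8(len) (((len) + 7) & ~7U)   /* assumed equivalent of XFRM_ALIGN8 */

int main(void)
{
	unsigned int fixed_ah = 12;        /* nexthdr, length, reserved, SPI, sequence; illustrative */
	unsigned int icv_trunc_len = 12;   /* e.g. HMAC-SHA1-96 */

	/* Output path (ah6_output): hdrlen is stored in 32-bit words, minus 2. */
	unsigned int hdrlen_field = (ALIGN8(fixed_ah + icv_trunc_len) >> 2) - 2;

	/* Input path (ah6_input, hunk above): recover the byte length and sanity-check it. */
	unsigned int ah_hlen = (hdrlen_field + 2) << 2;

	printf("hdrlen field %u -> %u bytes (expected %u)\n",
	       hdrlen_field, ah_hlen, ALIGN8(fixed_ah + icv_trunc_len));
	return 0;
}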
@@ -365,7 +365,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
365 skb->nh.ipv6h->flow_lbl[2] = 0; 365 skb->nh.ipv6h->flow_lbl[2] = 0;
366 skb->nh.ipv6h->hop_limit = 0; 366 skb->nh.ipv6h->hop_limit = 0;
367 367
368 { 368 {
369 u8 auth_data[MAX_AH_AUTH_LEN]; 369 u8 auth_data[MAX_AH_AUTH_LEN];
370 370
371 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 371 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
@@ -395,8 +395,8 @@ out:
395 return err; 395 return err;
396} 396}
397 397
398static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 398static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
399 int type, int code, int offset, __be32 info) 399 int type, int code, int offset, __be32 info)
400{ 400{
401 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 401 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
402 struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); 402 struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
@@ -445,7 +445,7 @@ static int ah6_init_state(struct xfrm_state *x)
445 ahp->tfm = tfm; 445 ahp->tfm = tfm;
446 if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) 446 if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
447 goto error; 447 goto error;
448 448
449 /* 449 /*
450 * Lookup the algorithm description maintained by xfrm_algo, 450 * Lookup the algorithm description maintained by xfrm_algo,
451 * verify crypto transform properties, and store information 451 * verify crypto transform properties, and store information
@@ -462,16 +462,16 @@ static int ah6_init_state(struct xfrm_state *x)
462 aalg_desc->uinfo.auth.icv_fullbits/8); 462 aalg_desc->uinfo.auth.icv_fullbits/8);
463 goto error; 463 goto error;
464 } 464 }
465 465
466 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 466 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
467 ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 467 ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
468 468
469 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); 469 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
470 470
471 ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); 471 ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
472 if (!ahp->work_icv) 472 if (!ahp->work_icv)
473 goto error; 473 goto error;
474 474
475 x->props.header_len = XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len); 475 x->props.header_len = XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len);
476 if (x->props.mode == XFRM_MODE_TUNNEL) 476 if (x->props.mode == XFRM_MODE_TUNNEL)
477 x->props.header_len += sizeof(struct ipv6hdr); 477 x->props.header_len += sizeof(struct ipv6hdr);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index a9604764e015..e2dc1c41bbf5 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Anycast support for IPv6 2 * Anycast support for IPv6
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * David L Stevens (dlstevens@us.ibm.com) 6 * David L Stevens (dlstevens@us.ibm.com)
@@ -397,7 +397,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
397 in6_dev_put(idev); 397 in6_dev_put(idev);
398 return ret; 398 return ret;
399} 399}
400 400
401/* 401/*
402 * check if the interface has this anycast address 402 * check if the interface has this anycast address
403 */ 403 */
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index ecde30140f4a..5f54dec3e205 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * common UDP/RAW code 2 * common UDP/RAW code
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: datagram.c,v 1.24 2002/02/01 22:01:04 davem Exp $ 8 * $Id: datagram.c,v 1.24 2002/02/01 22:01:04 davem Exp $
9 * 9 *
@@ -55,10 +55,10 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
55 } 55 }
56 56
57 if (addr_len < SIN6_LEN_RFC2133) 57 if (addr_len < SIN6_LEN_RFC2133)
58 return -EINVAL; 58 return -EINVAL;
59 59
60 if (usin->sin6_family != AF_INET6) 60 if (usin->sin6_family != AF_INET6)
61 return -EAFNOSUPPORT; 61 return -EAFNOSUPPORT;
62 62
63 memset(&fl, 0, sizeof(fl)); 63 memset(&fl, 0, sizeof(fl));
64 if (np->sndflow) { 64 if (np->sndflow) {
@@ -93,14 +93,14 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
		sin.sin_addr.s_addr = daddr->s6_addr32[3];
		sin.sin_port = usin->sin6_port;

		err = ip4_datagram_connect(sk,
					   (struct sockaddr*) &sin,
					   sizeof(sin));

ipv4_connected:
		if (err)
			goto out;

		ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);

		if (ipv6_addr_any(&np->saddr)) {
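
Note: ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr) in the hunk above stores the IPv4 peer as an IPv4-mapped IPv6 address (::ffff:a.b.c.d). Below is a userspace sketch of the same construction.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr v4;
	struct in6_addr mapped;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);

	/* Words 0 and 1 are zero, word 2 is 0x0000ffff (network order),
	 * word 3 carries the IPv4 address -- the layout ipv6_addr_set() builds above. */
	memset(&mapped, 0, sizeof(mapped));
	mapped.s6_addr[10] = 0xff;
	mapped.s6_addr[11] = 0xff;
	memcpy(&mapped.s6_addr[12], &v4.s_addr, 4);

	printf("%s\n", inet_ntop(AF_INET6, &mapped, buf, sizeof(buf)));   /* ::ffff:192.0.2.1 */
	return 0;
}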
@@ -206,7 +206,7 @@ out:
206 return err; 206 return err;
207} 207}
208 208
209void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 209void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
210 __be16 port, u32 info, u8 *payload) 210 __be16 port, u32 info, u8 *payload)
211{ 211{
212 struct ipv6_pinfo *np = inet6_sk(sk); 212 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -223,7 +223,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
223 serr = SKB_EXT_ERR(skb); 223 serr = SKB_EXT_ERR(skb);
224 serr->ee.ee_errno = err; 224 serr->ee.ee_errno = err;
225 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6; 225 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP6;
226 serr->ee.ee_type = icmph->icmp6_type; 226 serr->ee.ee_type = icmph->icmp6_type;
227 serr->ee.ee_code = icmph->icmp6_code; 227 serr->ee.ee_code = icmph->icmp6_code;
228 serr->ee.ee_pad = 0; 228 serr->ee.ee_pad = 0;
229 serr->ee.ee_info = info; 229 serr->ee.ee_info = info;
@@ -259,7 +259,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
259 serr = SKB_EXT_ERR(skb); 259 serr = SKB_EXT_ERR(skb);
260 serr->ee.ee_errno = err; 260 serr->ee.ee_errno = err;
261 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL; 261 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
262 serr->ee.ee_type = 0; 262 serr->ee.ee_type = 0;
263 serr->ee.ee_code = 0; 263 serr->ee.ee_code = 0;
264 serr->ee.ee_pad = 0; 264 serr->ee.ee_pad = 0;
265 serr->ee.ee_info = info; 265 serr->ee.ee_info = info;
@@ -274,7 +274,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info)
274 kfree_skb(skb); 274 kfree_skb(skb);
275} 275}
276 276
277/* 277/*
278 * Handle MSG_ERRQUEUE 278 * Handle MSG_ERRQUEUE
279 */ 279 */
280int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) 280int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
@@ -312,7 +312,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
312 if (sin) { 312 if (sin) {
313 sin->sin6_family = AF_INET6; 313 sin->sin6_family = AF_INET6;
314 sin->sin6_flowinfo = 0; 314 sin->sin6_flowinfo = 0;
315 sin->sin6_port = serr->port; 315 sin->sin6_port = serr->port;
316 sin->sin6_scope_id = 0; 316 sin->sin6_scope_id = 0;
317 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { 317 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
318 ipv6_addr_copy(&sin->sin6_addr, 318 ipv6_addr_copy(&sin->sin6_addr,
@@ -370,7 +370,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
370 spin_unlock_bh(&sk->sk_error_queue.lock); 370 spin_unlock_bh(&sk->sk_error_queue.lock);
371 } 371 }
372 372
373out_free_skb: 373out_free_skb:
374 kfree_skb(skb); 374 kfree_skb(skb);
375out: 375out:
376 return err; 376 return err;
@@ -419,7 +419,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
419 * report extension headers (except for HbH) 419 * report extension headers (except for HbH)
420 * in order. 420 * in order.
421 * 421 *
422 * Also note that IPV6_RECVRTHDRDSTOPTS is NOT 422 * Also note that IPV6_RECVRTHDRDSTOPTS is NOT
423 * (and WILL NOT be) defined because 423 * (and WILL NOT be) defined because
424 * IPV6_RECVDSTOPTS is more generic. --yoshfuji 424 * IPV6_RECVDSTOPTS is more generic. --yoshfuji
425 */ 425 */
@@ -512,15 +512,15 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
512 continue; 512 continue;
513 513
514 switch (cmsg->cmsg_type) { 514 switch (cmsg->cmsg_type) {
515 case IPV6_PKTINFO: 515 case IPV6_PKTINFO:
516 case IPV6_2292PKTINFO: 516 case IPV6_2292PKTINFO:
517 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) { 517 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
518 err = -EINVAL; 518 err = -EINVAL;
519 goto exit_f; 519 goto exit_f;
520 } 520 }
521 521
522 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); 522 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
523 523
524 if (src_info->ipi6_ifindex) { 524 if (src_info->ipi6_ifindex) {
525 if (fl->oif && src_info->ipi6_ifindex != fl->oif) 525 if (fl->oif && src_info->ipi6_ifindex != fl->oif)
526 return -EINVAL; 526 return -EINVAL;
@@ -531,7 +531,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
531 531
532 if (addr_type == IPV6_ADDR_ANY) 532 if (addr_type == IPV6_ADDR_ANY)
533 break; 533 break;
534 534
535 if (addr_type & IPV6_ADDR_LINKLOCAL) { 535 if (addr_type & IPV6_ADDR_LINKLOCAL) {
536 if (!src_info->ipi6_ifindex) 536 if (!src_info->ipi6_ifindex)
537 return -EINVAL; 537 return -EINVAL;
@@ -554,7 +554,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
554 break; 554 break;
555 555
556 case IPV6_FLOWINFO: 556 case IPV6_FLOWINFO:
557 if (cmsg->cmsg_len < CMSG_LEN(4)) { 557 if (cmsg->cmsg_len < CMSG_LEN(4)) {
558 err = -EINVAL; 558 err = -EINVAL;
559 goto exit_f; 559 goto exit_f;
560 } 560 }
@@ -570,7 +570,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
570 570
571 case IPV6_2292HOPOPTS: 571 case IPV6_2292HOPOPTS:
572 case IPV6_HOPOPTS: 572 case IPV6_HOPOPTS:
573 if (opt->hopopt || cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) { 573 if (opt->hopopt || cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
574 err = -EINVAL; 574 err = -EINVAL;
575 goto exit_f; 575 goto exit_f;
576 } 576 }
@@ -590,7 +590,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
590 break; 590 break;
591 591
592 case IPV6_2292DSTOPTS: 592 case IPV6_2292DSTOPTS:
593 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) { 593 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
594 err = -EINVAL; 594 err = -EINVAL;
595 goto exit_f; 595 goto exit_f;
596 } 596 }
@@ -641,7 +641,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
641 641
642 case IPV6_2292RTHDR: 642 case IPV6_2292RTHDR:
643 case IPV6_RTHDR: 643 case IPV6_RTHDR:
644 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_rt_hdr))) { 644 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_rt_hdr))) {
645 err = -EINVAL; 645 err = -EINVAL;
646 goto exit_f; 646 goto exit_f;
647 } 647 }
@@ -661,7 +661,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
661 661
662 len = ((rthdr->hdrlen + 1) << 3); 662 len = ((rthdr->hdrlen + 1) << 3);
663 663
664 if (cmsg->cmsg_len < CMSG_LEN(len)) { 664 if (cmsg->cmsg_len < CMSG_LEN(len)) {
665 err = -EINVAL; 665 err = -EINVAL;
666 goto exit_f; 666 goto exit_f;
667 } 667 }
@@ -716,7 +716,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
716 } 716 }
717 default: 717 default:
718 LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", 718 LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n",
719 cmsg->cmsg_type); 719 cmsg->cmsg_type);
720 err = -EINVAL; 720 err = -EINVAL;
721 break; 721 break;
722 }; 722 };
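The datagram_send_ctl() hunks above are the kernel-side validation of ancillary data passed with sendmsg(): each cmsg is rejected with -EINVAL unless cmsg_len covers the payload the type implies (for IPV6_PKTINFO, at least CMSG_LEN(sizeof(struct in6_pktinfo))). A minimal userspace sketch of the cmsg layout those checks expect; the socket, destination and source address are placeholders, not anything taken from this patch.

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Send one datagram with an IPV6_PKTINFO cmsg selecting the source
 * address and outgoing interface. This is the message shape that
 * datagram_send_ctl() parses on the kernel side. */
static ssize_t send_with_pktinfo(int fd, const struct sockaddr_in6 *dst,
                                 const void *buf, size_t len,
                                 const struct in6_addr *src, int ifindex)
{
        char control[CMSG_SPACE(sizeof(struct in6_pktinfo))];
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
                .msg_name = (void *)dst,
                .msg_namelen = sizeof(*dst),
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = control,
                .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        struct in6_pktinfo *pi;

        cmsg->cmsg_level = IPPROTO_IPV6;
        cmsg->cmsg_type = IPV6_PKTINFO;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));

        pi = (struct in6_pktinfo *)CMSG_DATA(cmsg);
        memset(pi, 0, sizeof(*pi));
        pi->ipi6_addr = *src;        /* source address to use */
        pi->ipi6_ifindex = ifindex;  /* outgoing interface, 0 lets the stack pick */

        return sendmsg(fd, &msg, 0);
}

A shorter cmsg_len here is exactly the case the hunk at line 517 guards against.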
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 25dcf69cd807..363e63ffecca 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -1,26 +1,26 @@
1/* 1/*
2 * Copyright (C)2002 USAGI/WIDE Project 2 * Copyright (C)2002 USAGI/WIDE Project
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * 17 *
18 * Authors 18 * Authors
19 * 19 *
20 * Mitsuru KANDA @USAGI : IPv6 Support 20 * Mitsuru KANDA @USAGI : IPv6 Support
21 * Kazunori MIYAZAWA @USAGI : 21 * Kazunori MIYAZAWA @USAGI :
22 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 22 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
23 * 23 *
24 * This file is derived from net/ipv4/esp.c 24 * This file is derived from net/ipv4/esp.c
25 */ 25 */
26 26
@@ -166,7 +166,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
166 } 166 }
167 167
168 /* If integrity check is required, do this. */ 168 /* If integrity check is required, do this. */
169 if (esp->auth.icv_full_len) { 169 if (esp->auth.icv_full_len) {
170 u8 sum[alen]; 170 u8 sum[alen];
171 171
172 ret = esp_mac_digest(esp, skb, 0, skb->len - alen); 172 ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
@@ -197,7 +197,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
197 if (esp->conf.ivlen) 197 if (esp->conf.ivlen)
198 crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen); 198 crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
199 199
200 { 200 {
201 u8 nexthdr[2]; 201 u8 nexthdr[2];
202 struct scatterlist *sg = &esp->sgbuf[0]; 202 struct scatterlist *sg = &esp->sgbuf[0];
203 u8 padlen; 203 u8 padlen;
@@ -225,7 +225,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
225 ret = -EINVAL; 225 ret = -EINVAL;
226 goto out; 226 goto out;
227 } 227 }
228 /* ... check padding bits here. Silly. :-) */ 228 /* ... check padding bits here. Silly. :-) */
229 229
230 pskb_trim(skb, skb->len - alen - padlen - 2); 230 pskb_trim(skb, skb->len - alen - padlen - 2);
231 ret = nexthdr[1]; 231 ret = nexthdr[1];
@@ -256,20 +256,20 @@ static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
256} 256}
257 257
258static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 258static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
259 int type, int code, int offset, __be32 info) 259 int type, int code, int offset, __be32 info)
260{ 260{
261 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 261 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
262 struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset); 262 struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset);
263 struct xfrm_state *x; 263 struct xfrm_state *x;
264 264
265 if (type != ICMPV6_DEST_UNREACH && 265 if (type != ICMPV6_DEST_UNREACH &&
266 type != ICMPV6_PKT_TOOBIG) 266 type != ICMPV6_PKT_TOOBIG)
267 return; 267 return;
268 268
269 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); 269 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
270 if (!x) 270 if (!x)
271 return; 271 return;
272 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/" NIP6_FMT "\n", 272 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/" NIP6_FMT "\n",
273 ntohl(esph->spi), NIP6(iph->daddr)); 273 ntohl(esph->spi), NIP6(iph->daddr));
274 xfrm_state_put(x); 274 xfrm_state_put(x);
275} 275}
@@ -326,10 +326,10 @@ static int esp6_init_state(struct xfrm_state *x)
326 esp->auth.tfm = hash; 326 esp->auth.tfm = hash;
327 if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) 327 if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
328 goto error; 328 goto error;
329 329
330 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 330 aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
331 BUG_ON(!aalg_desc); 331 BUG_ON(!aalg_desc);
332 332
333 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 333 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
334 crypto_hash_digestsize(hash)) { 334 crypto_hash_digestsize(hash)) {
335 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", 335 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
@@ -338,10 +338,10 @@ static int esp6_init_state(struct xfrm_state *x)
338 aalg_desc->uinfo.auth.icv_fullbits/8); 338 aalg_desc->uinfo.auth.icv_fullbits/8);
339 goto error; 339 goto error;
340 } 340 }
341 341
342 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 342 esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
343 esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 343 esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
344 344
345 esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL); 345 esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
346 if (!esp->auth.work_icv) 346 if (!esp->auth.work_icv)
347 goto error; 347 goto error;
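The esp6_init_state() hunk derives two ICV lengths from the authentication algorithm descriptor: the full digest size kept in work_icv and the truncated length actually carried in the ESP trailer. A standalone illustration of that relationship, using HMAC-SHA1-96 (RFC 2404) as the example algorithm; other algorithms supply their own icv_fullbits/icv_truncbits.

/* Illustration only, not kernel code: full vs. truncated ICV lengths
 * as esp6_init_state() computes them from the descriptor bits. */
struct esp_auth_lens {
        unsigned int icv_full_len;   /* bytes of raw digest kept in work_icv */
        unsigned int icv_trunc_len;  /* bytes appended to the packet */
};

static struct esp_auth_lens hmac_sha1_lens(void)
{
        const unsigned int icv_fullbits  = 160; /* SHA-1 digest size */
        const unsigned int icv_truncbits = 96;  /* ESP carries 12 bytes */
        struct esp_auth_lens l = {
                .icv_full_len  = icv_fullbits / 8,   /* 20 */
                .icv_trunc_len = icv_truncbits / 8,  /* 12 */
        };
        return l;
}

The BUG/goto error path in the hunk fires when the crypto layer's digest size disagrees with icv_fullbits/8, which would otherwise corrupt the comparison done in esp6_input().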
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 0711f92d6a12..08313efc48c8 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -16,7 +16,7 @@
16 */ 16 */
17 17
18/* Changes: 18/* Changes:
19 * yoshfuji : ensure not to overrun while parsing 19 * yoshfuji : ensure not to overrun while parsing
20 * tlv options. 20 * tlv options.
21 * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs(). 21 * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
22 * YOSHIFUJI Hideaki @USAGI Register inbound extension header 22 * YOSHIFUJI Hideaki @USAGI Register inbound extension header
@@ -167,8 +167,8 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp)
167 goto bad; 167 goto bad;
168 for (curr=procs; curr->type >= 0; curr++) { 168 for (curr=procs; curr->type >= 0; curr++) {
169 if (curr->type == skb->nh.raw[off]) { 169 if (curr->type == skb->nh.raw[off]) {
170 /* type specific length/alignment 170 /* type specific length/alignment
171 checks will be performed in the 171 checks will be performed in the
172 func(). */ 172 func(). */
173 if (curr->func(skbp, off) == 0) 173 if (curr->func(skbp, off) == 0)
174 return 0; 174 return 0;
@@ -572,7 +572,7 @@ void __init ipv6_rthdr_init(void)
572 For now we need to test the engine, so that I created 572 For now we need to test the engine, so that I created
573 temporary (or permanent) backdoor. 573 temporary (or permanent) backdoor.
574 If listening socket set IPV6_RTHDR to 2, then we invert header. 574 If listening socket set IPV6_RTHDR to 2, then we invert header.
575 --ANK (980729) 575 --ANK (980729)
576 */ 576 */
577 577
578struct ipv6_txoptions * 578struct ipv6_txoptions *
@@ -635,7 +635,7 @@ static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
635 return 1; 635 return 1;
636 } 636 }
637 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", 637 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
638 skb->nh.raw[optoff+1]); 638 skb->nh.raw[optoff+1]);
639 kfree_skb(skb); 639 kfree_skb(skb);
640 return 0; 640 return 0;
641} 641}
@@ -649,7 +649,7 @@ static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff)
649 649
650 if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) { 650 if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
651 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", 651 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
652 skb->nh.raw[optoff+1]); 652 skb->nh.raw[optoff+1]);
653 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 653 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
654 IPSTATS_MIB_INHDRERRORS); 654 IPSTATS_MIB_INHDRERRORS);
655 goto drop; 655 goto drop;
@@ -740,7 +740,7 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
740 int hops; 740 int hops;
741 741
742 ihdr = (struct rt0_hdr *) opt; 742 ihdr = (struct rt0_hdr *) opt;
743 743
744 phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3); 744 phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
745 memcpy(phdr, ihdr, sizeof(struct rt0_hdr)); 745 memcpy(phdr, ihdr, sizeof(struct rt0_hdr));
746 746
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 21cbbbddaf4d..e1caa5d526c2 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -4,13 +4,13 @@
4 */ 4 */
5#include <net/ipv6.h> 5#include <net/ipv6.h>
6 6
7/* 7/*
8 * find out if nexthdr is a well-known extension header or a protocol 8 * find out if nexthdr is a well-known extension header or a protocol
9 */ 9 */
10 10
11int ipv6_ext_hdr(u8 nexthdr) 11int ipv6_ext_hdr(u8 nexthdr)
12{ 12{
13 /* 13 /*
14 * find out if nexthdr is an extension header or a protocol 14 * find out if nexthdr is an extension header or a protocol
15 */ 15 */
16 return ( (nexthdr == NEXTHDR_HOP) || 16 return ( (nexthdr == NEXTHDR_HOP) ||
@@ -25,18 +25,18 @@ int ipv6_ext_hdr(u8 nexthdr)
25 * Skip any extension headers. This is used by the ICMP module. 25 * Skip any extension headers. This is used by the ICMP module.
26 * 26 *
27 * Note that strictly speaking this conflicts with RFC 2460 4.0: 27 * Note that strictly speaking this conflicts with RFC 2460 4.0:
28 * ...The contents and semantics of each extension header determine whether 28 * ...The contents and semantics of each extension header determine whether
29 * or not to proceed to the next header. Therefore, extension headers must 29 * or not to proceed to the next header. Therefore, extension headers must
30 * be processed strictly in the order they appear in the packet; a 30 * be processed strictly in the order they appear in the packet; a
31 * receiver must not, for example, scan through a packet looking for a 31 * receiver must not, for example, scan through a packet looking for a
32 * particular kind of extension header and process that header prior to 32 * particular kind of extension header and process that header prior to
33 * processing all preceding ones. 33 * processing all preceding ones.
34 * 34 *
35 * We do exactly this. This is a protocol bug. We can't decide after a 35 * We do exactly this. This is a protocol bug. We can't decide after a
36 * seeing an unknown discard-with-error flavour TLV option if it's a 36 * seeing an unknown discard-with-error flavour TLV option if it's a
37 * ICMP error message or not (errors should never be send in reply to 37 * ICMP error message or not (errors should never be send in reply to
38 * ICMP error messages). 38 * ICMP error messages).
39 * 39 *
40 * But I see no other way to do this. This might need to be reexamined 40 * But I see no other way to do this. This might need to be reexamined
41 * when Linux implements ESP (and maybe AUTH) headers. 41 * when Linux implements ESP (and maybe AUTH) headers.
42 * --AK 42 * --AK
@@ -90,9 +90,9 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
90 break; 90 break;
91 hdrlen = 8; 91 hdrlen = 8;
92 } else if (nexthdr == NEXTHDR_AUTH) 92 } else if (nexthdr == NEXTHDR_AUTH)
93 hdrlen = (hp->hdrlen+2)<<2; 93 hdrlen = (hp->hdrlen+2)<<2;
94 else 94 else
95 hdrlen = ipv6_optlen(hp); 95 hdrlen = ipv6_optlen(hp);
96 96
97 nexthdr = hp->nexthdr; 97 nexthdr = hp->nexthdr;
98 start += hdrlen; 98 start += hdrlen;
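The ipv6_skip_exthdr() hunk applies the standard extension-header length rules: the fragment header is a fixed 8 bytes, the AH length field counts 32-bit words minus two, and every other extension header counts 8-octet units not including the first. A small standalone sketch of that arithmetic; the NEXTHDR_* constants and the two-byte header prefix mirror the kernel definitions but are redeclared here for illustration.

#include <stdint.h>

#define NEXTHDR_FRAGMENT 44
#define NEXTHDR_AUTH     51

struct opt_hdr {
        uint8_t nexthdr;
        uint8_t hdrlen;
};

/* Byte length of one extension header, per the rules used above. */
static unsigned int ext_hdr_bytes(uint8_t nexthdr, const struct opt_hdr *hp)
{
        if (nexthdr == NEXTHDR_FRAGMENT)
                return 8;                       /* fixed size */
        if (nexthdr == NEXTHDR_AUTH)
                return (hp->hdrlen + 2) << 2;   /* AH: 32-bit words minus 2 */
        return (hp->hdrlen + 1) << 3;           /* others: 8-octet units minus 1 */
}

For example, a routing header with hdrlen 2 occupies (2 + 1) * 8 = 24 bytes, which is also the amount ipv6_push_rthdr() pushes in the exthdrs.c hunk above.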
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3dcc4b7f41b4..9377fea02682 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -107,7 +107,7 @@ static __inline__ void icmpv6_xmit_unlock(void)
107 spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock); 107 spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
108} 108}
109 109
110/* 110/*
111 * Slightly more convenient version of icmpv6_send. 111 * Slightly more convenient version of icmpv6_send.
112 */ 112 */
113void icmpv6_param_prob(struct sk_buff *skb, int code, int pos) 113void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
@@ -153,8 +153,8 @@ static int is_ineligible(struct sk_buff *skb)
153 153
154static int sysctl_icmpv6_time __read_mostly = 1*HZ; 154static int sysctl_icmpv6_time __read_mostly = 1*HZ;
155 155
156/* 156/*
157 * Check the ICMP output rate limit 157 * Check the ICMP output rate limit
158 */ 158 */
159static inline int icmpv6_xrlim_allow(struct sock *sk, int type, 159static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
160 struct flowi *fl) 160 struct flowi *fl)
@@ -170,7 +170,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
170 if (type == ICMPV6_PKT_TOOBIG) 170 if (type == ICMPV6_PKT_TOOBIG)
171 return 1; 171 return 1;
172 172
173 /* 173 /*
174 * Look up the output route. 174 * Look up the output route.
175 * XXX: perhaps the expire for routing entries cloned by 175 * XXX: perhaps the expire for routing entries cloned by
176 * this lookup should be more aggressive (not longer than timeout). 176 * this lookup should be more aggressive (not longer than timeout).
@@ -198,7 +198,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
198/* 198/*
199 * an inline helper for the "simple" if statement below 199 * an inline helper for the "simple" if statement below
200 * checks if parameter problem report is caused by an 200 * checks if parameter problem report is caused by an
201 * unrecognized IPv6 option that has the Option Type 201 * unrecognized IPv6 option that has the Option Type
202 * highest-order two bits set to 10 202 * highest-order two bits set to 10
203 */ 203 */
204 204
@@ -298,7 +298,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
298/* 298/*
299 * Send an ICMP message in response to a packet in error 299 * Send an ICMP message in response to a packet in error
300 */ 300 */
301void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, 301void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
302 struct net_device *dev) 302 struct net_device *dev)
303{ 303{
304 struct inet6_dev *idev = NULL; 304 struct inet6_dev *idev = NULL;
@@ -320,7 +320,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
320 return; 320 return;
321 321
322 /* 322 /*
323 * Make sure we respect the rules 323 * Make sure we respect the rules
324 * i.e. RFC 1885 2.4(e) 324 * i.e. RFC 1885 2.4(e)
325 * Rule (e.1) is enforced by not using icmpv6_send 325 * Rule (e.1) is enforced by not using icmpv6_send
326 * in any code that processes icmp errors. 326 * in any code that processes icmp errors.
@@ -336,8 +336,8 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
336 336
337 if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) { 337 if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
338 if (type != ICMPV6_PKT_TOOBIG && 338 if (type != ICMPV6_PKT_TOOBIG &&
339 !(type == ICMPV6_PARAMPROB && 339 !(type == ICMPV6_PARAMPROB &&
340 code == ICMPV6_UNK_OPTION && 340 code == ICMPV6_UNK_OPTION &&
341 (opt_unrec(skb, info)))) 341 (opt_unrec(skb, info))))
342 return; 342 return;
343 343
@@ -364,7 +364,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
364 return; 364 return;
365 } 365 }
366 366
367 /* 367 /*
368 * Never answer to a ICMP packet. 368 * Never answer to a ICMP packet.
369 */ 369 */
370 if (is_ineligible(skb)) { 370 if (is_ineligible(skb)) {
@@ -543,14 +543,14 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
543 } 543 }
544 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr)); 544 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
545 545
546 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES); 546 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
547 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); 547 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
548 548
549out_put: 549out_put:
550 if (likely(idev != NULL)) 550 if (likely(idev != NULL))
551 in6_dev_put(idev); 551 in6_dev_put(idev);
552 dst_release(dst); 552 dst_release(dst);
553out: 553out:
554 icmpv6_xmit_unlock(); 554 icmpv6_xmit_unlock();
555} 555}
556 556
@@ -608,7 +608,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
608 } 608 }
609 read_unlock(&raw_v6_lock); 609 read_unlock(&raw_v6_lock);
610} 610}
611 611
612/* 612/*
613 * Handle icmp messages 613 * Handle icmp messages
614 */ 614 */
@@ -722,9 +722,9 @@ static int icmpv6_rcv(struct sk_buff **pskb)
722 if (type & ICMPV6_INFOMSG_MASK) 722 if (type & ICMPV6_INFOMSG_MASK)
723 break; 723 break;
724 724
725 /* 725 /*
726 * error of unknown type. 726 * error of unknown type.
727 * must pass to upper level 727 * must pass to upper level
728 */ 728 */
729 729
730 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); 730 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
@@ -852,7 +852,7 @@ int icmpv6_err_convert(int type, int code, int *err)
852 case ICMPV6_PKT_TOOBIG: 852 case ICMPV6_PKT_TOOBIG:
853 *err = EMSGSIZE; 853 *err = EMSGSIZE;
854 break; 854 break;
855 855
856 case ICMPV6_PARAMPROB: 856 case ICMPV6_PARAMPROB:
857 *err = EPROTO; 857 *err = EPROTO;
858 fatal = 1; 858 fatal = 1;
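The comment block in the icmp.c hunk describes the helper that decides whether a Parameter Problem was caused by an unrecognized option whose type has its two high-order bits set to 10; per RFC 2460 section 4.2 that action code means the ICMPv6 error is sent even when the offending packet was addressed to a multicast group, which is why icmpv6_send() consults it before bailing out on multicast destinations. A one-line sketch of that test:

/* Unrecognized TLV option whose action bits are "10": discard the
 * packet and send an ICMPv6 Parameter Problem regardless of whether
 * the destination was multicast. */
static int option_demands_icmp(unsigned char opt_type)
{
        return (opt_type & 0xC0) == 0x80;
}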
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index e61116949bee..30b16da739c2 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -115,7 +115,7 @@ struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo,
115 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) { 115 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) {
116 if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { 116 if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
117 const struct ipv6_pinfo *np = inet6_sk(sk); 117 const struct ipv6_pinfo *np = inet6_sk(sk);
118 118
119 score = 1; 119 score = 1;
120 if (!ipv6_addr_any(&np->rcv_saddr)) { 120 if (!ipv6_addr_any(&np->rcv_saddr)) {
121 if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) 121 if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
@@ -249,81 +249,81 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
249{ 249{
250 struct inet_hashinfo *hinfo = death_row->hashinfo; 250 struct inet_hashinfo *hinfo = death_row->hashinfo;
251 const unsigned short snum = inet_sk(sk)->num; 251 const unsigned short snum = inet_sk(sk)->num;
252 struct inet_bind_hashbucket *head; 252 struct inet_bind_hashbucket *head;
253 struct inet_bind_bucket *tb; 253 struct inet_bind_bucket *tb;
254 int ret; 254 int ret;
255 255
256 if (snum == 0) { 256 if (snum == 0) {
257 const int low = sysctl_local_port_range[0]; 257 const int low = sysctl_local_port_range[0];
258 const int high = sysctl_local_port_range[1]; 258 const int high = sysctl_local_port_range[1];
259 const int range = high - low; 259 const int range = high - low;
260 int i, port; 260 int i, port;
261 static u32 hint; 261 static u32 hint;
262 const u32 offset = hint + inet6_sk_port_offset(sk); 262 const u32 offset = hint + inet6_sk_port_offset(sk);
263 struct hlist_node *node; 263 struct hlist_node *node;
264 struct inet_timewait_sock *tw = NULL; 264 struct inet_timewait_sock *tw = NULL;
265 265
266 local_bh_disable(); 266 local_bh_disable();
267 for (i = 1; i <= range; i++) { 267 for (i = 1; i <= range; i++) {
268 port = low + (i + offset) % range; 268 port = low + (i + offset) % range;
269 head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; 269 head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
270 spin_lock(&head->lock); 270 spin_lock(&head->lock);
271 271
272 /* Does not bother with rcv_saddr checks, 272 /* Does not bother with rcv_saddr checks,
273 * because the established check is already 273 * because the established check is already
274 * unique enough. 274 * unique enough.
275 */ 275 */
276 inet_bind_bucket_for_each(tb, node, &head->chain) { 276 inet_bind_bucket_for_each(tb, node, &head->chain) {
277 if (tb->port == port) { 277 if (tb->port == port) {
278 BUG_TRAP(!hlist_empty(&tb->owners)); 278 BUG_TRAP(!hlist_empty(&tb->owners));
279 if (tb->fastreuse >= 0) 279 if (tb->fastreuse >= 0)
280 goto next_port; 280 goto next_port;
281 if (!__inet6_check_established(death_row, 281 if (!__inet6_check_established(death_row,
282 sk, port, 282 sk, port,
283 &tw)) 283 &tw))
284 goto ok; 284 goto ok;
285 goto next_port; 285 goto next_port;
286 } 286 }
287 } 287 }
288 288
289 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, 289 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
290 head, port); 290 head, port);
291 if (!tb) { 291 if (!tb) {
292 spin_unlock(&head->lock); 292 spin_unlock(&head->lock);
293 break; 293 break;
294 } 294 }
295 tb->fastreuse = -1; 295 tb->fastreuse = -1;
296 goto ok; 296 goto ok;
297 297
298 next_port: 298 next_port:
299 spin_unlock(&head->lock); 299 spin_unlock(&head->lock);
300 } 300 }
301 local_bh_enable(); 301 local_bh_enable();
302 302
303 return -EADDRNOTAVAIL; 303 return -EADDRNOTAVAIL;
304 304
305ok: 305ok:
306 hint += i; 306 hint += i;
307 307
308 /* Head lock still held and bh's disabled */ 308 /* Head lock still held and bh's disabled */
309 inet_bind_hash(sk, tb, port); 309 inet_bind_hash(sk, tb, port);
310 if (sk_unhashed(sk)) { 310 if (sk_unhashed(sk)) {
311 inet_sk(sk)->sport = htons(port); 311 inet_sk(sk)->sport = htons(port);
312 __inet6_hash(hinfo, sk); 312 __inet6_hash(hinfo, sk);
313 } 313 }
314 spin_unlock(&head->lock); 314 spin_unlock(&head->lock);
315 315
316 if (tw) { 316 if (tw) {
317 inet_twsk_deschedule(tw, death_row); 317 inet_twsk_deschedule(tw, death_row);
318 inet_twsk_put(tw); 318 inet_twsk_put(tw);
319 } 319 }
320 320
321 ret = 0; 321 ret = 0;
322 goto out; 322 goto out;
323 } 323 }
324 324
325 head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; 325 head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
326 tb = inet_csk(sk)->icsk_bind_hash; 326 tb = inet_csk(sk)->icsk_bind_hash;
327 spin_lock_bh(&head->lock); 327 spin_lock_bh(&head->lock);
328 328
329 if (sk_head(&tb->owners) == sk && sk->sk_bind_node.next == NULL) { 329 if (sk_head(&tb->owners) == sk && sk->sk_bind_node.next == NULL) {
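The inet6_hash_connect() hunk walks ephemeral port candidates as low + (i + offset) % range, so every port in the configured local range is probed exactly once starting from a per-socket offset, and the shared hint is advanced by i once a free port is found. A standalone sketch of the candidate formula; the low/high values are example defaults, not taken from this patch.

/* Candidate port for attempt i, given a per-socket offset.
 * Example local port range; the kernel reads it from a sysctl. */
static int pick_port_candidate(int i, unsigned int offset)
{
        const int low = 32768, high = 61000;
        const int range = high - low;

        return low + (int)((i + offset) % range);
}

Because the offset differs per socket, concurrent connect() calls tend to start their searches at different points of the range instead of piling onto the same bucket locks.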
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 827f8842b578..f4d7be77eb0f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * Linux INET6 implementation 2 * Linux INET6 implementation
3 * Forwarding Information Database 3 * Forwarding Information Database
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $ 8 * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
9 * 9 *
@@ -97,7 +97,7 @@ static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0);
97 97
98static struct fib6_walker_t fib6_walker_list = { 98static struct fib6_walker_t fib6_walker_list = {
99 .prev = &fib6_walker_list, 99 .prev = &fib6_walker_list,
100 .next = &fib6_walker_list, 100 .next = &fib6_walker_list,
101}; 101};
102 102
103#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next) 103#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
@@ -131,7 +131,7 @@ static __inline__ u32 fib6_new_sernum(void)
131/* 131/*
132 * Auxiliary address test functions for the radix tree. 132 * Auxiliary address test functions for the radix tree.
133 * 133 *
134 * These assume a 32bit processor (although it will work on 134 * These assume a 32bit processor (although it will work on
135 * 64bit processors) 135 * 64bit processors)
136 */ 136 */
137 137
@@ -297,7 +297,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
297 int res; 297 int res;
298 struct rt6_info *rt; 298 struct rt6_info *rt;
299 299
300 for (rt = w->leaf; rt; rt = rt->u.next) { 300 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
301 res = rt6_dump_route(rt, w->args); 301 res = rt6_dump_route(rt, w->args);
302 if (res < 0) { 302 if (res < 0) {
303 /* Frame is full, suspend walking */ 303 /* Frame is full, suspend walking */
@@ -433,7 +433,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
433 struct fib6_node *pn = NULL; 433 struct fib6_node *pn = NULL;
434 struct rt6key *key; 434 struct rt6key *key;
435 int bit; 435 int bit;
436 __be32 dir = 0; 436 __be32 dir = 0;
437 __u32 sernum = fib6_new_sernum(); 437 __u32 sernum = fib6_new_sernum();
438 438
439 RT6_TRACE("fib6_add_1\n"); 439 RT6_TRACE("fib6_add_1\n");
@@ -451,27 +451,27 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
451 if (plen < fn->fn_bit || 451 if (plen < fn->fn_bit ||
452 !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) 452 !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
453 goto insert_above; 453 goto insert_above;
454 454
455 /* 455 /*
456 * Exact match ? 456 * Exact match ?
457 */ 457 */
458 458
459 if (plen == fn->fn_bit) { 459 if (plen == fn->fn_bit) {
460 /* clean up an intermediate node */ 460 /* clean up an intermediate node */
461 if ((fn->fn_flags & RTN_RTINFO) == 0) { 461 if ((fn->fn_flags & RTN_RTINFO) == 0) {
462 rt6_release(fn->leaf); 462 rt6_release(fn->leaf);
463 fn->leaf = NULL; 463 fn->leaf = NULL;
464 } 464 }
465 465
466 fn->fn_sernum = sernum; 466 fn->fn_sernum = sernum;
467 467
468 return fn; 468 return fn;
469 } 469 }
470 470
471 /* 471 /*
472 * We have more bits to go 472 * We have more bits to go
473 */ 473 */
474 474
475 /* Try to walk down on tree. */ 475 /* Try to walk down on tree. */
476 fn->fn_sernum = sernum; 476 fn->fn_sernum = sernum;
477 dir = addr_bit_set(addr, fn->fn_bit); 477 dir = addr_bit_set(addr, fn->fn_bit);
@@ -489,7 +489,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
489 if (ln == NULL) 489 if (ln == NULL)
490 return NULL; 490 return NULL;
491 ln->fn_bit = plen; 491 ln->fn_bit = plen;
492 492
493 ln->parent = pn; 493 ln->parent = pn;
494 ln->fn_sernum = sernum; 494 ln->fn_sernum = sernum;
495 495
@@ -503,7 +503,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
503 503
504insert_above: 504insert_above:
505 /* 505 /*
506 * split since we don't have a common prefix anymore or 506 * split since we don't have a common prefix anymore or
507 * we have a less significant route. 507 * we have a less significant route.
508 * we've to insert an intermediate node on the list 508 * we've to insert an intermediate node on the list
509 * this new node will point to the one we need to create 509 * this new node will point to the one we need to create
@@ -517,18 +517,18 @@ insert_above:
517 See comment in __ipv6_addr_diff: bit may be an invalid value, 517 See comment in __ipv6_addr_diff: bit may be an invalid value,
518 but if it is >= plen, the value is ignored in any case. 518 but if it is >= plen, the value is ignored in any case.
519 */ 519 */
520 520
521 bit = __ipv6_addr_diff(addr, &key->addr, addrlen); 521 bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
522 522
523 /* 523 /*
524 * (intermediate)[in] 524 * (intermediate)[in]
525 * / \ 525 * / \
526 * (new leaf node)[ln] (old node)[fn] 526 * (new leaf node)[ln] (old node)[fn]
527 */ 527 */
528 if (plen > bit) { 528 if (plen > bit) {
529 in = node_alloc(); 529 in = node_alloc();
530 ln = node_alloc(); 530 ln = node_alloc();
531 531
532 if (in == NULL || ln == NULL) { 532 if (in == NULL || ln == NULL) {
533 if (in) 533 if (in)
534 node_free(in); 534 node_free(in);
@@ -537,8 +537,8 @@ insert_above:
537 return NULL; 537 return NULL;
538 } 538 }
539 539
540 /* 540 /*
541 * new intermediate node. 541 * new intermediate node.
542 * RTN_RTINFO will 542 * RTN_RTINFO will
543 * be off since that an address that chooses one of 543 * be off since that an address that chooses one of
544 * the branches would not match less specific routes 544 * the branches would not match less specific routes
@@ -575,7 +575,7 @@ insert_above:
575 } 575 }
576 } else { /* plen <= bit */ 576 } else { /* plen <= bit */
577 577
578 /* 578 /*
579 * (new leaf node)[ln] 579 * (new leaf node)[ln]
580 * / \ 580 * / \
581 * (old node)[fn] NULL 581 * (old node)[fn] NULL
@@ -591,7 +591,7 @@ insert_above:
591 ln->parent = pn; 591 ln->parent = pn;
592 592
593 ln->fn_sernum = sernum; 593 ln->fn_sernum = sernum;
594 594
595 if (dir) 595 if (dir)
596 pn->right = ln; 596 pn->right = ln;
597 else 597 else
@@ -623,11 +623,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
623 fn->leaf == &ip6_null_entry && 623 fn->leaf == &ip6_null_entry &&
624 !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){ 624 !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){
625 fn->leaf = rt; 625 fn->leaf = rt;
626 rt->u.next = NULL; 626 rt->u.dst.rt6_next = NULL;
627 goto out; 627 goto out;
628 } 628 }
629 629
630 for (iter = fn->leaf; iter; iter=iter->u.next) { 630 for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
631 /* 631 /*
632 * Search for duplicates 632 * Search for duplicates
633 */ 633 */
@@ -655,7 +655,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
655 if (iter->rt6i_metric > rt->rt6i_metric) 655 if (iter->rt6i_metric > rt->rt6i_metric)
656 break; 656 break;
657 657
658 ins = &iter->u.next; 658 ins = &iter->u.dst.rt6_next;
659 } 659 }
660 660
661 /* 661 /*
@@ -663,7 +663,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
663 */ 663 */
664 664
665out: 665out:
666 rt->u.next = iter; 666 rt->u.dst.rt6_next = iter;
667 *ins = rt; 667 *ins = rt;
668 rt->rt6i_node = fn; 668 rt->rt6i_node = fn;
669 atomic_inc(&rt->rt6i_ref); 669 atomic_inc(&rt->rt6i_ref);
@@ -1104,7 +1104,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1104 RT6_TRACE("fib6_del_route\n"); 1104 RT6_TRACE("fib6_del_route\n");
1105 1105
1106 /* Unlink it */ 1106 /* Unlink it */
1107 *rtp = rt->u.next; 1107 *rtp = rt->u.dst.rt6_next;
1108 rt->rt6i_node = NULL; 1108 rt->rt6i_node = NULL;
1109 rt6_stats.fib_rt_entries--; 1109 rt6_stats.fib_rt_entries--;
1110 rt6_stats.fib_discarded_routes++; 1110 rt6_stats.fib_discarded_routes++;
@@ -1114,14 +1114,14 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1114 FOR_WALKERS(w) { 1114 FOR_WALKERS(w) {
1115 if (w->state == FWS_C && w->leaf == rt) { 1115 if (w->state == FWS_C && w->leaf == rt) {
1116 RT6_TRACE("walker %p adjusted by delroute\n", w); 1116 RT6_TRACE("walker %p adjusted by delroute\n", w);
1117 w->leaf = rt->u.next; 1117 w->leaf = rt->u.dst.rt6_next;
1118 if (w->leaf == NULL) 1118 if (w->leaf == NULL)
1119 w->state = FWS_U; 1119 w->state = FWS_U;
1120 } 1120 }
1121 } 1121 }
1122 read_unlock(&fib6_walker_lock); 1122 read_unlock(&fib6_walker_lock);
1123 1123
1124 rt->u.next = NULL; 1124 rt->u.dst.rt6_next = NULL;
1125 1125
1126 if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT) 1126 if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT)
1127 fn->leaf = &ip6_null_entry; 1127 fn->leaf = &ip6_null_entry;
@@ -1189,7 +1189,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1189 * Walk the leaf entries looking for ourself 1189 * Walk the leaf entries looking for ourself
1190 */ 1190 */
1191 1191
1192 for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.next) { 1192 for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
1193 if (*rtp == rt) { 1193 if (*rtp == rt) {
1194 fib6_del_route(fn, rtp, info); 1194 fib6_del_route(fn, rtp, info);
1195 return 0; 1195 return 0;
@@ -1205,7 +1205,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1205 * However, it is internally reenterable wrt itself and fib6_add/fib6_del. 1205 * However, it is internally reenterable wrt itself and fib6_add/fib6_del.
1206 * It means, that we can modify tree during walking 1206 * It means, that we can modify tree during walking
1207 * and use this function for garbage collection, clone pruning, 1207 * and use this function for garbage collection, clone pruning,
1208 * cleaning tree when a device goes down etc. etc. 1208 * cleaning tree when a device goes down etc. etc.
1209 * 1209 *
1210 * It guarantees that every node will be traversed, 1210 * It guarantees that every node will be traversed,
1211 * and that it will be traversed only once. 1211 * and that it will be traversed only once.
@@ -1244,7 +1244,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
1244 continue; 1244 continue;
1245 } 1245 }
1246 w->state = FWS_L; 1246 w->state = FWS_L;
1247#endif 1247#endif
1248 case FWS_L: 1248 case FWS_L:
1249 if (fn->left) { 1249 if (fn->left) {
1250 w->node = fn->left; 1250 w->node = fn->left;
@@ -1316,7 +1316,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
1316 struct rt6_info *rt; 1316 struct rt6_info *rt;
1317 struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w; 1317 struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;
1318 1318
1319 for (rt = w->leaf; rt; rt = rt->u.next) { 1319 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
1320 res = c->func(rt, c->arg); 1320 res = c->func(rt, c->arg);
1321 if (res < 0) { 1321 if (res < 0) {
1322 w->leaf = rt; 1322 w->leaf = rt;
@@ -1337,7 +1337,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
1337 1337
1338/* 1338/*
1339 * Convenient frontend to tree walker. 1339 * Convenient frontend to tree walker.
1340 * 1340 *
1341 * func is called on each route. 1341 * func is called on each route.
1342 * It may return -1 -> delete this route. 1342 * It may return -1 -> delete this route.
1343 * 0 -> continue walking 1343 * 0 -> continue walking
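The ip6_fib.c hunks above all make the same mechanical substitution: the per-route chaining pointer is no longer a dedicated rt->u.next field but is reached through the embedded dst_entry as rt->u.dst.rt6_next. A rough sketch, with member names other than the chain pointer elided, of the layout this implies; it is an illustration of the idea, not the real declarations.

/* Rough sketch (C11/GNU anonymous union style): the chain pointer
 * lives in a union inside dst_entry, with a per-family alias, so
 * rt6_info reaches it through its embedded dst. */
struct rt6_info_sketch;

struct dst_entry_sketch {
        /* ...other dst_entry members elided... */
        union {
                struct dst_entry_sketch *next;      /* generic chain pointer */
                struct rt6_info_sketch  *rt6_next;  /* alias used by IPv6 routes */
        };
};

struct rt6_info_sketch {
        union {
                struct dst_entry_sketch dst;        /* dst_entry stays first */
        } u;
        /* ...rt6i_* members elided... */
};

With this layout the list walks in fib6_dump_node(), fib6_clean_node() and fib6_del_route() simply follow rt->u.dst.rt6_next, which is exactly the change each hunk shows.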
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 624fae251f4e..1551ab3890a3 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -228,10 +228,10 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
228 struct ipv6_txoptions * fopt) 228 struct ipv6_txoptions * fopt)
229{ 229{
230 struct ipv6_txoptions * fl_opt = fl->opt; 230 struct ipv6_txoptions * fl_opt = fl->opt;
231 231
232 if (fopt == NULL || fopt->opt_flen == 0) 232 if (fopt == NULL || fopt->opt_flen == 0)
233 return fl_opt; 233 return fl_opt;
234 234
235 if (fl_opt != NULL) { 235 if (fl_opt != NULL) {
236 opt_space->hopopt = fl_opt->hopopt; 236 opt_space->hopopt = fl_opt->hopopt;
237 opt_space->dst0opt = fl_opt->dst0opt; 237 opt_space->dst0opt = fl_opt->dst0opt;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index ad0b8abcdf4b..4fdded0e545a 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * IPv6 input 2 * IPv6 input
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
@@ -48,7 +48,7 @@
48 48
49 49
50 50
51inline int ip6_rcv_finish( struct sk_buff *skb) 51inline int ip6_rcv_finish( struct sk_buff *skb)
52{ 52{
53 if (skb->dst == NULL) 53 if (skb->dst == NULL)
54 ip6_route_input(skb); 54 ip6_route_input(skb);
@@ -173,9 +173,9 @@ resubmit:
173 hash = nexthdr & (MAX_INET_PROTOS - 1); 173 hash = nexthdr & (MAX_INET_PROTOS - 1);
174 if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) { 174 if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
175 int ret; 175 int ret;
176 176
177 if (ipprot->flags & INET6_PROTO_FINAL) { 177 if (ipprot->flags & INET6_PROTO_FINAL) {
178 struct ipv6hdr *hdr; 178 struct ipv6hdr *hdr;
179 179
180 /* Free reference early: we don't need it any more, 180 /* Free reference early: we don't need it any more,
181 and it may hold ip_conntrack module loaded 181 and it may hold ip_conntrack module loaded
@@ -192,9 +192,9 @@ resubmit:
192 goto discard; 192 goto discard;
193 } 193 }
194 if (!(ipprot->flags & INET6_PROTO_NOPOLICY) && 194 if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
195 !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 195 !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
196 goto discard; 196 goto discard;
197 197
198 ret = ipprot->handler(&skb); 198 ret = ipprot->handler(&skb);
199 if (ret > 0) 199 if (ret > 0)
200 goto resubmit; 200 goto resubmit;
@@ -205,8 +205,8 @@ resubmit:
205 if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 205 if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
206 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS); 206 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS);
207 icmpv6_send(skb, ICMPV6_PARAMPROB, 207 icmpv6_send(skb, ICMPV6_PARAMPROB,
208 ICMPV6_UNK_NEXTHDR, nhoff, 208 ICMPV6_UNK_NEXTHDR, nhoff,
209 skb->dev); 209 skb->dev);
210 } 210 }
211 } else 211 } else
212 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS); 212 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
@@ -253,7 +253,7 @@ int ip6_mc_input(struct sk_buff *skb)
253 struct dst_entry *dst; 253 struct dst_entry *dst;
254 254
255 dst = skb->dst; 255 dst = skb->dst;
256 256
257 if (deliver) { 257 if (deliver) {
258 skb2 = skb_clone(skb, GFP_ATOMIC); 258 skb2 = skb_clone(skb, GFP_ATOMIC);
259 dst_output(skb2); 259 dst_output(skb2);
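The ip6_input.c hunk dispatches to upper-layer handlers by masking the next-header value with MAX_INET_PROTOS - 1; with MAX_INET_PROTOS equal to 256 in this tree the mask keeps the full 8-bit protocol number, so the "hash" is effectively the protocol number itself. A trivial sketch of that index, with the constant redeclared for illustration:

#define MAX_INET_PROTOS_SKETCH 256

/* e.g. nexthdr 58 (ICMPv6) selects slot 58 of the inet6_protos[] table */
static unsigned int proto_hash(unsigned char nexthdr)
{
        return nexthdr & (MAX_INET_PROTOS_SKETCH - 1);
}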
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7b7bd44fbf47..305516921aa8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * IPv6 output functions 2 * IPv6 output functions
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $ 8 * $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
9 * 9 *
@@ -363,7 +363,7 @@ int ip6_forward(struct sk_buff *skb)
363 struct dst_entry *dst = skb->dst; 363 struct dst_entry *dst = skb->dst;
364 struct ipv6hdr *hdr = skb->nh.ipv6h; 364 struct ipv6hdr *hdr = skb->nh.ipv6h;
365 struct inet6_skb_parm *opt = IP6CB(skb); 365 struct inet6_skb_parm *opt = IP6CB(skb);
366 366
367 if (ipv6_devconf.forwarding == 0) 367 if (ipv6_devconf.forwarding == 0)
368 goto error; 368 goto error;
369 369
@@ -473,7 +473,7 @@ int ip6_forward(struct sk_buff *skb)
473 hdr = skb->nh.ipv6h; 473 hdr = skb->nh.ipv6h;
474 474
475 /* Mangling hops number delayed to point after skb COW */ 475 /* Mangling hops number delayed to point after skb COW */
476 476
477 hdr->hop_limit--; 477 hdr->hop_limit--;
478 478
479 IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); 479 IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
@@ -659,7 +659,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
659 frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); 659 frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
660 ip6_copy_metadata(frag, skb); 660 ip6_copy_metadata(frag, skb);
661 } 661 }
662 662
663 err = output(skb); 663 err = output(skb);
664 if(!err) 664 if(!err)
665 IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES); 665 IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);
@@ -792,7 +792,7 @@ slow_path:
792fail: 792fail:
793 IP6_INC_STATS(ip6_dst_idev(skb->dst), 793 IP6_INC_STATS(ip6_dst_idev(skb->dst),
794 IPSTATS_MIB_FRAGFAILS); 794 IPSTATS_MIB_FRAGFAILS);
795 kfree_skb(skb); 795 kfree_skb(skb);
796 return err; 796 return err;
797} 797}
798 798
@@ -955,7 +955,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
955 struct frag_hdr fhdr; 955 struct frag_hdr fhdr;
956 956
957 /* specify the length of each IP datagram fragment*/ 957 /* specify the length of each IP datagram fragment*/
958 skb_shinfo(skb)->gso_size = mtu - fragheaderlen - 958 skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
959 sizeof(struct frag_hdr); 959 sizeof(struct frag_hdr);
960 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 960 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
961 ipv6_select_ident(skb, &fhdr); 961 ipv6_select_ident(skb, &fhdr);
@@ -1058,13 +1058,13 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1058 * fragment alignment (= 8-15 octects, in total). 1058 * fragment alignment (= 8-15 octects, in total).
1059 * 1059 *
1060 * Note that we may need to "move" the data from the tail of 1060 * Note that we may need to "move" the data from the tail of
1061 * of the buffer to the new fragment when we split 1061 * of the buffer to the new fragment when we split
1062 * the message. 1062 * the message.
1063 * 1063 *
1064 * FIXME: It may be fragmented into multiple chunks 1064 * FIXME: It may be fragmented into multiple chunks
1065 * at once if non-fragmentable extension headers 1065 * at once if non-fragmentable extension headers
1066 * are too large. 1066 * are too large.
1067 * --yoshfuji 1067 * --yoshfuji
1068 */ 1068 */
1069 1069
1070 inet->cork.length += length; 1070 inet->cork.length += length;
@@ -1129,7 +1129,7 @@ alloc_new_skb:
1129 1129
1130 /* 1130 /*
1131 * We just reserve space for fragment header. 1131 * We just reserve space for fragment header.
1132 * Note: this may be overallocation if the message 1132 * Note: this may be overallocation if the message
1133 * (without MSG_MORE) fits into the MTU. 1133 * (without MSG_MORE) fits into the MTU.
1134 */ 1134 */
1135 alloclen += sizeof(struct frag_hdr); 1135 alloclen += sizeof(struct frag_hdr);
@@ -1310,7 +1310,7 @@ int ip6_push_pending_frames(struct sock *sk)
1310 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst); 1310 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1311 1311
1312 skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr)); 1312 skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));
1313 1313
1314 *(__be32*)hdr = fl->fl6_flowlabel | 1314 *(__be32*)hdr = fl->fl6_flowlabel |
1315 htonl(0x60000000 | ((int)np->cork.tclass << 20)); 1315 htonl(0x60000000 | ((int)np->cork.tclass << 20));
1316 1316
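The ip6_push_pending_frames() hunk writes the first 32-bit word of the IPv6 header in one assignment: version (4 bits), traffic class (8 bits) and flow label (20 bits) ORed together in network byte order. A worked sketch of that word; flowlabel_be is assumed to already be in network byte order with only the low 20 bits populated, the way the kernel keeps fl6_flowlabel.

#include <stdint.h>
#include <arpa/inet.h>

/* First word of an IPv6 header: 0x6 | tclass | flow label. */
static uint32_t ipv6_word0(uint8_t tclass, uint32_t flowlabel_be)
{
        return flowlabel_be | htonl(0x60000000u | ((uint32_t)tclass << 20));
}

/* Example: tclass 0 and flow label 0 give 0x60000000 in network order,
 * i.e. the bytes 60 00 00 00 that start every such IPv6 header. */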
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2b9e3bb7da65..367b74832986 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -3,7 +3,7 @@
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Ville Nuorvala <vnuorval@tcs.hut.fi> 6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * 7 *
8 * $Id$ 8 * $Id$
9 * 9 *
@@ -67,8 +67,8 @@ MODULE_LICENSE("GPL");
67#define HASH_SIZE 32 67#define HASH_SIZE 32
68 68
69#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \ 69#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
70 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ 70 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
71 (HASH_SIZE - 1)) 71 (HASH_SIZE - 1))
72 72
73static int ip6ip6_fb_tnl_dev_init(struct net_device *dev); 73static int ip6ip6_fb_tnl_dev_init(struct net_device *dev);
74static int ip6ip6_tnl_dev_init(struct net_device *dev); 74static int ip6ip6_tnl_dev_init(struct net_device *dev);
@@ -90,7 +90,7 @@ static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
90{ 90{
91 struct dst_entry *dst = t->dst_cache; 91 struct dst_entry *dst = t->dst_cache;
92 92
93 if (dst && dst->obsolete && 93 if (dst && dst->obsolete &&
94 dst->ops->check(dst, t->dst_cookie) == NULL) { 94 dst->ops->check(dst, t->dst_cookie) == NULL) {
95 t->dst_cache = NULL; 95 t->dst_cache = NULL;
96 dst_release(dst); 96 dst_release(dst);
@@ -116,12 +116,12 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
116 116
117/** 117/**
118 * ip6ip6_tnl_lookup - fetch tunnel matching the end-point addresses 118 * ip6ip6_tnl_lookup - fetch tunnel matching the end-point addresses
119 * @remote: the address of the tunnel exit-point 119 * @remote: the address of the tunnel exit-point
120 * @local: the address of the tunnel entry-point 120 * @local: the address of the tunnel entry-point
121 * 121 *
122 * Return: 122 * Return:
123 * tunnel matching given end-points if found, 123 * tunnel matching given end-points if found,
124 * else fallback tunnel if its device is up, 124 * else fallback tunnel if its device is up,
125 * else %NULL 125 * else %NULL
126 **/ 126 **/
127 127
@@ -146,13 +146,13 @@ ip6ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
146 146
147/** 147/**
148 * ip6ip6_bucket - get head of list matching given tunnel parameters 148 * ip6ip6_bucket - get head of list matching given tunnel parameters
149 * @p: parameters containing tunnel end-points 149 * @p: parameters containing tunnel end-points
150 * 150 *
151 * Description: 151 * Description:
152 * ip6ip6_bucket() returns the head of the list matching the 152 * ip6ip6_bucket() returns the head of the list matching the
153 * &struct in6_addr entries laddr and raddr in @p. 153 * &struct in6_addr entries laddr and raddr in @p.
154 * 154 *
155 * Return: head of IPv6 tunnel list 155 * Return: head of IPv6 tunnel list
156 **/ 156 **/
157 157
158static struct ip6_tnl ** 158static struct ip6_tnl **
@@ -213,8 +213,8 @@ ip6ip6_tnl_unlink(struct ip6_tnl *t)
213 * 213 *
214 * Description: 214 * Description:
215 * Create tunnel matching given parameters. 215 * Create tunnel matching given parameters.
216 * 216 *
217 * Return: 217 * Return:
218 * created tunnel or NULL 218 * created tunnel or NULL
219 **/ 219 **/
220 220
@@ -234,7 +234,7 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
234 if (__dev_get_by_name(name) == NULL) 234 if (__dev_get_by_name(name) == NULL)
235 break; 235 break;
236 } 236 }
237 if (i == IP6_TNL_MAX) 237 if (i == IP6_TNL_MAX)
238 goto failed; 238 goto failed;
239 } 239 }
240 dev = alloc_netdev(sizeof (*t), name, ip6ip6_tnl_dev_setup); 240 dev = alloc_netdev(sizeof (*t), name, ip6ip6_tnl_dev_setup);
@@ -258,7 +258,7 @@ failed:
258 258
259/** 259/**
260 * ip6ip6_tnl_locate - find or create tunnel matching given parameters 260 * ip6ip6_tnl_locate - find or create tunnel matching given parameters
261 * @p: tunnel parameters 261 * @p: tunnel parameters
262 * @create: != 0 if allowed to create new tunnel if no match found 262 * @create: != 0 if allowed to create new tunnel if no match found
263 * 263 *
264 * Description: 264 * Description:
@@ -289,7 +289,7 @@ static struct ip6_tnl *ip6ip6_tnl_locate(struct ip6_tnl_parm *p, int create)
289/** 289/**
290 * ip6ip6_tnl_dev_uninit - tunnel device uninitializer 290 * ip6ip6_tnl_dev_uninit - tunnel device uninitializer
291 * @dev: the device to be destroyed 291 * @dev: the device to be destroyed
292 * 292 *
293 * Description: 293 * Description:
294 * ip6ip6_tnl_dev_uninit() removes tunnel from its list 294 * ip6ip6_tnl_dev_uninit() removes tunnel from its list
295 **/ 295 **/
@@ -314,8 +314,8 @@ ip6ip6_tnl_dev_uninit(struct net_device *dev)
314 * parse_tvl_tnl_enc_lim - handle encapsulation limit option 314 * parse_tvl_tnl_enc_lim - handle encapsulation limit option
315 * @skb: received socket buffer 315 * @skb: received socket buffer
316 * 316 *
317 * Return: 317 * Return:
318 * 0 if none was found, 318 * 0 if none was found,
319 * else index to encapsulation limit 319 * else index to encapsulation limit
320 **/ 320 **/
321 321
@@ -392,8 +392,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
392 __u16 len; 392 __u16 len;
393 int err = -ENOENT; 393 int err = -ENOENT;
394 394
395 /* If the packet doesn't contain the original IPv6 header we are 395 /* If the packet doesn't contain the original IPv6 header we are
396 in trouble since we might need the source address for further 396 in trouble since we might need the source address for further
397 processing of the error. */ 397 processing of the error. */
398 398
399 read_lock(&ip6ip6_lock); 399 read_lock(&ip6ip6_lock);
@@ -418,7 +418,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
418 if (net_ratelimit()) 418 if (net_ratelimit())
419 printk(KERN_WARNING 419 printk(KERN_WARNING
420 "%s: Too small hop limit or " 420 "%s: Too small hop limit or "
421 "routing loop in tunnel!\n", 421 "routing loop in tunnel!\n",
422 t->parms.name); 422 t->parms.name);
423 rel_msg = 1; 423 rel_msg = 1;
424 } 424 }
@@ -502,7 +502,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
502 int ret = 0; 502 int ret = 0;
503 503
504 if (p->flags & IP6_TNL_F_CAP_RCV) { 504 if (p->flags & IP6_TNL_F_CAP_RCV) {
505 struct net_device *ldev = NULL; 505 struct net_device *ldev = NULL;
506 506
507 if (p->link) 507 if (p->link)
508 ldev = dev_get_by_index(p->link); 508 ldev = dev_get_by_index(p->link);
@@ -525,7 +525,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
525 * Return: 0 525 * Return: 0
526 **/ 526 **/
527 527
528static int 528static int
529ip6ip6_rcv(struct sk_buff *skb) 529ip6ip6_rcv(struct sk_buff *skb)
530{ 530{
531 struct ipv6hdr *ipv6h; 531 struct ipv6hdr *ipv6h;
@@ -595,13 +595,13 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
595/** 595/**
596 * ip6ip6_tnl_addr_conflict - compare packet addresses to tunnel's own 596 * ip6ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
597 * @t: the outgoing tunnel device 597 * @t: the outgoing tunnel device
598 * @hdr: IPv6 header from the incoming packet 598 * @hdr: IPv6 header from the incoming packet
599 * 599 *
600 * Description: 600 * Description:
601 * Avoid trivial tunneling loop by checking that tunnel exit-point 601 * Avoid trivial tunneling loop by checking that tunnel exit-point
602 * doesn't match source of incoming packet. 602 * doesn't match source of incoming packet.
603 * 603 *
604 * Return: 604 * Return:
605 * 1 if conflict, 605 * 1 if conflict,
606 * 0 else 606 * 0 else
607 **/ 607 **/
@@ -617,7 +617,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
617 struct ip6_tnl_parm *p = &t->parms; 617 struct ip6_tnl_parm *p = &t->parms;
618 int ret = 0; 618 int ret = 0;
619 619
620 if (p->flags & IP6_TNL_F_CAP_XMIT) { 620 if (p->flags & IP6_TNL_F_CAP_XMIT) {
621 struct net_device *ldev = NULL; 621 struct net_device *ldev = NULL;
622 622
623 if (p->link) 623 if (p->link)
@@ -641,19 +641,19 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
641 return ret; 641 return ret;
642} 642}
643/** 643/**
644 * ip6ip6_tnl_xmit - encapsulate packet and send 644 * ip6ip6_tnl_xmit - encapsulate packet and send
645 * @skb: the outgoing socket buffer 645 * @skb: the outgoing socket buffer
646 * @dev: the outgoing tunnel device 646 * @dev: the outgoing tunnel device
647 * 647 *
648 * Description: 648 * Description:
649 * Build new header and do some sanity checks on the packet before sending 649 * Build new header and do some sanity checks on the packet before sending
650 * it. 650 * it.
651 * 651 *
652 * Return: 652 * Return:
653 * 0 653 * 0
654 **/ 654 **/
655 655
656static int 656static int
657ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 657ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
658{ 658{
659 struct ip6_tnl *t = netdev_priv(dev); 659 struct ip6_tnl *t = netdev_priv(dev);
@@ -715,7 +715,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
715 if (tdev == dev) { 715 if (tdev == dev) {
716 stats->collisions++; 716 stats->collisions++;
717 if (net_ratelimit()) 717 if (net_ratelimit())
718 printk(KERN_WARNING 718 printk(KERN_WARNING
719 "%s: Local routing loop detected!\n", 719 "%s: Local routing loop detected!\n",
720 t->parms.name); 720 t->parms.name);
721 goto tx_err_dst_release; 721 goto tx_err_dst_release;
@@ -741,11 +741,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
741 * Okay, now see if we can stuff it in the buffer as-is. 741 * Okay, now see if we can stuff it in the buffer as-is.
742 */ 742 */
743 max_headroom += LL_RESERVED_SPACE(tdev); 743 max_headroom += LL_RESERVED_SPACE(tdev);
744 744
745 if (skb_headroom(skb) < max_headroom || 745 if (skb_headroom(skb) < max_headroom ||
746 skb_cloned(skb) || skb_shared(skb)) { 746 skb_cloned(skb) || skb_shared(skb)) {
747 struct sk_buff *new_skb; 747 struct sk_buff *new_skb;
748 748
749 if (!(new_skb = skb_realloc_headroom(skb, max_headroom))) 749 if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
750 goto tx_err_dst_release; 750 goto tx_err_dst_release;
751 751
@@ -775,7 +775,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
775 ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst); 775 ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
776 nf_reset(skb); 776 nf_reset(skb);
777 pkt_len = skb->len; 777 pkt_len = skb->len;
778 err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, 778 err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
779 skb->dst->dev, dst_output); 779 skb->dst->dev, dst_output);
780 780
781 if (net_xmit_eval(err) == 0) { 781 if (net_xmit_eval(err) == 0) {
@@ -898,14 +898,14 @@ ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
898} 898}
899 899
900/** 900/**
901 * ip6ip6_tnl_ioctl - configure ipv6 tunnels from userspace 901 * ip6ip6_tnl_ioctl - configure ipv6 tunnels from userspace
902 * @dev: virtual device associated with tunnel 902 * @dev: virtual device associated with tunnel
903 * @ifr: parameters passed from userspace 903 * @ifr: parameters passed from userspace
904 * @cmd: command to be performed 904 * @cmd: command to be performed
905 * 905 *
906 * Description: 906 * Description:
907 * ip6ip6_tnl_ioctl() is used for managing IPv6 tunnels 907 * ip6ip6_tnl_ioctl() is used for managing IPv6 tunnels
908 * from userspace. 908 * from userspace.
909 * 909 *
910 * The possible commands are the following: 910 * The possible commands are the following:
911 * %SIOCGETTUNNEL: get tunnel parameters for device 911 * %SIOCGETTUNNEL: get tunnel parameters for device
@@ -913,7 +913,7 @@ ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
913 * %SIOCCHGTUNNEL: change tunnel parameters to those given 913 * %SIOCCHGTUNNEL: change tunnel parameters to those given
914 * %SIOCDELTUNNEL: delete tunnel 914 * %SIOCDELTUNNEL: delete tunnel
915 * 915 *
916 * The fallback device "ip6tnl0", created during module 916 * The fallback device "ip6tnl0", created during module
917 * initialization, can be used for creating other tunnel devices. 917 * initialization, can be used for creating other tunnel devices.
918 * 918 *
919 * Return: 919 * Return:
@@ -1009,7 +1009,7 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1009} 1009}
1010 1010
1011/** 1011/**
1012 * ip6ip6_tnl_get_stats - return the stats for tunnel device 1012 * ip6ip6_tnl_get_stats - return the stats for tunnel device
1013 * @dev: virtual device associated with tunnel 1013 * @dev: virtual device associated with tunnel
1014 * 1014 *
1015 * Return: stats for device 1015 * Return: stats for device
@@ -1102,7 +1102,7 @@ ip6ip6_tnl_dev_init(struct net_device *dev)
1102 * Return: 0 1102 * Return: 0
1103 **/ 1103 **/
1104 1104
1105static int 1105static int
1106ip6ip6_fb_tnl_dev_init(struct net_device *dev) 1106ip6ip6_fb_tnl_dev_init(struct net_device *dev)
1107{ 1107{
1108 struct ip6_tnl *t = netdev_priv(dev); 1108 struct ip6_tnl *t = netdev_priv(dev);
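The HASH() macro touched in the ip6_tunnel.c hunk folds an IPv6 address into one of 32 buckets by XORing its four 32-bit words and keeping the low five bits; tunnel lookup then combines the remote and local hashes to pick a list head. A standalone sketch of the per-address hash, taking the address as four words to stay portable:

#include <stdint.h>

#define TNL_HASH_SIZE 32   /* mirrors HASH_SIZE above */

/* Bucket index for one IPv6 address given as four 32-bit words.
 * Byte order does not matter for bucketing as long as it is used
 * consistently for insertion and lookup. */
static unsigned int tnl_hash(const uint32_t a[4])
{
        return (a[0] ^ a[1] ^ a[2] ^ a[3]) & (TNL_HASH_SIZE - 1);
}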
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 511730b67e97..5724ba9f75de 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -9,25 +9,25 @@
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * [Memo]
 *
 * Outbound:
 * The compression of IP datagram MUST be done before AH/ESP processing,
 * fragmentation, and the addition of Hop-by-Hop/Routing header.
 *
 * Inbound:
 * The decompression of IP datagram MUST be done after the reassembly,
 * AH/ESP processing.
 */
#include <linux/module.h>
@@ -176,7 +176,7 @@ out_ok:
}

static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 int type, int code, int offset, __be32 info)
{
 __be32 spi;
 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
@@ -422,7 +422,7 @@ static int ipcomp6_init_state(struct xfrm_state *x)
 x->props.header_len = 0;
 if (x->props.mode == XFRM_MODE_TUNNEL)
 x->props.header_len += sizeof(struct ipv6hdr);

 mutex_lock(&ipcomp6_resource_mutex);
 if (!ipcomp6_alloc_scratches())
 goto error;
@@ -455,7 +455,7 @@ error:
 goto out;
}

static struct xfrm_type ipcomp6_type =
{
 .description = "IPCOMP6",
 .owner = THIS_MODULE,
@@ -467,7 +467,7 @@ static struct xfrm_type ipcomp6_type =
 .hdr_offset = xfrm6_find_1stfragopt,
};

static struct inet6_protocol ipcomp6_protocol =
{
 .handler = xfrm6_rcv,
 .err_handler = ipcomp6_err,
@@ -490,7 +490,7 @@ static int __init ipcomp6_init(void)

static void __exit ipcomp6_fini(void)
{
 if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0)
 printk(KERN_INFO "ipv6 ipcomp close: can't remove protocol\n");
 if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0)
 printk(KERN_INFO "ipv6 ipcomp close: can't remove xfrm type\n");
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 352690e2ab82..3f1e779ea5c5 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1,9 +1,9 @@
/*
 * IPv6 BSD socket options interface
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 *
 * Based on linux/net/ipv4/ip_sockglue.c
 *
@@ -164,7 +164,7 @@ out:
}

static struct packet_type ipv6_packet_type = {
 .type = __constant_htons(ETH_P_IPV6),
 .func = ipv6_rcv,
 .gso_send_check = ipv6_gso_send_check,
 .gso_segment = ipv6_gso_segment,
@@ -320,7 +320,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 np->rxopt.bits.rxinfo = valbool;
 retv = 0;
 break;

 case IPV6_2292PKTINFO:
 np->rxopt.bits.rxoinfo = valbool;
 retv = 0;
@@ -376,7 +376,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 np->tclass = val;
 retv = 0;
 break;

 case IPV6_RECVTCLASS:
 np->rxopt.bits.rxtclass = valbool;
 retv = 0;
@@ -893,7 +893,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 case IPV6_MTU:
 {
 struct dst_entry *dst;
 val = 0;
 lock_sock(sk);
 dst = sk_dst_get(sk);
 if (dst) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index e3ec21695832..c9db5bc5b0f9 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1,13 +1,13 @@
/*
 * Multicast support for IPv6
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 *
 * $Id: mcast.c,v 1.40 2002/02/08 03:57:19 davem Exp $
 *
 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -644,7 +644,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
 memset(&ss, 0, sizeof(ss));
 psin6->sin6_family = AF_INET6;
 psin6->sin6_addr = psl->sl_addr[i];
 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
 return -EFAULT;
 }
 return 0;
@@ -1168,7 +1168,7 @@ int igmp6_event_query(struct sk_buff *skb)

 /* compute payload length excluding extension headers */
 len = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
 len -= (char *)skb->h.raw - (char *)skb->nh.ipv6h;

 /* Drop queries with not link local source */
 if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr)&IPV6_ADDR_LINKLOCAL))
@@ -1206,7 +1206,7 @@ int igmp6_event_query(struct sk_buff *skb)
 /* clear deleted report items */
 mld_clear_delrec(idev);
 } else if (len >= 28) {
 int srcs_offset = sizeof(struct mld2_query) -
 sizeof(struct icmp6hdr);
 if (!pskb_may_pull(skb, srcs_offset)) {
 in6_dev_put(idev);
@@ -1230,7 +1230,7 @@ int igmp6_event_query(struct sk_buff *skb)
 }
 /* mark sources to include, if group & source-specific */
 if (mlh2->nsrcs != 0) {
 if (!pskb_may_pull(skb, srcs_offset +
 ntohs(mlh2->nsrcs) * sizeof(struct in6_addr))) {
 in6_dev_put(idev);
 return -EINVAL;
@@ -1304,7 +1304,7 @@ int igmp6_event_report(struct sk_buff *skb)

 /* Drop reports with not link local source */
 addr_type = ipv6_addr_type(&skb->nh.ipv6h->saddr);
 if (addr_type != IPV6_ADDR_ANY &&
 !(addr_type&IPV6_ADDR_LINKLOCAL))
 return -EINVAL;

@@ -1413,7 +1413,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)

 if (ipv6_get_lladdr(dev, &addr_buf)) {
 /* <draft-ietf-magma-mld-source-05.txt>:
 * use unspecified address as the source address
 * when a valid link-local address is not available.
 */
 memset(&addr_buf, 0, sizeof(addr_buf));
@@ -1454,7 +1454,7 @@ static inline int mld_dev_queue_xmit2(struct sk_buff *skb)
static inline int mld_dev_queue_xmit(struct sk_buff *skb)
{
 return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
 mld_dev_queue_xmit2);
}

static void mld_sendpack(struct sk_buff *skb)
@@ -1754,8 +1754,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
 struct sock *sk = igmp6_socket->sk;
 struct inet6_dev *idev;
 struct sk_buff *skb;
 struct icmp6hdr *hdr;
 struct in6_addr *snd_addr;
 struct in6_addr *addrp;
 struct in6_addr addr_buf;
@@ -1793,7 +1793,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)

 if (ipv6_get_lladdr(dev, &addr_buf)) {
 /* <draft-ietf-magma-mld-source-05.txt>:
 * use unspecified address as the source address
 * when a valid link-local address is not available.
 */
 memset(&addr_buf, 0, sizeof(addr_buf));
@@ -2330,7 +2330,7 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

 for (state->dev = dev_base, state->idev = NULL;
 state->dev;
 state->dev = state->dev->next) {
 struct inet6_dev *idev;
 idev = in6_dev_get(state->dev);
@@ -2413,7 +2413,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

 seq_printf(seq,
 "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n",
 state->dev->ifindex, state->dev->name,
 NIP6(im->mca_addr),
 im->mca_users, im->mca_flags,
@@ -2474,7 +2474,7 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

 for (state->dev = dev_base, state->idev = NULL, state->im = NULL;
 state->dev;
 state->dev = state->dev->next) {
 struct inet6_dev *idev;
 idev = in6_dev_get(state->dev);
@@ -2579,7 +2579,7 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

 if (v == SEQ_START_TOKEN) {
 seq_printf(seq,
 "%3s %6s "
 "%32s %32s %6s %6s\n", "Idx",
 "Device", "Multicast Address",
@@ -2608,7 +2608,7 @@ static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
 struct seq_file *seq;
 int rc = -ENOMEM;
 struct igmp6_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

 if (!s)
 goto out;

diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 681bb077eacc..0afcabdd8ed6 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -333,7 +333,7 @@ static struct xfrm_type mip6_destopt_type =
 .destructor = mip6_destopt_destroy,
 .input = mip6_destopt_input,
 .output = mip6_destopt_output,
 .reject = mip6_destopt_reject,
 .hdr_offset = mip6_destopt_offset,
 .local_addr = mip6_xfrm_addr,
};
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 39bb658f3c44..121f31c283f8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1,9 +1,9 @@
/*
 * Neighbour Discovery for IPv6
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque <roque@di.fc.ul.pt>
 * Mike Shaver <shaver@ingenia.com>
 *
 * This program is free software; you can redistribute it and/or
@@ -427,25 +427,25 @@ static inline void ndisc_flow_init(struct flowi *fl, u8 type,

static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 struct in6_addr *daddr, struct in6_addr *solicited_addr,
 int router, int solicited, int override, int inc_opt)
{
 struct in6_addr tmpaddr;
 struct inet6_ifaddr *ifp;
 struct inet6_dev *idev;
 struct flowi fl;
 struct dst_entry* dst;
 struct sock *sk = ndisc_socket->sk;
 struct in6_addr *src_addr;
 struct nd_msg *msg;
 int len;
 struct sk_buff *skb;
 int err;

 len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

 /* for anycast or proxy, solicited_addr != src_addr */
 ifp = ipv6_get_ifaddr(solicited_addr, dev, 1);
 if (ifp) {
 src_addr = solicited_addr;
 in6_ifa_put(ifp);
 } else {
@@ -479,7 +479,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,

 if (skb == NULL) {
 ND_PRINTK0(KERN_ERR
 "ICMPv6 NA: %s() failed to allocate an skb.\n",
 __FUNCTION__);
 dst_release(dst);
 return;
@@ -491,16 +491,16 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 msg = (struct nd_msg *)skb_put(skb, len);
 skb->h.raw = (unsigned char*)msg;

 msg->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
 msg->icmph.icmp6_code = 0;
 msg->icmph.icmp6_cksum = 0;

 msg->icmph.icmp6_unused = 0;
 msg->icmph.icmp6_router = router;
 msg->icmph.icmp6_solicited = solicited;
 msg->icmph.icmp6_override = override;

 /* Set the target address. */
 ipv6_addr_copy(&msg->target, solicited_addr);

 if (inc_opt)
@@ -508,9 +508,9 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 dev->addr_len, dev->type);

 /* checksum */
 msg->icmph.icmp6_cksum = csum_ipv6_magic(src_addr, daddr, len,
 IPPROTO_ICMPV6,
 csum_partial((__u8 *) msg,
 len, 0));

 skb->dst = dst;
@@ -524,20 +524,20 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,

 if (likely(idev != NULL))
 in6_dev_put(idev);
}

void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 struct in6_addr *solicit,
 struct in6_addr *daddr, struct in6_addr *saddr)
{
 struct flowi fl;
 struct dst_entry* dst;
 struct inet6_dev *idev;
 struct sock *sk = ndisc_socket->sk;
 struct sk_buff *skb;
 struct nd_msg *msg;
 struct in6_addr addr_buf;
 int len;
 int err;
 int send_llinfo;

@@ -569,7 +569,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 1, &err);
 if (skb == NULL) {
 ND_PRINTK0(KERN_ERR
 "ICMPv6 NA: %s() failed to allocate an skb.\n",
 __FUNCTION__);
 dst_release(dst);
 return;
@@ -594,9 +594,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,

 /* checksum */
 msg->icmph.icmp6_cksum = csum_ipv6_magic(&skb->nh.ipv6h->saddr,
 daddr, len,
 IPPROTO_ICMPV6,
 csum_partial((__u8 *) msg,
 len, 0));
 /* send it! */
 skb->dst = dst;
@@ -619,10 +619,10 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
 struct dst_entry* dst;
 struct inet6_dev *idev;
 struct sock *sk = ndisc_socket->sk;
 struct sk_buff *skb;
 struct icmp6hdr *hdr;
 __u8 * opt;
 int len;
 int err;

 ndisc_flow_init(&fl, NDISC_ROUTER_SOLICITATION, saddr, daddr,
@@ -640,13 +640,13 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
 if (dev->addr_len)
 len += ndisc_opt_addr_space(dev);

 skb = sock_alloc_send_skb(sk,
 (MAX_HEADER + sizeof(struct ipv6hdr) +
 len + LL_RESERVED_SPACE(dev)),
 1, &err);
 if (skb == NULL) {
 ND_PRINTK0(KERN_ERR
 "ICMPv6 RS: %s() failed to allocate an skb.\n",
 __FUNCTION__);
 dst_release(dst);
 return;
@@ -655,12 +655,12 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
 skb_reserve(skb, LL_RESERVED_SPACE(dev));
 ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);

 hdr = (struct icmp6hdr *)skb_put(skb, len);
 skb->h.raw = (unsigned char*)hdr;
 hdr->icmp6_type = NDISC_ROUTER_SOLICITATION;
 hdr->icmp6_code = 0;
 hdr->icmp6_cksum = 0;
 hdr->icmp6_unused = 0;

 opt = (u8*) (hdr + 1);

@@ -686,7 +686,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
 if (likely(idev != NULL))
 in6_dev_put(idev);
}


static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
@@ -748,7 +748,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 int is_router;

 if (ipv6_addr_is_multicast(&msg->target)) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 NS: multicast target address");
 return;
 }
@@ -768,7 +768,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 }

 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 NS: invalid ND options\n");
 return;
 }
@@ -782,12 +782,12 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 }

 /* RFC2461 7.1.1:
 * If the IP source address is the unspecified address,
 * there MUST NOT be source link-layer address option
 * in the message.
 */
 if (dad) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 NS: bad DAD packet (link-layer address option)\n");
 return;
 }
@@ -816,7 +816,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 goto out;
 }
 }
 addrconf_dad_failure(ifp);
 return;
 }

@@ -829,7 +829,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 }

 if (ipv6_chk_acast_addr(dev, &msg->target) ||
 (idev->cnf.forwarding &&
 (ipv6_devconf.proxy_ndp || idev->cnf.proxy_ndp) &&
 (pneigh = pneigh_lookup(&nd_tbl,
 &msg->target, dev, 0)) != NULL)) {
@@ -839,8 +839,8 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 idev->nd_parms->proxy_delay != 0) {
 /*
 * for anycast or proxy,
 * sender should delay its response
 * by a random time between 0 and
 * MAX_ANYCAST_DELAY_TIME seconds.
 * (RFC2461) -- yoshfuji
 */
@@ -869,14 +869,14 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 else
 NEIGH_CACHE_STAT_INC(&nd_tbl, rcv_probes_ucast);

 /*
 * update / create cache entry
 * for the source address
 */
 neigh = __neigh_lookup(&nd_tbl, saddr, dev,
 !inc || lladdr || !dev->addr_len);
 if (neigh)
 neigh_update(neigh, lladdr, NUD_STALE,
 NEIGH_UPDATE_F_WEAK_OVERRIDE|
 NEIGH_UPDATE_F_OVERRIDE);
 if (neigh || !dev->hard_header) {
@@ -926,7 +926,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
 "ICMPv6 NA: solicited NA is multicasted.\n");
 return;
 }

 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 NS: invalid ND option\n");
@@ -1057,7 +1057,7 @@ out:

static void ndisc_router_discovery(struct sk_buff *skb)
{
 struct ra_msg *ra_msg = (struct ra_msg *) skb->h.raw;
 struct neighbour *neigh = NULL;
 struct inet6_dev *in6_dev;
 struct rt6_info *rt = NULL;
@@ -1076,7 +1076,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 return;
 }
 if (optlen < 0) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 RA: packet too short\n");
 return;
 }
@@ -1286,7 +1286,7 @@ skip_defrtr:
 rt6_mtu_change(skb->dev, mtu);
 }
 }

 if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 RA: invalid RA options");
@@ -1339,7 +1339,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 if (ipv6_addr_equal(dest, target)) {
 on_link = 1;
 } else if (!(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 Redirect: target address is not link-local.\n");
 return;
 }
@@ -1352,11 +1352,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 return;
 }

 /* RFC2461 8.1:
 * The IP source address of the Redirect MUST be the same as the current
 * first-hop router for the specified ICMP Destination Address.
 */

 if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 Redirect: invalid ND options\n");
@@ -1410,8 +1410,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 Redirect: no link-local address on %s\n",
 dev->name);
 return;
 }

 if (!ipv6_addr_equal(&skb->nh.ipv6h->daddr, target) &&
 !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
@@ -1507,7 +1507,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 * build redirect option and copy skb over to the new packet.
 */

 memset(opt, 0, 8);
 *(opt++) = ND_OPT_REDIRECT_HDR;
 *(opt++) = (rd_len >> 3);
 opt += 6;
@@ -1556,7 +1556,7 @@ int ndisc_rcv(struct sk_buff *skb)
 }

 if (msg->icmph.icmp6_code != 0) {
 ND_PRINTK2(KERN_WARNING
 "ICMPv6 NDISC: invalid ICMPv6 code: %d\n",
 msg->icmph.icmp6_code);
 return 0;
@@ -1717,12 +1717,12 @@ int __init ndisc_init(struct net_proto_family *ops)
{
 struct ipv6_pinfo *np;
 struct sock *sk;
 int err;

 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &ndisc_socket);
 if (err < 0) {
 ND_PRINTK0(KERN_ERR
 "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n",
 err);
 ndisc_socket = NULL; /* For safety. */
 return err;
@@ -1736,14 +1736,14 @@ int __init ndisc_init(struct net_proto_family *ops)
 np->mc_loop = 0;
 sk->sk_prot->unhash(sk);

 /*
 * Initialize the neighbour table
 */

 neigh_table_init(&nd_tbl);

#ifdef CONFIG_SYSCTL
 neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6, NET_IPV6_NEIGH,
 "ipv6",
 &ndisc_ifinfo_sysctl_change,
 &ndisc_ifinfo_sysctl_strategy);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index f6294e5bcb31..0b2d265e7da7 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -91,7 +91,7 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 if (hook != NF_IP6_PRE_ROUTING && hook != NF_IP6_LOCAL_IN)
 break;
 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 skb->len - dataoff, protocol,
 csum_sub(skb->csum,
 skb_checksum(skb, 0,
 dataoff, 0)))) {
@@ -106,7 +106,7 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 protocol,
 csum_sub(0,
 skb_checksum(skb, 0,
 dataoff, 0))));
 csum = __skb_checksum_complete(skb);
 }
 return csum;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index d4d9f182441a..58847d3b61e5 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -95,7 +95,7 @@ __ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data)

 list_for_each_prev(p, &queue_list) {
 struct ipq_queue_entry *entry = (struct ipq_queue_entry *)p;

 if (!cmpfn || cmpfn(entry, data))
 return entry;
 }
@@ -127,7 +127,7 @@ static inline void
__ipq_flush(int verdict)
{
 struct ipq_queue_entry *entry;

 while ((entry = __ipq_find_dequeue_entry(NULL, 0)))
 ipq_issue_verdict(entry, verdict);
}
@@ -136,21 +136,21 @@ static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
 int status = 0;

 switch(mode) {
 case IPQ_COPY_NONE:
 case IPQ_COPY_META:
 copy_mode = mode;
 copy_range = 0;
 break;

 case IPQ_COPY_PACKET:
 copy_mode = mode;
 copy_range = range;
 if (copy_range > 0xFFFF)
 copy_range = 0xFFFF;
 break;

 default:
 status = -EINVAL;

@@ -171,7 +171,7 @@ static struct ipq_queue_entry *
ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
 struct ipq_queue_entry *entry;

 write_lock_bh(&queue_lock);
 entry = __ipq_find_dequeue_entry(cmpfn, data);
 write_unlock_bh(&queue_lock);
@@ -197,14 +197,14 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 struct nlmsghdr *nlh;

 read_lock_bh(&queue_lock);

 switch (copy_mode) {
 case IPQ_COPY_META:
 case IPQ_COPY_NONE:
 size = NLMSG_SPACE(sizeof(*pmsg));
 data_len = 0;
 break;

 case IPQ_COPY_PACKET:
 if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
 entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
@@ -216,10 +216,10 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 data_len = entry->skb->len;
 else
 data_len = copy_range;

 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
 break;

 default:
 *errp = -EINVAL;
 read_unlock_bh(&queue_lock);
@@ -231,7 +231,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 skb = alloc_skb(size, GFP_ATOMIC);
 if (!skb)
 goto nlmsg_failure;

 old_tail= skb->tail;
 nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
 pmsg = NLMSG_DATA(nlh);
@@ -244,29 +244,29 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
 pmsg->mark = entry->skb->mark;
 pmsg->hook = entry->info->hook;
 pmsg->hw_protocol = entry->skb->protocol;

 if (entry->info->indev)
 strcpy(pmsg->indev_name, entry->info->indev->name);
 else
 pmsg->indev_name[0] = '\0';

 if (entry->info->outdev)
 strcpy(pmsg->outdev_name, entry->info->outdev->name);
 else
 pmsg->outdev_name[0] = '\0';

 if (entry->info->indev && entry->skb->dev) {
 pmsg->hw_type = entry->skb->dev->type;
 if (entry->skb->dev->hard_header_parse)
 pmsg->hw_addrlen =
 entry->skb->dev->hard_header_parse(entry->skb,
 pmsg->hw_addr);
 }

 if (data_len)
 if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
 BUG();

 nlh->nlmsg_len = skb->tail - old_tail;
 return skb;

@@ -279,7 +279,7 @@ nlmsg_failure:
}

static int
ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
 unsigned int queuenum, void *data)
{
 int status = -EINVAL;
@@ -301,37 +301,37 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
 nskb = ipq_build_packet_message(entry, &status);
 if (nskb == NULL)
 goto err_out_free;

 write_lock_bh(&queue_lock);

 if (!peer_pid)
 goto err_out_free_nskb;

 if (queue_total >= queue_maxlen) {
 queue_dropped++;
 status = -ENOSPC;
 if (net_ratelimit())
 printk (KERN_WARNING "ip6_queue: fill at %d entries, "
 "dropping packet(s). Dropped: %d\n", queue_total,
 queue_dropped);
 goto err_out_free_nskb;
 }

 /* netlink_unicast will either free the nskb or attach it to a socket */
 status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
 if (status < 0) {
 queue_user_dropped++;
 goto err_out_unlock;
 }

 __ipq_enqueue_entry(entry);

 write_unlock_bh(&queue_lock);
 return status;

err_out_free_nskb:
 kfree_skb(nskb);

err_out_unlock:
 write_unlock_bh(&queue_lock);

@@ -357,11 +357,11 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
 return -EINVAL;
 if (diff > skb_tailroom(e->skb)) {
 struct sk_buff *newskb;

 newskb = skb_copy_expand(e->skb,
 skb_headroom(e->skb),
 diff,
 GFP_ATOMIC);
 if (newskb == NULL) {
 printk(KERN_WARNING "ip6_queue: OOM "
 "in mangle, dropping packet\n");
@@ -401,11 +401,11 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 return -ENOENT;
 else {
 int verdict = vmsg->value;

 if (vmsg->data_len && vmsg->data_len == len)
 if (ipq_mangle_ipv6(vmsg, entry) < 0)
 verdict = NF_DROP;

 ipq_issue_verdict(entry, verdict);
 return 0;
 }
@@ -424,7 +424,7 @@ ipq_set_mode(unsigned char mode, unsigned int range)

static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
 unsigned char type, unsigned int len)
{
 int status = 0;

@@ -434,15 +434,15 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
 switch (type) {
 case IPQM_MODE:
 status = ipq_set_mode(pmsg->msg.mode.value,
 pmsg->msg.mode.range);
 break;

 case IPQM_VERDICT:
 if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
 status = -EINVAL;
 else
 status = ipq_set_verdict(&pmsg->msg.verdict,
 len - sizeof(*pmsg));
 break;
 default:
 status = -EINVAL;
@@ -456,7 +456,7 @@ dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
 if (entry->info->indev)
 if (entry->info->indev->ifindex == ifindex)
 return 1;

 if (entry->info->outdev)
 if (entry->info->outdev->ifindex == ifindex)
 return 1;
@@ -468,7 +468,7 @@ static void
ipq_dev_drop(int ifindex)
{
 struct ipq_queue_entry *entry;

 while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL)
 ipq_issue_verdict(entry, NF_DROP);
}
@@ -492,25 +492,25 @@ ipq_rcv_skb(struct sk_buff *skb)

 pid = nlh->nlmsg_pid;
 flags = nlh->nlmsg_flags;

 if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
 RCV_SKB_FAIL(-EINVAL);

 if (flags & MSG_TRUNC)
 RCV_SKB_FAIL(-ECOMM);

 type = nlh->nlmsg_type;
 if (type < NLMSG_NOOP || type >= IPQM_MAX)
 RCV_SKB_FAIL(-EINVAL);

 if (type <= IPQM_BASE)
 return;

 if (security_netlink_recv(skb, CAP_NET_ADMIN))
 RCV_SKB_FAIL(-EPERM);

 write_lock_bh(&queue_lock);

 if (peer_pid) {
 if (peer_pid != pid) {
 write_unlock_bh(&queue_lock);
@@ -520,17 +520,17 @@ ipq_rcv_skb(struct sk_buff *skb)
 net_enable_timestamp();
 peer_pid = pid;
 }

 write_unlock_bh(&queue_lock);

 status = ipq_receive_peer(NLMSG_DATA(nlh), type,
 nlmsglen - NLMSG_LENGTH(0));
 if (status < 0)
 RCV_SKB_FAIL(status);

 if (flags & NLM_F_ACK)
 netlink_ack(skb, nlh, 0);
 return;
}

static void
@@ -540,19 +540,19 @@ ipq_rcv_sk(struct sock *sk, int len)
 unsigned int qlen;

 mutex_lock(&ipqnl_mutex);

 for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
 skb = skb_dequeue(&sk->sk_receive_queue);
 ipq_rcv_skb(skb);
 kfree_skb(skb);
 }

 mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
 unsigned long event, void *ptr)
{
 struct net_device *dev = ptr;

@@ -568,7 +568,7 @@ static struct notifier_block ipq_dev_notifier = {

static int
ipq_rcv_nl_event(struct notifier_block *this,
 unsigned long event, void *ptr)
{
 struct netlink_notify *n = ptr;

@@ -597,7 +597,7 @@ static ctl_table ipq_table[] = {
 .mode = 0644,
 .proc_handler = proc_dointvec
 },
 { .ctl_name = 0 }
};

static ctl_table ipq_dir_table[] = {
@@ -627,25 +627,25 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
 int len;

 read_lock_bh(&queue_lock);

 len = sprintf(buffer,
 "Peer PID : %d\n"
 "Copy mode : %hu\n"
 "Copy range : %u\n"
 "Queue length : %u\n"
 "Queue max. length : %u\n"
 "Queue dropped : %u\n"
 "Netfilter dropped : %u\n",
 peer_pid,
 copy_mode,
 copy_range,
 queue_total,
 queue_maxlen,
 queue_dropped,
 queue_user_dropped);

 read_unlock_bh(&queue_lock);

 *start = buffer + offset;
 len -= offset;
 if (len > length)
@@ -665,10 +665,10 @@ static int __init ip6_queue_init(void)
{
 int status = -ENOMEM;
 struct proc_dir_entry *proc;

 netlink_register_notifier(&ipq_nl_notifier);
 ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk,
 THIS_MODULE);
 if (ipqnl == NULL) {
 printk(KERN_ERR "ip6_queue: failed to create netlink socket\n");
 goto cleanup_netlink_notifier;
@@ -681,10 +681,10 @@ static int __init ip6_queue_init(void)
 printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
 goto cleanup_ipqnl;
 }

 register_netdevice_notifier(&ipq_dev_notifier);
 ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0);

 status = nf_register_queue_handler(PF_INET6, &nfqh);
 if (status < 0) {
 printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
@@ -696,12 +696,12 @@ cleanup_sysctl:
 unregister_sysctl_table(ipq_sysctl_header);
 unregister_netdevice_notifier(&ipq_dev_notifier);
 proc_net_remove(IPQ_PROC_FS_NAME);

cleanup_ipqnl:
 sock_release(ipqnl->sk_socket);
 mutex_lock(&ipqnl_mutex);
 mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
 netlink_unregister_notifier(&ipq_nl_notifier);
 return status;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7083e1cfb2f5..7c512e13f956 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -92,16 +92,16 @@ do { \
92#endif 92#endif
93 93
94/* Check for an extension */ 94/* Check for an extension */
95int 95int
96ip6t_ext_hdr(u8 nexthdr) 96ip6t_ext_hdr(u8 nexthdr)
97{ 97{
98 return ( (nexthdr == IPPROTO_HOPOPTS) || 98 return ( (nexthdr == IPPROTO_HOPOPTS) ||
99 (nexthdr == IPPROTO_ROUTING) || 99 (nexthdr == IPPROTO_ROUTING) ||
100 (nexthdr == IPPROTO_FRAGMENT) || 100 (nexthdr == IPPROTO_FRAGMENT) ||
101 (nexthdr == IPPROTO_ESP) || 101 (nexthdr == IPPROTO_ESP) ||
102 (nexthdr == IPPROTO_AH) || 102 (nexthdr == IPPROTO_AH) ||
103 (nexthdr == IPPROTO_NONE) || 103 (nexthdr == IPPROTO_NONE) ||
104 (nexthdr == IPPROTO_DSTOPTS) ); 104 (nexthdr == IPPROTO_DSTOPTS) );
105} 105}
106 106
107/* Returns whether matches rule or not. */ 107/* Returns whether matches rule or not. */
@@ -120,9 +120,9 @@ ip6_packet_match(const struct sk_buff *skb,
120#define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg)) 120#define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))
121 121
122 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, 122 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
123 &ip6info->src), IP6T_INV_SRCIP) 123 &ip6info->src), IP6T_INV_SRCIP)
124 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, 124 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
125 &ip6info->dst), IP6T_INV_DSTIP)) { 125 &ip6info->dst), IP6T_INV_DSTIP)) {
126 dprintf("Source or dest mismatch.\n"); 126 dprintf("Source or dest mismatch.\n");
127/* 127/*
128 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, 128 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
@@ -177,7 +177,7 @@ ip6_packet_match(const struct sk_buff *skb,
177 *fragoff = _frag_off; 177 *fragoff = _frag_off;
178 178
179 dprintf("Packet protocol %hi ?= %s%hi.\n", 179 dprintf("Packet protocol %hi ?= %s%hi.\n",
180 protohdr, 180 protohdr,
181 ip6info->invflags & IP6T_INV_PROTO ? "!":"", 181 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
182 ip6info->proto); 182 ip6info->proto);
183 183
@@ -197,7 +197,7 @@ ip6_packet_match(const struct sk_buff *skb,
197} 197}
198 198
199/* should be ip6 safe */ 199/* should be ip6 safe */
200static inline int 200static inline int
201ip6_checkentry(const struct ip6t_ip6 *ipv6) 201ip6_checkentry(const struct ip6t_ip6 *ipv6)
202{ 202{
203 if (ipv6->flags & ~IP6T_F_MASK) { 203 if (ipv6->flags & ~IP6T_F_MASK) {
@@ -337,7 +337,7 @@ ip6t_do_table(struct sk_buff **pskb,
337 e = get_entry(table_base, v); 337 e = get_entry(table_base, v);
338 } else { 338 } else {
339 /* Targets which reenter must return 339 /* Targets which reenter must return
340 abs. verdicts */ 340 abs. verdicts */
341#ifdef CONFIG_NETFILTER_DEBUG 341#ifdef CONFIG_NETFILTER_DEBUG
342 ((struct ip6t_entry *)table_base)->comefrom 342 ((struct ip6t_entry *)table_base)->comefrom
343 = 0xeeeeeeec; 343 = 0xeeeeeeec;
@@ -534,10 +534,10 @@ check_match(struct ip6t_entry_match *m,
534 int ret; 534 int ret;
535 535
536 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name, 536 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
537 m->u.user.revision), 537 m->u.user.revision),
538 "ip6t_%s", m->u.user.name); 538 "ip6t_%s", m->u.user.name);
539 if (IS_ERR(match) || !match) { 539 if (IS_ERR(match) || !match) {
540 duprintf("check_match: `%s' not found\n", m->u.user.name); 540 duprintf("check_match: `%s' not found\n", m->u.user.name);
541 return match ? PTR_ERR(match) : -ENOENT; 541 return match ? PTR_ERR(match) : -ENOENT;
542 } 542 }
543 m->u.kernel.match = match; 543 m->u.kernel.match = match;
@@ -661,7 +661,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
661 } 661 }
662 662
663 /* FIXME: underflows must be unconditional, standard verdicts 663 /* FIXME: underflows must be unconditional, standard verdicts
664 < 0 (not IP6T_RETURN). --RR */ 664 < 0 (not IP6T_RETURN). --RR */
665 665
666 /* Clear counters and comefrom */ 666 /* Clear counters and comefrom */
667 e->counters = ((struct xt_counters) { 0, 0 }); 667 e->counters = ((struct xt_counters) { 0, 0 });
@@ -1003,8 +1003,8 @@ do_replace(void __user *user, unsigned int len)
 	/* Update module usage count based on number of rules */
 	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
 		oldinfo->number, oldinfo->initial_entries, newinfo->number);
 	if ((oldinfo->number > oldinfo->initial_entries) ||
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
 	if ((oldinfo->number > oldinfo->initial_entries) &&
 	    (newinfo->number <= oldinfo->initial_entries))
@@ -1492,9 +1492,9 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
 		}
 		hdrlen = 8;
 	} else if (nexthdr == NEXTHDR_AUTH)
 		hdrlen = (hp->hdrlen + 2) << 2;
 	else
 		hdrlen = ipv6_optlen(hp);
 
 	nexthdr = hp->nexthdr;
 	len -= hdrlen;
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
index 04e500172fb4..ccbab66277e3 100644
--- a/net/ipv6/netfilter/ip6t_HL.c
+++ b/net/ipv6/netfilter/ip6t_HL.c
@@ -1,4 +1,4 @@
 /*
  * Hop Limit modification target for ip6tables
  * Maciej Soltysiak <solt@dns.toxicfilms.tv>
  * Based on HW's TTL module
@@ -18,7 +18,7 @@ MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
 MODULE_DESCRIPTION("IP6 tables Hop Limit modification module");
 MODULE_LICENSE("GPL");
 
 static unsigned int ip6t_hl_target(struct sk_buff **pskb,
 				   const struct net_device *in,
 				   const struct net_device *out,
 				   unsigned int hooknum,
@@ -67,7 +67,7 @@ static int ip6t_hl_checkentry(const char *tablename,
 	struct ip6t_HL_info *info = targinfo;
 
 	if (info->mode > IP6T_HL_MAXMODE) {
 		printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n",
 			info->mode);
 		return 0;
 	}
@@ -80,12 +80,12 @@ static int ip6t_hl_checkentry(const char *tablename,
 }
 
 static struct xt_target ip6t_HL = {
 	.name		= "HL",
 	.family		= AF_INET6,
 	.target		= ip6t_hl_target,
 	.targetsize	= sizeof(struct ip6t_HL_info),
 	.table		= "mangle",
 	.checkentry	= ip6t_hl_checkentry,
 	.me		= THIS_MODULE
 };
 
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 5587a77b884c..dc9ec9305778 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -145,7 +145,7 @@ static void dump_packet(const struct nf_loginfo *info,
 						  &_ahdr);
 			if (ah == NULL) {
 				/*
 				 * Max length: 26 "INCOMPLETE [65535
 				 *  bytes] )"
 				 */
 				printk("INCOMPLETE [%u bytes] )",
@@ -387,7 +387,7 @@ ip6t_log_packet(unsigned int pf,
 		loginfo = &default_loginfo;
 
 	spin_lock_bh(&log_lock);
 	printk("<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
 		prefix,
 		in ? in->name : "",
 		out ? out->name : "");
@@ -442,7 +442,7 @@ ip6t_log_target(struct sk_buff **pskb,
 	li.u.log.logflags = loginfo->logflags;
 
 	ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
 			loginfo->prefix);
 	return XT_CONTINUE;
 }
 
@@ -470,9 +470,9 @@ static int ip6t_log_checkentry(const char *tablename,
 static struct xt_target ip6t_log_reg = {
 	.name		= "LOG",
 	.family		= AF_INET6,
 	.target		= ip6t_log_target,
 	.targetsize	= sizeof(struct ip6t_log_info),
 	.checkentry	= ip6t_log_checkentry,
 	.me		= THIS_MODULE,
 };
 
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 278349c18793..6abee94c929f 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -189,23 +189,23 @@ static unsigned int reject6_target(struct sk_buff **pskb,
 	/* WARNING: This code causes reentry within ip6tables.
 	   This means that the ip6tables jump stack is now crap.  We
 	   must return an absolute verdict. --RR */
 	switch (reject->with) {
 	case IP6T_ICMP6_NO_ROUTE:
 		send_unreach(*pskb, ICMPV6_NOROUTE, hooknum);
 		break;
 	case IP6T_ICMP6_ADM_PROHIBITED:
 		send_unreach(*pskb, ICMPV6_ADM_PROHIBITED, hooknum);
 		break;
 	case IP6T_ICMP6_NOT_NEIGHBOUR:
 		send_unreach(*pskb, ICMPV6_NOT_NEIGHBOUR, hooknum);
 		break;
 	case IP6T_ICMP6_ADDR_UNREACH:
 		send_unreach(*pskb, ICMPV6_ADDR_UNREACH, hooknum);
 		break;
 	case IP6T_ICMP6_PORT_UNREACH:
 		send_unreach(*pskb, ICMPV6_PORT_UNREACH, hooknum);
 		break;
 	case IP6T_ICMP6_ECHOREPLY:
 		/* Do nothing */
 		break;
 	case IP6T_TCP_RESET:
@@ -226,7 +226,7 @@ static int check(const char *tablename,
 		  void *targinfo,
 		  unsigned int hook_mask)
 {
 	const struct ip6t_reject_info *rejinfo = targinfo;
 	const struct ip6t_entry *e = entry;
 
 	if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 456c76adcbf6..d3c154371b41 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -78,13 +78,13 @@ match(const struct sk_buff *skb,
 
 	DEBUGP("IPv6 AH spi %02X ",
 	       (spi_match(ahinfo->spis[0], ahinfo->spis[1],
 			  ntohl(ah->spi),
 			  !!(ahinfo->invflags & IP6T_AH_INV_SPI))));
 	DEBUGP("len %02X %04X %02X ",
 	       ahinfo->hdrlen, hdrlen,
 	       (!ahinfo->hdrlen ||
 		(ahinfo->hdrlen == hdrlen) ^
 		!!(ahinfo->invflags & IP6T_AH_INV_LEN)));
 	DEBUGP("res %02X %04X %02X\n",
 	       ahinfo->hdrres, ah->reserved,
 	       !(ahinfo->hdrres && ah->reserved));
@@ -92,12 +92,12 @@ match(const struct sk_buff *skb,
 	return (ah != NULL)
 	       &&
 	       (spi_match(ahinfo->spis[0], ahinfo->spis[1],
 			  ntohl(ah->spi),
 			  !!(ahinfo->invflags & IP6T_AH_INV_SPI)))
 	       &&
 	       (!ahinfo->hdrlen ||
 		(ahinfo->hdrlen == hdrlen) ^
 		!!(ahinfo->invflags & IP6T_AH_INV_LEN))
 	       &&
 	       !(ahinfo->hdrres && ah->reserved);
 }
@@ -105,10 +105,10 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
 	   const void *entry,
 	   const struct xt_match *match,
 	   void *matchinfo,
 	   unsigned int hook_mask)
 {
 	const struct ip6t_ah *ahinfo = matchinfo;
 
diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c
index 601cc1211c62..37c8a4d4ed78 100644
--- a/net/ipv6/netfilter/ip6t_hl.c
+++ b/net/ipv6/netfilter/ip6t_hl.c
@@ -41,7 +41,7 @@ static int match(const struct sk_buff *skb,
 		return (ip6h->hop_limit > info->hop_limit);
 		break;
 	default:
 		printk(KERN_WARNING "ip6t_hl: unknown mode %d\n",
 			info->mode);
 		return 0;
 	}
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 26ac084adefc..700a11d25deb 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -1,7 +1,7 @@
 /* ipv6header match - matches IPv6 packets based
    on whether they contain certain headers */
 
 /* Original idea: Brad Chapman
  * Rewritten by: Andras Kis-Szabo <kisza@sch.bme.hu> */
 
 /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu>
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 5f5aa0e51478..0c468d35a937 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -50,7 +50,7 @@ static struct
 	  0, NULL, { } },
 	{
 		/* PRE_ROUTING */
 		{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
 		0,
 		sizeof(struct ip6t_entry),
 		sizeof(struct ip6t_standard),
@@ -58,7 +58,7 @@ static struct
 		{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
 		  -NF_ACCEPT - 1 } },
 		/* LOCAL_IN */
 		{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
 		0,
 		sizeof(struct ip6t_entry),
 		sizeof(struct ip6t_standard),
@@ -66,7 +66,7 @@ static struct
 		{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
 		  -NF_ACCEPT - 1 } },
 		/* FORWARD */
 		{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
 		0,
 		sizeof(struct ip6t_entry),
 		sizeof(struct ip6t_standard),
@@ -74,7 +74,7 @@ static struct
 		{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
 		  -NF_ACCEPT - 1 } },
 		/* LOCAL_OUT */
 		{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
 		0,
 		sizeof(struct ip6t_entry),
 		sizeof(struct ip6t_standard),
@@ -156,7 +156,7 @@ ip6t_local_hook(unsigned int hook,
 
 	ret = ip6t_do_table(pskb, hook, in, out, &packet_mangler);
 
 	if (ret != NF_DROP && ret != NF_STOLEN
 		&& (memcmp(&(*pskb)->nh.ipv6h->saddr, &saddr, sizeof(saddr))
 		    || memcmp(&(*pskb)->nh.ipv6h->daddr, &daddr, sizeof(daddr))
 		    || (*pskb)->mark != mark
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 277bf34638b4..a3eb5b8ce18d 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -87,10 +87,10 @@ static struct
 	}
 };
 
 static struct xt_table packet_raw = {
 	.name = "raw",
 	.valid_hooks = RAW_VALID_HOOKS,
 	.lock = RW_LOCK_UNLOCKED,
 	.me = THIS_MODULE,
 	.af = AF_INET6,
 };
@@ -106,17 +106,17 @@ ip6t_hook(unsigned int hook,
 	return ip6t_do_table(pskb, hook, in, out, &packet_raw);
 }
 
 static struct nf_hook_ops ip6t_ops[] = {
 	{
 		.hook = ip6t_hook,
 		.pf = PF_INET6,
 		.hooknum = NF_IP6_PRE_ROUTING,
 		.priority = NF_IP6_PRI_FIRST,
 		.owner = THIS_MODULE,
 	},
 	{
 		.hook = ip6t_hook,
 		.pf = PF_INET6,
 		.hooknum = NF_IP6_LOCAL_OUT,
 		.priority = NF_IP6_PRI_FIRST,
 		.owner = THIS_MODULE,
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index a20615ffccff..a2353edf4ebc 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -349,7 +349,7 @@ static ctl_table nf_ct_ipv6_sysctl_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
 	{ .ctl_name = 0 }
 };
 #endif
 
@@ -386,7 +386,7 @@ static int ipv6_nfattr_to_tuple(struct nfattr *tb[],
 	if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip))
 		return -EINVAL;
 
 	memcpy(&t->src.u3.ip6, NFA_DATA(tb[CTA_IP_V6_SRC-1]),
 	       sizeof(u_int32_t) * 4);
 	memcpy(&t->dst.u3.ip6, NFA_DATA(tb[CTA_IP_V6_DST-1]),
 	       sizeof(u_int32_t) * 4);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3905cacc69af..b08622c992b2 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -104,9 +104,9 @@ static int icmpv6_packet(struct nf_conn *ct,
 			  unsigned int hooknum)
 {
 	/* Try to delete connection immediately after all replies:
 	   won't actually vanish as we still have skb, and del_timer
 	   means this will only run once even if count hits zero twice
 	   (theoretically possible with SMP) */
 	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
 		if (atomic_dec_and_test(&ct->proto.icmp.count)
 		    && del_timer(&ct->timeout))
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index d9c15402ba66..15ab1e3e8b56 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -311,7 +311,7 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
 	write_lock(&nf_ct_frag6_lock);
 #ifdef CONFIG_SMP
 	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
 		if (fq->id == fq_in->id &&
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
 			atomic_inc(&fq->refcnt);
@@ -374,7 +374,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 
 	read_lock(&nf_ct_frag6_lock);
 	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
 		if (fq->id == id &&
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
 			atomic_inc(&fq->refcnt);
@@ -388,7 +388,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 }
 
 
 static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 			     struct frag_hdr *fhdr, int nhoff)
 {
 	struct sk_buff *prev, *next;
@@ -405,12 +405,12 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
 		DEBUGP("offset is too large.\n");
 		return -1;
 	}
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum,
 				     csum_partial(skb->nh.raw,
 						  (u8*)(fhdr + 1) - skb->nh.raw,
 						  0));
 
@@ -625,7 +625,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 	/* We have to remove fragment header from datagram and to relocate
 	 * header in order to calculate ICV correctly. */
 	head->nh.raw[fq->nhoffset] = head->h.raw[0];
 	memmove(head->head + sizeof(struct frag_hdr), head->head,
 		(head->data - head->head) - sizeof(struct frag_hdr));
 	head->mac.raw += sizeof(struct frag_hdr);
 	head->nh.raw += sizeof(struct frag_hdr);
@@ -701,41 +701,41 @@ out_fail:
 static int
 find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 {
 	u8 nexthdr = skb->nh.ipv6h->nexthdr;
 	u8 prev_nhoff = (u8 *)&skb->nh.ipv6h->nexthdr - skb->data;
 	int start = (u8 *)(skb->nh.ipv6h+1) - skb->data;
 	int len = skb->len - start;
 	u8 prevhdr = NEXTHDR_IPV6;
 
 	while (nexthdr != NEXTHDR_FRAGMENT) {
 		struct ipv6_opt_hdr hdr;
 		int hdrlen;
 
 		if (!ipv6_ext_hdr(nexthdr)) {
 			return -1;
 		}
 		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
 			DEBUGP("too short\n");
 			return -1;
 		}
 		if (nexthdr == NEXTHDR_NONE) {
 			DEBUGP("next header is none\n");
 			return -1;
 		}
 		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
 			BUG();
 		if (nexthdr == NEXTHDR_AUTH)
 			hdrlen = (hdr.hdrlen+2)<<2;
 		else
 			hdrlen = ipv6_optlen(&hdr);
 
 		prevhdr = nexthdr;
 		prev_nhoff = start;
 
 		nexthdr = hdr.nexthdr;
 		len -= hdrlen;
 		start += hdrlen;
 	}
 
 	if (len < 0)
 		return -1;
@@ -749,7 +749,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 
 struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 {
 	struct sk_buff *clone;
 	struct net_device *dev = skb->dev;
 	struct frag_hdr *fhdr;
 	struct nf_ct_frag6_queue *fq;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 35249d8487bb..032ef95c5b00 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -50,7 +50,7 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
 	seq_printf(seq, "UDP6: inuse %d\n",
 		       fold_prot_inuse(&udpv6_prot));
 	seq_printf(seq, "UDPLITE6: inuse %d\n",
 			fold_prot_inuse(&udplitev6_prot));
 	seq_printf(seq, "RAW6: inuse %d\n",
 		       fold_prot_inuse(&rawv6_prot));
 	seq_printf(seq, "FRAG6: inuse %d memory %d\n",
@@ -89,7 +89,7 @@ static struct snmp_mib snmp6_icmp6_list[] = {
 /* icmpv6 mib according to RFC 2466
 
    Exceptions:  {In|Out}AdminProhibs are removed, because I see
 		no good reasons to account them separately
 		of another dest.unreachs.
 		OutErrs is zero identically.
 		OutEchos too.
@@ -146,14 +146,14 @@ static struct snmp_mib snmp6_udplite6_list[] = {
 static unsigned long
 fold_field(void *mib[], int offt)
 {
 	unsigned long res = 0;
 	int i;
 
 	for_each_possible_cpu(i) {
 		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
 	}
 	return res;
 }
 
 static inline void
@@ -161,7 +161,7 @@ snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist)
 {
 	int i;
 	for (i=0; itemlist[i].name; i++)
 		seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
 			   fold_field(mib, itemlist[i].entry));
 }
 
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 52c1d58b6ca6..ad0410c99675 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -64,7 +64,7 @@ int inet6_add_protocol(struct inet6_protocol *prot, unsigned char protocol)
 /*
  *	Remove a protocol from the hash tables.
  */
 
 int inet6_del_protocol(struct inet6_protocol *prot, unsigned char protocol)
 {
 	int ret, hash = protocol & (MAX_INET_PROTOS - 1);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c2d8059e754e..16d4c63ff554 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1,9 +1,9 @@
 /*
  *	RAW sockets for IPv6
  *	Linux INET6 implementation
  *
  *	Authors:
  *	Pedro Roque		<roque@di.fc.ul.pt>
  *
  *	Adapted from linux/net/ipv4/raw.c
  *
@@ -11,7 +11,7 @@
  *
  *	Fixes:
  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
  *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
  *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
  *
  *	This program is free software; you can redistribute it and/or
@@ -71,12 +71,12 @@ static void raw_v6_hash(struct sock *sk)
 	write_lock_bh(&raw_v6_lock);
 	sk_add_node(sk, list);
 	sock_prot_inc_use(sk->sk_prot);
 	write_unlock_bh(&raw_v6_lock);
 }
 
 static void raw_v6_unhash(struct sock *sk)
 {
 	write_lock_bh(&raw_v6_lock);
 	if (sk_del_node_init(sk))
 		sock_prot_dec_use(sk->sk_prot);
 	write_unlock_bh(&raw_v6_lock);
@@ -250,7 +250,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		 */
 		sk->sk_bound_dev_if = addr->sin6_scope_id;
 	}
 
 	/* Binding to link-local address requires an interface */
 	if (!sk->sk_bound_dev_if)
 		goto out;
@@ -261,7 +261,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 			goto out;
 		}
 	}
 
 	/* ipv4 addr of the socket is invalid.  Only the
 	 * unspecified and mapped address have a v4 equivalent.
 	 */
@@ -324,7 +324,7 @@ void rawv6_err(struct sock *sk, struct sk_buff *skb,
 
 static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
 	if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
 	    skb_checksum_complete(skb)) {
 		/* FIXME: increment a raw6 drops counter here */
 		kfree_skb(skb);
@@ -342,10 +342,10 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 }
 
 /*
  *	This is next to useless...
  *	if we demultiplex in network layer we don't need the extra call
  *	just to queue the skb...
  *	maybe we could have the network decide upon a hint if it
  *	should call raw_rcv for demultiplexing
  */
 int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
@@ -353,17 +353,17 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 	struct inet_sock *inet = inet_sk(sk);
 	struct raw6_sock *rp = raw6_sk(sk);
 
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
 
 	if (!rp->checksum)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		skb_postpull_rcsum(skb, skb->nh.raw,
 				   skb->h.raw - skb->nh.raw);
 		if (!csum_ipv6_magic(&skb->nh.ipv6h->saddr,
 				     &skb->nh.ipv6h->daddr,
 				     skb->len, inet->num, skb->csum))
@@ -404,8 +404,8 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 
 	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
 	if (addr_len)
 		*addr_len=sizeof(*sin6);
 
 	if (flags & MSG_ERRQUEUE)
@@ -416,10 +416,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		goto out;
 
 	copied = skb->len;
 	if (copied > len) {
 		copied = len;
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
 	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
 		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
@@ -549,7 +549,7 @@ out:
 }
 
 static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
 			struct flowi *fl, struct rt6_info *rt,
 			unsigned int flags)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -570,7 +570,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
 	skb = sock_alloc_send_skb(sk, length+hh_len+15,
 				  flags&MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
 	skb_reserve(skb, hh_len);
 
 	skb->priority = sk->sk_priority;
@@ -600,7 +600,7 @@ error_fault:
 	kfree_skb(skb);
 error:
 	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
 	return err;
 }
 
 static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
@@ -694,19 +694,19 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		return -EMSGSIZE;
 
 	/* Mirror BSD error message compatibility */
 	if (msg->msg_flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
 	/*
 	 *	Get and verify the address.
 	 */
 	memset(&fl, 0, sizeof(fl));
 
 	if (sin6) {
 		if (addr_len < SIN6_LEN_RFC2133)
 			return -EINVAL;
 
 		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
 			return(-EAFNOSUPPORT);
 
 		/* port is the proto value [0..255] carried in nexthdr */
@@ -744,17 +744,17 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
 			fl.oif = sin6->sin6_scope_id;
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
 
 		proto = inet->num;
 		daddr = &np->daddr;
 		fl.fl6_flowlabel = np->flow_label;
 	}
 
 	if (ipv6_addr_any(daddr)) {
 		/*
 		 * unspecified destination address
 		 * treated as error... is this correct ?
 		 */
 		fl6_sock_release(flowlabel);
@@ -792,7 +792,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 	err = rawv6_probe_proto_opt(&fl, msg);
 	if (err)
 		goto out;
 
 	ipv6_addr_copy(&fl.fl6_dst, daddr);
 	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
 		ipv6_addr_copy(&fl.fl6_src, &np->saddr);
@@ -856,7 +856,7 @@ done:
 	dst_release(dst);
 	if (!inet->hdrincl)
 		release_sock(sk);
 out:
 	fl6_sock_release(flowlabel);
 	return err<0?err:len;
 do_confirm:
@@ -867,7 +867,7 @@ do_confirm:
 	goto done;
 }
 
 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
 			       char __user *optval, int optlen)
 {
 	switch (optname) {
@@ -884,7 +884,7 @@ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
 	return 0;
 }
 
 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
 			       char __user *optval, int __user *optlen)
 {
 	int len;
@@ -916,7 +916,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 	struct raw6_sock *rp = raw6_sk(sk);
 	int val;
 
 	if (get_user(val, (int __user *)optval))
 		return -EFAULT;
 
 	switch (optname) {
@@ -1224,7 +1224,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   sp->sk_state,
 		   atomic_read(&sp->sk_wmem_alloc),
 		   atomic_read(&sp->sk_rmem_alloc),
 		   0, 0L, 0,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6f9a9046510f..7034c54e5010 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -1,9 +1,9 @@
 /*
  *	IPv6 fragment reassembly
  *	Linux INET6 implementation
  *
  *	Authors:
  *	Pedro Roque		<roque@di.fc.ul.pt>
  *
  *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
  *
@@ -15,8 +15,8 @@
  *	2 of the License, or (at your option) any later version.
  */
 
 /*
  *	Fixes:
  *	Andi Kleen	Make it work with multiple hosts.
  *			More RFC compliance.
  *
@@ -343,7 +343,7 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
 	hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
 #ifdef CONFIG_SMP
 	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
 		if (fq->id == fq_in->id &&
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
 			atomic_inc(&fq->refcnt);
@@ -406,7 +406,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
 	read_lock(&ip6_frag_lock);
 	hash = ip6qhashfn(id, src, dst);
 	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
 		if (fq->id == id &&
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
 			atomic_inc(&fq->refcnt);
@@ -420,7 +420,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
 }
 
 
 static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			   struct frag_hdr *fhdr, int nhoff)
 {
 	struct sk_buff *prev, *next;
@@ -436,13 +436,13 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	if ((unsigned int)end > IPV6_MAXPLEN) {
 		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
 				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
 		return;
 	}
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum,
 				     csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));
 
 	/* Is this the final fragment? */
 	if (!(fhdr->frag_off & htons(IP6_MF))) {
@@ -464,7 +464,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		 */
 		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
 				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 				  offsetof(struct ipv6hdr, payload_len));
 		return;
 	}
@@ -482,7 +482,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	/* Point into the IP datagram 'data' part. */
 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
 		goto err;
 
 	if (pskb_trim_rcsum(skb, end - offset))
 		goto err;
 
@@ -640,7 +640,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
 	 * header in order to calculate ICV correctly. */
 	nhoff = fq->nhoffset;
 	head->nh.raw[nhoff] = head->h.raw[0];
 	memmove(head->head + sizeof(struct frag_hdr), head->head,
 		(head->data - head->head) - sizeof(struct frag_hdr));
 	head->mac.raw += sizeof(struct frag_hdr);
 	head->nh.raw += sizeof(struct frag_hdr);
@@ -695,7 +695,7 @@ out_fail:
 
 static int ipv6_frag_rcv(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
 	struct net_device *dev = skb->dev;
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 19c906f6efa1..a415ac610e2d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3,7 +3,7 @@
  *	FIB front-end.
  *
  *	Authors:
  *	Pedro Roque		<roque@di.fc.ul.pt>
  *
  *	$Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
  *
@@ -201,7 +201,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 	if (idev != NULL) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
 	}
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -243,7 +243,7 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
 	struct rt6_info *sprt;
 
 	if (oif) {
-		for (sprt = rt; sprt; sprt = sprt->u.next) {
+		for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
 			struct net_device *dev = sprt->rt6i_dev;
 			if (dev->ifindex == oif)
 				return sprt;
@@ -252,7 +252,7 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
 			    sprt->rt6i_idev->dev->ifindex != oif) {
 				if (strict && oif)
 					continue;
 				if (local && (!oif ||
 					      local->rt6i_idev->dev->ifindex == oif))
 					continue;
 			}
@@ -350,7 +350,7 @@ static int rt6_score_route(struct rt6_info *rt, int oif,
 			   int strict)
 {
 	int m, n;
 
 	m = rt6_check_dev(rt, oif);
 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
 		return -1;
@@ -376,7 +376,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
 
 	for (rt = rt0, metric = rt0->rt6i_metric;
 	     rt && rt->rt6i_metric == metric && (!last || rt != rt0);
-	     rt = rt->u.next) {
+	     rt = rt->u.dst.rt6_next) {
 		int m;
 
 		if (rt6_check_expired(rt))
@@ -404,9 +404,9 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
 		/* no entries matched; do round-robin */
 		static DEFINE_SPINLOCK(lock);
 		spin_lock(&lock);
-		*head = rt0->u.next;
-		rt0->u.next = last->u.next;
-		last->u.next = rt0;
+		*head = rt0->u.dst.rt6_next;
+		rt0->u.dst.rt6_next = last->u.dst.rt6_next;
+		last->u.dst.rt6_next = rt0;
 		spin_unlock(&lock);
 	}
 
@@ -723,7 +723,7 @@ void ip6_route_input(struct sk_buff *skb)
 			.flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
 			},
 		},
 		.mark = skb->mark,
 		.proto = iph->nexthdr,
 	};
 
@@ -888,9 +888,9 @@ static inline unsigned int ipv6_advmss(unsigned int mtu)
 		mtu = ip6_rt_min_advmss;
 
 	/*
 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
 	 * rely only on pmtu discovery"
 	 */
 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
@@ -901,7 +901,7 @@ static inline unsigned int ipv6_advmss(unsigned int mtu)
 static struct dst_entry *ndisc_dst_gc_list;
 static DEFINE_SPINLOCK(ndisc_lock);
 
 struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
 				  struct neighbour *neigh,
 				  struct in6_addr *addr,
 				  int (*output)(struct sk_buff *))
@@ -934,8 +934,8 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
 	rt->u.dst.output  = output;
 
 #if 0	/* there's no chance to use these for ndisc */
 	rt->u.dst.flags   = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
 				? DST_HOST
 				: 0;
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	rt->rt6i_dst.plen = 128;
@@ -958,7 +958,7 @@ int ndisc_dst_gc(int *more)
 	int freed;
 
 	next = NULL;
 	freed = 0;
 
 	spin_lock_bh(&ndisc_lock);
 	pprev = &ndisc_dst_gc_list;
@@ -1276,9 +1276,9 @@ static int ip6_route_del(struct fib6_config *cfg)
 		fn = fib6_locate(&table->tb6_root,
 				 &cfg->fc_dst, cfg->fc_dst_len,
 				 &cfg->fc_src, cfg->fc_src_len);
 
 		if (fn) {
-			for (rt = fn->leaf; rt; rt = rt->u.next) {
+			for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
 				if (cfg->fc_ifindex &&
 				    (rt->rt6i_dev == NULL ||
 				     rt->rt6i_dev->ifindex != cfg->fc_ifindex))
@@ -1329,7 +1329,7 @@ static struct rt6_info *__ip6_route_redirect(struct fib6_table *table,
 	read_lock_bh(&table->tb6_lock);
 	fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
 restart:
-	for (rt = fn->leaf; rt; rt = rt->u.next) {
+	for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
 		/*
 		 * Current route is on-link; redirect is always invalid.
 		 *
@@ -1405,7 +1405,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
 	 * We have finally decided to accept it.
 	 */
 
 	neigh_update(neigh, lladdr, NUD_STALE,
 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
 		     NEIGH_UPDATE_F_OVERRIDE|
 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
@@ -1454,7 +1454,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
 	}
 
 out:
 	dst_release(&rt->u.dst);
 	return;
 }
 
@@ -1478,7 +1478,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
 
 	if (pmtu < IPV6_MIN_MTU) {
 		/*
 		 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
 		 * MTU (1280) and a fragment header should always be included
 		 * after a node receiving Too Big message reporting PMTU is
 		 * less than the IPv6 Minimum Link MTU.
@@ -1590,7 +1590,7 @@ static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixle
 	if (!fn)
 		goto out;
 
-	for (rt = fn->leaf; rt; rt = rt->u.next) {
+	for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
 		if (rt->rt6i_dev->ifindex != ifindex)
 			continue;
 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
@@ -1632,7 +1632,7 @@ static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixle
 #endif
 
 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
 {
 	struct rt6_info *rt;
 	struct fib6_table *table;
 
@@ -1641,7 +1641,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
 		return NULL;
 
 	write_lock_bh(&table->tb6_lock);
-	for (rt = table->tb6_root.leaf; rt; rt=rt->u.next) {
+	for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
 		if (dev == rt->rt6i_dev &&
 		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
 		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
@@ -1684,7 +1684,7 @@ void rt6_purge_dflt_routers(void)
 
 restart:
 	read_lock_bh(&table->tb6_lock);
-	for (rt = table->tb6_root.leaf; rt; rt = rt->u.next) {
+	for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
 			dst_hold(&rt->u.dst);
 			read_unlock_bh(&table->tb6_lock);
@@ -1896,8 +1896,8 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	 */
 	if (rt->rt6i_dev == arg->dev &&
 	    !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
 	    (dst_mtu(&rt->u.dst) > arg->mtu ||
 	     (dst_mtu(&rt->u.dst) < arg->mtu &&
 	      dst_mtu(&rt->u.dst) == idev->cnf.mtu6)))
 		rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
 	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
@@ -2083,13 +2083,13 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 
 	if (dst) {
 		NLA_PUT(skb, RTA_DST, 16, dst);
 		rtm->rtm_dst_len = 128;
 	} else if (rtm->rtm_dst_len)
 		NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
 #ifdef CONFIG_IPV6_SUBTREES
 	if (src) {
 		NLA_PUT(skb, RTA_SRC, 16, src);
 		rtm->rtm_src_len = 128;
 	} else if (rtm->rtm_src_len)
 		NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
 #endif
@@ -2299,7 +2299,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 	arg->len += sprintf(arg->buffer + arg->len,
 			    " %08x %08x %08x %08x %8s\n",
 			    rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
 			    rt->u.dst.__use, rt->rt6i_flags,
 			    rt->rt6i_dev ? rt->rt6i_dev->name : "");
 	return 0;
 }
@@ -2371,91 +2371,91 @@ int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
 }
 
 ctl_table ipv6_route_table[] = {
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_FLUSH,
 		.procname	=	"flush",
 		.data		=	&flush_delay,
 		.maxlen		=	sizeof(int),
 		.mode		=	0200,
 		.proc_handler	=	&ipv6_sysctl_rtcache_flush
 	},
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_GC_THRESH,
 		.procname	=	"gc_thresh",
 		.data		=	&ip6_dst_ops.gc_thresh,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
 		.proc_handler	=	&proc_dointvec,
 	},
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_MAX_SIZE,
 		.procname	=	"max_size",
 		.data		=	&ip6_rt_max_size,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
 		.proc_handler	=	&proc_dointvec,
 	},
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_GC_MIN_INTERVAL,
 		.procname	=	"gc_min_interval",
 		.data		=	&ip6_rt_gc_min_interval,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
 		.proc_handler	=	&proc_dointvec_jiffies,
 		.strategy	=	&sysctl_jiffies,
 	},
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_GC_TIMEOUT,
 		.procname	=	"gc_timeout",
 		.data		=	&ip6_rt_gc_timeout,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
 		.proc_handler	=	&proc_dointvec_jiffies,
 		.strategy	=	&sysctl_jiffies,
 	},
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_GC_INTERVAL,
 		.procname	=	"gc_interval",
 		.data		=	&ip6_rt_gc_interval,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
 		.proc_handler	=	&proc_dointvec_jiffies,
 		.strategy	=	&sysctl_jiffies,
 	},
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_GC_ELASTICITY,
 		.procname	=	"gc_elasticity",
 		.data		=	&ip6_rt_gc_elasticity,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
 		.proc_handler	=	&proc_dointvec_jiffies,
 		.strategy	=	&sysctl_jiffies,
 	},
 	{
 		.ctl_name	=	NET_IPV6_ROUTE_MTU_EXPIRES,
 		.procname	=	"mtu_expires",
 		.data		=	&ip6_rt_mtu_expires,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
 		.proc_handler	=	&proc_dointvec_jiffies,
 		.strategy	=	&sysctl_jiffies,
2442 }, 2442 },
2443 { 2443 {
2444 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS, 2444 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2445 .procname = "min_adv_mss", 2445 .procname = "min_adv_mss",
2446 .data = &ip6_rt_min_advmss, 2446 .data = &ip6_rt_min_advmss,
2447 .maxlen = sizeof(int), 2447 .maxlen = sizeof(int),
2448 .mode = 0644, 2448 .mode = 0644,
2449 .proc_handler = &proc_dointvec_jiffies, 2449 .proc_handler = &proc_dointvec_jiffies,
2450 .strategy = &sysctl_jiffies, 2450 .strategy = &sysctl_jiffies,
2451 }, 2451 },
2452 { 2452 {
2453 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, 2453 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2454 .procname = "gc_min_interval_ms", 2454 .procname = "gc_min_interval_ms",
2455 .data = &ip6_rt_gc_min_interval, 2455 .data = &ip6_rt_gc_min_interval,
2456 .maxlen = sizeof(int), 2456 .maxlen = sizeof(int),
2457 .mode = 0644, 2457 .mode = 0644,
2458 .proc_handler = &proc_dointvec_ms_jiffies, 2458 .proc_handler = &proc_dointvec_ms_jiffies,
2459 .strategy = &sysctl_ms_jiffies, 2459 .strategy = &sysctl_ms_jiffies,
2460 }, 2460 },
2461 { .ctl_name = 0 } 2461 { .ctl_name = 0 }
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 47cfeadac6dd..4d3cf301e1fc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -3,7 +3,7 @@
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: sit.c,v 1.53 2001/09/25 05:09:53 davem Exp $ 9 * $Id: sit.c,v 1.53 2001/09/25 05:09:53 davem Exp $
@@ -410,7 +410,7 @@ static inline __be32 try_6to4(struct in6_addr *v6dst)
410 __be32 dst = 0; 410 __be32 dst = 0;
411 411
412 if (v6dst->s6_addr16[0] == htons(0x2002)) { 412 if (v6dst->s6_addr16[0] == htons(0x2002)) {
413 /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ 413 /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
414 memcpy(&dst, &v6dst->s6_addr16[1], 4); 414 memcpy(&dst, &v6dst->s6_addr16[1], 4);
415 } 415 }
416 return dst; 416 return dst;
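The hunk above only touches whitespace, but the extraction try_6to4() performs is easy to check in isolation: a 2002::/16 address carries the IPv4 tunnel endpoint in bytes 2..5. A self-contained user-space sketch of the same test and copy (example address assumed, not kernel code):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr v6;
	struct in_addr  v4 = { 0 };
	char buf[INET_ADDRSTRLEN];

	/* 2002:c000:0204::1 embeds 192.0.2.4 (0xc0 0x00 0x02 0x04) */
	if (inet_pton(AF_INET6, "2002:c000:0204::1", &v6) != 1)
		return 1;

	/* same check as the hunk: 16-bit 0x2002 prefix, then the v4 address */
	if (v6.s6_addr[0] == 0x20 && v6.s6_addr[1] == 0x02)
		memcpy(&v4, &v6.s6_addr[2], 4);

	printf("embedded v4: %s\n", inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
	return 0;
}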
@@ -434,7 +434,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
434 int max_headroom; /* The extra header space needed */ 434 int max_headroom; /* The extra header space needed */
435 __be32 dst = tiph->daddr; 435 __be32 dst = tiph->daddr;
436 int mtu; 436 int mtu;
437 struct in6_addr *addr6; 437 struct in6_addr *addr6;
438 int addr_type; 438 int addr_type;
439 439
440 if (tunnel->recursion++) { 440 if (tunnel->recursion++) {
@@ -537,7 +537,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
537 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 537 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
538 if (!new_skb) { 538 if (!new_skb) {
539 ip_rt_put(rt); 539 ip_rt_put(rt);
540 stats->tx_dropped++; 540 stats->tx_dropped++;
541 dev_kfree_skb(skb); 541 dev_kfree_skb(skb);
542 tunnel->recursion--; 542 tunnel->recursion--;
543 return 0; 543 return 0;
@@ -831,7 +831,7 @@ static int __init sit_init(void)
831 return -EAGAIN; 831 return -EAGAIN;
832 } 832 }
833 833
834 ipip6_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0", 834 ipip6_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
835 ipip6_tunnel_setup); 835 ipip6_tunnel_setup);
836 if (!ipip6_fb_tunnel_dev) { 836 if (!ipip6_fb_tunnel_dev) {
837 err = -ENOMEM; 837 err = -ENOMEM;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 7a4639db1346..25e8e7783fee 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -92,7 +92,7 @@ static ctl_table ipv6_net_table[] = {
92 .mode = 0555, 92 .mode = 0555,
93 .child = ipv6_table 93 .child = ipv6_table
94 }, 94 },
95 { .ctl_name = 0 } 95 { .ctl_name = 0 }
96}; 96};
97 97
98static ctl_table ipv6_root_table[] = { 98static ctl_table ipv6_root_table[] = {
@@ -102,7 +102,7 @@ static ctl_table ipv6_root_table[] = {
102 .mode = 0555, 102 .mode = 0555,
103 .child = ipv6_net_table 103 .child = ipv6_net_table
104 }, 104 },
105 { .ctl_name = 0 } 105 { .ctl_name = 0 }
106}; 106};
107 107
108void ipv6_sysctl_register(void) 108void ipv6_sysctl_register(void)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index dcb7b00a737d..f57a9baa6b27 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1,13 +1,13 @@
1/* 1/*
2 * TCP over IPv6 2 * TCP over IPv6
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $ 8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Based on: 10 * Based on:
11 * linux/net/ipv4/tcp.c 11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c 12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c 13 * linux/net/ipv4/tcp_output.c
@@ -74,7 +74,7 @@ static struct socket *tcp6_socket;
74 74
75static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); 75static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); 76static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
77static void tcp_v6_send_check(struct sock *sk, int len, 77static void tcp_v6_send_check(struct sock *sk, int len,
78 struct sk_buff *skb); 78 struct sk_buff *skb);
79 79
80static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 80static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
@@ -106,8 +106,8 @@ static void tcp_v6_hash(struct sock *sk)
106} 106}
107 107
108static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len, 108static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
109 struct in6_addr *saddr, 109 struct in6_addr *saddr,
110 struct in6_addr *daddr, 110 struct in6_addr *daddr,
111 __wsum base) 111 __wsum base)
112{ 112{
113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); 113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
@@ -121,11 +121,11 @@ static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
121 skb->h.th->source); 121 skb->h.th->source);
122} 122}
123 123
124static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 124static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
125 int addr_len) 125 int addr_len)
126{ 126{
127 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 127 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
128 struct inet_sock *inet = inet_sk(sk); 128 struct inet_sock *inet = inet_sk(sk);
129 struct inet_connection_sock *icsk = inet_csk(sk); 129 struct inet_connection_sock *icsk = inet_csk(sk);
130 struct ipv6_pinfo *np = inet6_sk(sk); 130 struct ipv6_pinfo *np = inet6_sk(sk);
131 struct tcp_sock *tp = tcp_sk(sk); 131 struct tcp_sock *tp = tcp_sk(sk);
@@ -135,10 +135,10 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
135 int addr_type; 135 int addr_type;
136 int err; 136 int err;
137 137
138 if (addr_len < SIN6_LEN_RFC2133) 138 if (addr_len < SIN6_LEN_RFC2133)
139 return -EINVAL; 139 return -EINVAL;
140 140
141 if (usin->sin6_family != AF_INET6) 141 if (usin->sin6_family != AF_INET6)
142 return(-EAFNOSUPPORT); 142 return(-EAFNOSUPPORT);
143 143
144 memset(&fl, 0, sizeof(fl)); 144 memset(&fl, 0, sizeof(fl));
@@ -157,11 +157,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
157 } 157 }
158 158
159 /* 159 /*
160 * connect() to INADDR_ANY means loopback (BSD'ism). 160 * connect() to INADDR_ANY means loopback (BSD'ism).
161 */ 161 */
162 162
163 if(ipv6_addr_any(&usin->sin6_addr)) 163 if(ipv6_addr_any(&usin->sin6_addr))
164 usin->sin6_addr.s6_addr[15] = 0x1; 164 usin->sin6_addr.s6_addr[15] = 0x1;
165 165
166 addr_type = ipv6_addr_type(&usin->sin6_addr); 166 addr_type = ipv6_addr_type(&usin->sin6_addr);
167 167
@@ -323,7 +323,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
323 struct ipv6_pinfo *np; 323 struct ipv6_pinfo *np;
324 struct sock *sk; 324 struct sock *sk;
325 int err; 325 int err;
326 struct tcp_sock *tp; 326 struct tcp_sock *tp;
327 __u32 seq; 327 __u32 seq;
328 328
329 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr, 329 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
@@ -347,7 +347,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
347 goto out; 347 goto out;
348 348
349 tp = tcp_sk(sk); 349 tp = tcp_sk(sk);
350 seq = ntohl(th->seq); 350 seq = ntohl(th->seq);
351 if (sk->sk_state != TCP_LISTEN && 351 if (sk->sk_state != TCP_LISTEN &&
352 !between(seq, tp->snd_una, tp->snd_nxt)) { 352 !between(seq, tp->snd_una, tp->snd_nxt)) {
353 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 353 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
@@ -434,7 +434,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
434 434
435 case TCP_SYN_SENT: 435 case TCP_SYN_SENT:
436 case TCP_SYN_RECV: /* Cannot happen. 436 case TCP_SYN_RECV: /* Cannot happen.
 437 				   It can, if SYNs are crossed. --ANK */	 437 				   It can, if SYNs are crossed. --ANK */
438 if (!sock_owned_by_user(sk)) { 438 if (!sock_owned_by_user(sk)) {
439 sk->sk_err = err; 439 sk->sk_err = err;
440 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 440 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
@@ -519,7 +519,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
519 } 519 }
520 520
521done: 521done:
522 if (opt && opt != np->opt) 522 if (opt && opt != np->opt)
523 sock_kfree_s(sk, opt, opt->tot_len); 523 sock_kfree_s(sk, opt, opt->tot_len);
524 dst_release(dst); 524 dst_release(dst);
525 return err; 525 return err;
@@ -950,8 +950,8 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
950 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); 950 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
951 skb->csum_offset = offsetof(struct tcphdr, check); 951 skb->csum_offset = offsetof(struct tcphdr, check);
952 } else { 952 } else {
953 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 953 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
954 csum_partial((char *)th, th->doff<<2, 954 csum_partial((char *)th, th->doff<<2,
955 skb->csum)); 955 skb->csum));
956 } 956 }
957} 957}
@@ -977,7 +977,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
977 977
978static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) 978static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
979{ 979{
980 struct tcphdr *th = skb->h.th, *t1; 980 struct tcphdr *th = skb->h.th, *t1;
981 struct sk_buff *buff; 981 struct sk_buff *buff;
982 struct flowi fl; 982 struct flowi fl;
983 int tot_len = sizeof(*th); 983 int tot_len = sizeof(*th);
@@ -989,7 +989,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
989 return; 989 return;
990 990
991 if (!ipv6_unicast_destination(skb)) 991 if (!ipv6_unicast_destination(skb))
992 return; 992 return;
993 993
994#ifdef CONFIG_TCP_MD5SIG 994#ifdef CONFIG_TCP_MD5SIG
995 if (sk) 995 if (sk)
@@ -1008,8 +1008,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1008 1008
1009 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 1009 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1010 GFP_ATOMIC); 1010 GFP_ATOMIC);
1011 if (buff == NULL) 1011 if (buff == NULL)
1012 return; 1012 return;
1013 1013
1014 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 1014 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1015 1015
@@ -1021,9 +1021,9 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1021 t1->source = th->dest; 1021 t1->source = th->dest;
1022 t1->doff = tot_len / 4; 1022 t1->doff = tot_len / 4;
1023 t1->rst = 1; 1023 t1->rst = 1;
1024 1024
1025 if(th->ack) { 1025 if(th->ack) {
1026 t1->seq = th->ack_seq; 1026 t1->seq = th->ack_seq;
1027 } else { 1027 } else {
1028 t1->ack = 1; 1028 t1->ack = 1;
1029 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin 1029 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
@@ -1128,7 +1128,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1128 t1->window = htons(win); 1128 t1->window = htons(win);
1129 1129
1130 topt = (__be32 *)(t1 + 1); 1130 topt = (__be32 *)(t1 + 1);
1131 1131
1132 if (ts) { 1132 if (ts) {
1133 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 1133 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1134 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 1134 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
@@ -1243,15 +1243,15 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1243 return tcp_v4_conn_request(sk, skb); 1243 return tcp_v4_conn_request(sk, skb);
1244 1244
1245 if (!ipv6_unicast_destination(skb)) 1245 if (!ipv6_unicast_destination(skb))
1246 goto drop; 1246 goto drop;
1247 1247
1248 /* 1248 /*
1249 * There are no SYN attacks on IPv6, yet... 1249 * There are no SYN attacks on IPv6, yet...
1250 */ 1250 */
1251 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1251 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1252 if (net_ratelimit()) 1252 if (net_ratelimit())
1253 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); 1253 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
1254 goto drop; 1254 goto drop;
1255 } 1255 }
1256 1256
1257 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1257 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1292,7 +1292,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1292 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1292 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1293 treq->iif = inet6_iif(skb); 1293 treq->iif = inet6_iif(skb);
1294 1294
1295 if (isn == 0) 1295 if (isn == 0)
1296 isn = tcp_v6_init_sequence(skb); 1296 isn = tcp_v6_init_sequence(skb);
1297 1297
1298 tcp_rsk(req)->snt_isn = isn; 1298 tcp_rsk(req)->snt_isn = isn;
@@ -1334,7 +1334,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1334 1334
1335 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); 1335 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1336 1336
1337 if (newsk == NULL) 1337 if (newsk == NULL)
1338 return NULL; 1338 return NULL;
1339 1339
1340 newtcp6sk = (struct tcp6_sock *)newsk; 1340 newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1419,7 +1419,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1419 1419
1420 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0) 1420 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1421 goto out; 1421 goto out;
1422 } 1422 }
1423 1423
1424 newsk = tcp_create_openreq_child(sk, req, skb); 1424 newsk = tcp_create_openreq_child(sk, req, skb);
1425 if (newsk == NULL) 1425 if (newsk == NULL)
@@ -1448,7 +1448,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1448 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr); 1448 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1449 newsk->sk_bound_dev_if = treq->iif; 1449 newsk->sk_bound_dev_if = treq->iif;
1450 1450
1451 /* Now IPv6 options... 1451 /* Now IPv6 options...
1452 1452
1453 First: no IPv4 options. 1453 First: no IPv4 options.
1454 */ 1454 */
@@ -1592,7 +1592,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1592 looks not very well thought. For now we latch 1592 looks not very well thought. For now we latch
1593 options, received in the last packet, enqueued 1593 options, received in the last packet, enqueued
1594 by tcp. Feel free to propose better solution. 1594 by tcp. Feel free to propose better solution.
1595 --ANK (980728) 1595 --ANK (980728)
1596 */ 1596 */
1597 if (np->rxopt.all) 1597 if (np->rxopt.all)
1598 opt_skb = skb_clone(skb, GFP_ATOMIC); 1598 opt_skb = skb_clone(skb, GFP_ATOMIC);
@@ -1610,7 +1610,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1610 if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb)) 1610 if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1611 goto csum_err; 1611 goto csum_err;
1612 1612
1613 if (sk->sk_state == TCP_LISTEN) { 1613 if (sk->sk_state == TCP_LISTEN) {
1614 struct sock *nsk = tcp_v6_hnd_req(sk, skb); 1614 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1615 if (!nsk) 1615 if (!nsk)
1616 goto discard; 1616 goto discard;
@@ -1620,7 +1620,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1620 * otherwise we just shortcircuit this and continue with 1620 * otherwise we just shortcircuit this and continue with
1621 * the new socket.. 1621 * the new socket..
1622 */ 1622 */
1623 if(nsk != sk) { 1623 if(nsk != sk) {
1624 if (tcp_child_process(sk, nsk, skb)) 1624 if (tcp_child_process(sk, nsk, skb))
1625 goto reset; 1625 goto reset;
1626 if (opt_skb) 1626 if (opt_skb)
@@ -1681,7 +1681,7 @@ ipv6_pktoptions:
1681static int tcp_v6_rcv(struct sk_buff **pskb) 1681static int tcp_v6_rcv(struct sk_buff **pskb)
1682{ 1682{
1683 struct sk_buff *skb = *pskb; 1683 struct sk_buff *skb = *pskb;
1684 struct tcphdr *th; 1684 struct tcphdr *th;
1685 struct sock *sk; 1685 struct sock *sk;
1686 int ret; 1686 int ret;
1687 1687
@@ -1739,10 +1739,10 @@ process:
1739 ret = 0; 1739 ret = 0;
1740 if (!sock_owned_by_user(sk)) { 1740 if (!sock_owned_by_user(sk)) {
1741#ifdef CONFIG_NET_DMA 1741#ifdef CONFIG_NET_DMA
1742 struct tcp_sock *tp = tcp_sk(sk); 1742 struct tcp_sock *tp = tcp_sk(sk);
1743 if (tp->ucopy.dma_chan) 1743 if (tp->ucopy.dma_chan)
1744 ret = tcp_v6_do_rcv(sk, skb); 1744 ret = tcp_v6_do_rcv(sk, skb);
1745 else 1745 else
1746#endif 1746#endif
1747 { 1747 {
1748 if (!tcp_prequeue(sk, skb)) 1748 if (!tcp_prequeue(sk, skb))
@@ -1945,7 +1945,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
1945} 1945}
1946 1946
1947/* Proc filesystem TCPv6 sock list dumping. */ 1947/* Proc filesystem TCPv6 sock list dumping. */
1948static void get_openreq6(struct seq_file *seq, 1948static void get_openreq6(struct seq_file *seq,
1949 struct sock *sk, struct request_sock *req, int i, int uid) 1949 struct sock *sk, struct request_sock *req, int i, int uid)
1950{ 1950{
1951 int ttd = req->expires - jiffies; 1951 int ttd = req->expires - jiffies;
@@ -1967,11 +1967,11 @@ static void get_openreq6(struct seq_file *seq,
1967 ntohs(inet_rsk(req)->rmt_port), 1967 ntohs(inet_rsk(req)->rmt_port),
1968 TCP_SYN_RECV, 1968 TCP_SYN_RECV,
1969 0,0, /* could print option size, but that is af dependent. */ 1969 0,0, /* could print option size, but that is af dependent. */
1970 1, /* timers active (only the expire timer) */ 1970 1, /* timers active (only the expire timer) */
1971 jiffies_to_clock_t(ttd), 1971 jiffies_to_clock_t(ttd),
1972 req->retrans, 1972 req->retrans,
1973 uid, 1973 uid,
1974 0, /* non standard timer */ 1974 0, /* non standard timer */
1975 0, /* open_requests have no inode */ 1975 0, /* open_requests have no inode */
1976 0, req); 1976 0, req);
1977} 1977}
@@ -2014,7 +2014,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2014 src->s6_addr32[2], src->s6_addr32[3], srcp, 2014 src->s6_addr32[2], src->s6_addr32[3], srcp,
2015 dest->s6_addr32[0], dest->s6_addr32[1], 2015 dest->s6_addr32[0], dest->s6_addr32[1],
2016 dest->s6_addr32[2], dest->s6_addr32[3], destp, 2016 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2017 sp->sk_state, 2017 sp->sk_state,
2018 tp->write_seq-tp->snd_una, 2018 tp->write_seq-tp->snd_una,
2019 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 2019 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2020 timer_active, 2020 timer_active,
@@ -2031,7 +2031,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2031 ); 2031 );
2032} 2032}
2033 2033
2034static void get_timewait6_sock(struct seq_file *seq, 2034static void get_timewait6_sock(struct seq_file *seq,
2035 struct inet_timewait_sock *tw, int i) 2035 struct inet_timewait_sock *tw, int i)
2036{ 2036{
2037 struct in6_addr *dest, *src; 2037 struct in6_addr *dest, *src;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 15e5195549cb..ccf2f4d196be 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * UDP over IPv6 2 * UDP over IPv6
3 * Linux INET6 implementation 3 * Linux INET6 implementation
4 * 4 *
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * Based on linux/ipv4/udp.c 8 * Based on linux/ipv4/udp.c
9 * 9 *
@@ -67,11 +67,11 @@ static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
67 unsigned short hnum = ntohs(dport); 67 unsigned short hnum = ntohs(dport);
68 int badness = -1; 68 int badness = -1;
69 69
70 read_lock(&udp_hash_lock); 70 read_lock(&udp_hash_lock);
71 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 71 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
72 struct inet_sock *inet = inet_sk(sk); 72 struct inet_sock *inet = inet_sk(sk);
73 73
74 if (inet->num == hnum && sk->sk_family == PF_INET6) { 74 if (sk->sk_hash == hnum && sk->sk_family == PF_INET6) {
75 struct ipv6_pinfo *np = inet6_sk(sk); 75 struct ipv6_pinfo *np = inet6_sk(sk);
76 int score = 0; 76 int score = 0;
77 if (inet->dport) { 77 if (inet->dport) {
@@ -105,7 +105,7 @@ static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
105 } 105 }
106 if (result) 106 if (result)
107 sock_hold(result); 107 sock_hold(result);
108 read_unlock(&udp_hash_lock); 108 read_unlock(&udp_hash_lock);
109 return result; 109 return result;
110} 110}
111 111
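The inet->num test at old line 74 becomes a sk->sk_hash comparison because the hashed local port is now cached in the socket itself, so the lookup loop no longer dereferences the inet-specific area for every candidate. A self-contained user-space sketch of that caching idea (hypothetical types, not the kernel structures):

#include <stdio.h>

#define HTABLE_SIZE 128			/* stand-in for UDP_HTABLE_SIZE  */

struct fake_sock {
	unsigned short	  hash;		/* plays the role of sk->sk_hash */
	unsigned short	  local_port;	/* plays the role of inet->num   */
	struct fake_sock *next;
};

static struct fake_sock *lookup(struct fake_sock *table[], unsigned short port)
{
	unsigned short hnum = port;	/* UDP hashes by local port */
	struct fake_sock *sk;

	for (sk = table[hnum & (HTABLE_SIZE - 1)]; sk; sk = sk->next)
		if (sk->hash == hnum)	/* one cached compare per candidate */
			return sk;
	return NULL;
}

int main(void)
{
	static struct fake_sock *table[HTABLE_SIZE];
	struct fake_sock s = { .hash = 53, .local_port = 53, .next = NULL };

	table[53 & (HTABLE_SIZE - 1)] = &s;
	printf("found: %d\n", lookup(table, 53) == &s);
	return 0;
}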
@@ -120,13 +120,13 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
120{ 120{
121 struct ipv6_pinfo *np = inet6_sk(sk); 121 struct ipv6_pinfo *np = inet6_sk(sk);
122 struct inet_sock *inet = inet_sk(sk); 122 struct inet_sock *inet = inet_sk(sk);
123 struct sk_buff *skb; 123 struct sk_buff *skb;
124 size_t copied; 124 size_t copied;
125 int err, copy_only, is_udplite = IS_UDPLITE(sk); 125 int err, copy_only, is_udplite = IS_UDPLITE(sk);
126 126
127 if (addr_len) 127 if (addr_len)
128 *addr_len=sizeof(struct sockaddr_in6); 128 *addr_len=sizeof(struct sockaddr_in6);
129 129
130 if (flags & MSG_ERRQUEUE) 130 if (flags & MSG_ERRQUEUE)
131 return ipv6_recv_error(sk, msg, len); 131 return ipv6_recv_error(sk, msg, len);
132 132
@@ -135,11 +135,11 @@ try_again:
135 if (!skb) 135 if (!skb)
136 goto out; 136 goto out;
137 137
138 copied = skb->len - sizeof(struct udphdr); 138 copied = skb->len - sizeof(struct udphdr);
139 if (copied > len) { 139 if (copied > len) {
140 copied = len; 140 copied = len;
141 msg->msg_flags |= MSG_TRUNC; 141 msg->msg_flags |= MSG_TRUNC;
142 } 142 }
143 143
144 /* 144 /*
145 * Decide whether to checksum and/or copy data. 145 * Decide whether to checksum and/or copy data.
@@ -168,7 +168,7 @@ try_again:
168 /* Copy the address. */ 168 /* Copy the address. */
169 if (msg->msg_name) { 169 if (msg->msg_name) {
170 struct sockaddr_in6 *sin6; 170 struct sockaddr_in6 *sin6;
171 171
172 sin6 = (struct sockaddr_in6 *) msg->msg_name; 172 sin6 = (struct sockaddr_in6 *) msg->msg_name;
173 sin6->sin6_family = AF_INET6; 173 sin6->sin6_family = AF_INET6;
174 sin6->sin6_port = skb->h.uh->source; 174 sin6->sin6_port = skb->h.uh->source;
@@ -191,7 +191,7 @@ try_again:
191 } else { 191 } else {
192 if (np->rxopt.all) 192 if (np->rxopt.all)
193 datagram_recv_ctl(sk, msg, skb); 193 datagram_recv_ctl(sk, msg, skb);
194 } 194 }
195 195
196 err = copied; 196 err = copied;
197 if (flags & MSG_TRUNC) 197 if (flags & MSG_TRUNC)
@@ -309,7 +309,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
309 sk_for_each_from(s, node) { 309 sk_for_each_from(s, node) {
310 struct inet_sock *inet = inet_sk(s); 310 struct inet_sock *inet = inet_sk(s);
311 311
312 if (inet->num == num && s->sk_family == PF_INET6) { 312 if (s->sk_hash == num && s->sk_family == PF_INET6) {
313 struct ipv6_pinfo *np = inet6_sk(s); 313 struct ipv6_pinfo *np = inet6_sk(s);
314 if (inet->dport) { 314 if (inet->dport) {
315 if (inet->dport != rmt_port) 315 if (inet->dport != rmt_port)
@@ -339,7 +339,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
339 * so we don't need to lock the hashes. 339 * so we don't need to lock the hashes.
340 */ 340 */
341static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr, 341static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
342 struct in6_addr *daddr, struct hlist_head udptable[]) 342 struct in6_addr *daddr, struct hlist_head udptable[])
343{ 343{
344 struct sock *sk, *sk2; 344 struct sock *sk, *sk2;
345 const struct udphdr *uh = skb->h.uh; 345 const struct udphdr *uh = skb->h.uh;
@@ -379,7 +379,7 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh)
379 } 379 }
380 if (skb->ip_summed == CHECKSUM_COMPLETE && 380 if (skb->ip_summed == CHECKSUM_COMPLETE &&
381 !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, 381 !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
382 skb->len, IPPROTO_UDP, skb->csum )) 382 skb->len, IPPROTO_UDP, skb->csum ))
383 skb->ip_summed = CHECKSUM_UNNECESSARY; 383 skb->ip_summed = CHECKSUM_UNNECESSARY;
384 384
385 if (skb->ip_summed != CHECKSUM_UNNECESSARY) 385 if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -396,7 +396,7 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
396{ 396{
397 struct sk_buff *skb = *pskb; 397 struct sk_buff *skb = *pskb;
398 struct sock *sk; 398 struct sock *sk;
399 struct udphdr *uh; 399 struct udphdr *uh;
400 struct net_device *dev = skb->dev; 400 struct net_device *dev = skb->dev;
401 struct in6_addr *saddr, *daddr; 401 struct in6_addr *saddr, *daddr;
402 u32 ulen = 0; 402 u32 ulen = 0;
@@ -437,15 +437,15 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
437 goto discard; 437 goto discard;
438 } 438 }
439 439
440 /* 440 /*
441 * Multicast receive code 441 * Multicast receive code
442 */ 442 */
443 if (ipv6_addr_is_multicast(daddr)) 443 if (ipv6_addr_is_multicast(daddr))
444 return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable); 444 return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable);
445 445
446 /* Unicast */ 446 /* Unicast */
447 447
448 /* 448 /*
449 * check socket cache ... must talk to Alan about his plans 449 * check socket cache ... must talk to Alan about his plans
450 * for sock caches... i'll skip this for now. 450 * for sock caches... i'll skip this for now.
451 */ 451 */
@@ -465,21 +465,21 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
465 kfree_skb(skb); 465 kfree_skb(skb);
466 return(0); 466 return(0);
467 } 467 }
468 468
469 /* deliver */ 469 /* deliver */
470 470
471 udpv6_queue_rcv_skb(sk, skb); 471 udpv6_queue_rcv_skb(sk, skb);
472 sock_put(sk); 472 sock_put(sk);
473 return(0); 473 return(0);
474 474
475short_packet: 475short_packet:
476 LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n", 476 LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
477 is_udplite? "-Lite" : "", ulen, skb->len); 477 is_udplite? "-Lite" : "", ulen, skb->len);
478 478
479discard: 479discard:
480 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); 480 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
481 kfree_skb(skb); 481 kfree_skb(skb);
482 return(0); 482 return(0);
483} 483}
484 484
485static __inline__ int udpv6_rcv(struct sk_buff **pskb) 485static __inline__ int udpv6_rcv(struct sk_buff **pskb)
@@ -498,7 +498,7 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
498 up->len = 0; 498 up->len = 0;
499 up->pending = 0; 499 up->pending = 0;
500 ip6_flush_pending_frames(sk); 500 ip6_flush_pending_frames(sk);
501 } 501 }
502} 502}
503 503
504/* 504/*
@@ -594,7 +594,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
594 if (sk->sk_state != TCP_ESTABLISHED) 594 if (sk->sk_state != TCP_ESTABLISHED)
595 return -EDESTADDRREQ; 595 return -EDESTADDRREQ;
596 daddr = &np->daddr; 596 daddr = &np->daddr;
597 } else 597 } else
598 daddr = NULL; 598 daddr = NULL;
599 599
600 if (daddr) { 600 if (daddr) {
@@ -620,7 +620,7 @@ do_udp_sendmsg:
620 */ 620 */
621 if (len > INT_MAX - sizeof(struct udphdr)) 621 if (len > INT_MAX - sizeof(struct udphdr))
622 return -EMSGSIZE; 622 return -EMSGSIZE;
623 623
624 if (up->pending) { 624 if (up->pending) {
625 /* 625 /*
626 * There are pending frames. 626 * There are pending frames.
@@ -713,7 +713,7 @@ do_udp_sendmsg:
713 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) 713 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
714 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 714 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
715 fl.fl_ip_sport = inet->sport; 715 fl.fl_ip_sport = inet->sport;
716 716
717 /* merge ip6_build_xmit from ip6_output */ 717 /* merge ip6_build_xmit from ip6_output */
718 if (opt && opt->srcrt) { 718 if (opt && opt->srcrt) {
719 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 719 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
@@ -911,7 +911,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
911 src->s6_addr32[2], src->s6_addr32[3], srcp, 911 src->s6_addr32[2], src->s6_addr32[3], srcp,
912 dest->s6_addr32[0], dest->s6_addr32[1], 912 dest->s6_addr32[0], dest->s6_addr32[1],
913 dest->s6_addr32[2], dest->s6_addr32[3], destp, 913 dest->s6_addr32[2], dest->s6_addr32[3], destp,
914 sp->sk_state, 914 sp->sk_state,
915 atomic_read(&sp->sk_wmem_alloc), 915 atomic_read(&sp->sk_wmem_alloc),
916 atomic_read(&sp->sk_rmem_alloc), 916 atomic_read(&sp->sk_rmem_alloc),
917 0, 0L, 0, 917 0, 0L, 0,
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index ec9878899128..6e252f318f7c 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -11,9 +11,9 @@ extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
11 int , int , int , __be32 , struct hlist_head []); 11 int , int , int , __be32 , struct hlist_head []);
12 12
13extern int udpv6_getsockopt(struct sock *sk, int level, int optname, 13extern int udpv6_getsockopt(struct sock *sk, int level, int optname,
14 char __user *optval, int __user *optlen); 14 char __user *optval, int __user *optlen);
15extern int udpv6_setsockopt(struct sock *sk, int level, int optname, 15extern int udpv6_setsockopt(struct sock *sk, int level, int optname,
16 char __user *optval, int optlen); 16 char __user *optval, int optlen);
17#ifdef CONFIG_COMPAT 17#ifdef CONFIG_COMPAT
18extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, 18extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
19 char __user *optval, int optlen); 19 char __user *optval, int optlen);
@@ -24,7 +24,7 @@ extern int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
24 struct msghdr *msg, size_t len); 24 struct msghdr *msg, size_t len);
25extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, 25extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
26 struct msghdr *msg, size_t len, 26 struct msghdr *msg, size_t len,
27 int noblock, int flags, int *addr_len); 27 int noblock, int flags, int *addr_len);
28extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 28extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
29extern int udpv6_destroy_sock(struct sock *sk); 29extern int udpv6_destroy_sock(struct sock *sk);
30 30
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 5c8b7a568800..25250147bdc3 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -33,7 +33,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
33 seq = 0; 33 seq = 0;
34 if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) 34 if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
35 goto drop; 35 goto drop;
36 36
37 do { 37 do {
38 struct ipv6hdr *iph = skb->nh.ipv6h; 38 struct ipv6hdr *iph = skb->nh.ipv6h;
39 39
@@ -115,7 +115,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
115 __skb_push(skb, skb->data - skb->nh.raw); 115 __skb_push(skb, skb->data - skb->nh.raw);
116 116
117 NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL, 117 NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
118 ip6_rcv_finish); 118 ip6_rcv_finish);
119 return -1; 119 return -1;
120#else 120#else
121 return 1; 121 return 1;
@@ -142,12 +142,12 @@ int xfrm6_rcv(struct sk_buff **pskb)
142int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, 142int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
143 xfrm_address_t *saddr, u8 proto) 143 xfrm_address_t *saddr, u8 proto)
144{ 144{
145 struct xfrm_state *x = NULL; 145 struct xfrm_state *x = NULL;
146 int wildcard = 0; 146 int wildcard = 0;
147 struct in6_addr any; 147 struct in6_addr any;
148 xfrm_address_t *xany; 148 xfrm_address_t *xany;
149 struct xfrm_state *xfrm_vec_one = NULL; 149 struct xfrm_state *xfrm_vec_one = NULL;
150 int nh = 0; 150 int nh = 0;
151 int i = 0; 151 int i = 0;
152 152
153 ipv6_addr_set(&any, 0, 0, 0, 0); 153 ipv6_addr_set(&any, 0, 0, 0, 0);
@@ -168,12 +168,12 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
168 break; 168 break;
169 case 2: 169 case 2:
170 default: 170 default:
171 /* lookup state with wild-card addresses */ 171 /* lookup state with wild-card addresses */
172 wildcard = 1; /* XXX */ 172 wildcard = 1; /* XXX */
173 dst = xany; 173 dst = xany;
174 src = xany; 174 src = xany;
175 break; 175 break;
176 } 176 }
177 177
178 x = xfrm_state_lookup_byaddr(dst, src, proto, AF_INET6); 178 x = xfrm_state_lookup_byaddr(dst, src, proto, AF_INET6);
179 if (!x) 179 if (!x)
@@ -193,8 +193,8 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
193 if (unlikely(x->km.state != XFRM_STATE_VALID)) { 193 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
194 spin_unlock(&x->lock); 194 spin_unlock(&x->lock);
195 xfrm_state_put(x); 195 xfrm_state_put(x);
196 x = NULL; 196 x = NULL;
197 continue; 197 continue;
198 } 198 }
199 if (xfrm_state_check_expire(x)) { 199 if (xfrm_state_check_expire(x)) {
200 spin_unlock(&x->lock); 200 spin_unlock(&x->lock);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index c260ea104c52..d6d786b89d2b 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -2,7 +2,7 @@
2 * xfrm6_output.c - Common IPsec encapsulation code for IPv6. 2 * xfrm6_output.c - Common IPsec encapsulation code for IPv6.
3 * Copyright (C) 2002 USAGI/WIDE Project 3 * Copyright (C) 2002 USAGI/WIDE Project
4 * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
@@ -46,7 +46,7 @@ static int xfrm6_output_one(struct sk_buff *skb)
46 struct dst_entry *dst = skb->dst; 46 struct dst_entry *dst = skb->dst;
47 struct xfrm_state *x = dst->xfrm; 47 struct xfrm_state *x = dst->xfrm;
48 int err; 48 int err;
49 49
50 if (skb->ip_summed == CHECKSUM_PARTIAL) { 50 if (skb->ip_summed == CHECKSUM_PARTIAL) {
51 err = skb_checksum_help(skb); 51 err = skb_checksum_help(skb);
52 if (err) 52 if (err)
@@ -81,7 +81,7 @@ static int xfrm6_output_one(struct sk_buff *skb)
81 spin_unlock_bh(&x->lock); 81 spin_unlock_bh(&x->lock);
82 82
83 skb->nh.raw = skb->data; 83 skb->nh.raw = skb->data;
84 84
85 if (!(skb->dst = dst_pop(dst))) { 85 if (!(skb->dst = dst_pop(dst))) {
86 err = -EHOSTUNREACH; 86 err = -EHOSTUNREACH;
87 goto error_nolock; 87 goto error_nolock;
@@ -108,7 +108,7 @@ static int xfrm6_output_finish2(struct sk_buff *skb)
108 108
109 while (likely((err = xfrm6_output_one(skb)) == 0)) { 109 while (likely((err = xfrm6_output_one(skb)) == 0)) {
110 nf_reset(skb); 110 nf_reset(skb);
111 111
112 err = nf_hook(PF_INET6, NF_IP6_LOCAL_OUT, &skb, NULL, 112 err = nf_hook(PF_INET6, NF_IP6_LOCAL_OUT, &skb, NULL,
113 skb->dst->dev, dst_output); 113 skb->dst->dev, dst_output);
114 if (unlikely(err != 1)) 114 if (unlikely(err != 1))
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 59480e92177d..b1133f27c8ae 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -8,7 +8,7 @@
8 * IPv6 support 8 * IPv6 support
9 * YOSHIFUJI Hideaki 9 * YOSHIFUJI Hideaki
10 * Split up af-specific portion 10 * Split up af-specific portion
11 * 11 *
12 */ 12 */
13 13
14#include <linux/compiler.h> 14#include <linux/compiler.h>
@@ -178,7 +178,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
178 __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]); 178 __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]);
179 trailer_len += xfrm[i]->props.trailer_len; 179 trailer_len += xfrm[i]->props.trailer_len;
180 180
181 if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) { 181 if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL ||
182 xfrm[i]->props.mode == XFRM_MODE_ROUTEOPTIMIZATION) {
182 unsigned short encap_family = xfrm[i]->props.family; 183 unsigned short encap_family = xfrm[i]->props.family;
183 switch(encap_family) { 184 switch(encap_family) {
184 case AF_INET: 185 case AF_INET:
@@ -186,8 +187,9 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
186 fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4; 187 fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
187 break; 188 break;
188 case AF_INET6: 189 case AF_INET6:
189 ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6); 190 ipv6_addr_copy(&fl_tunnel.fl6_dst, __xfrm6_bundle_addr_remote(xfrm[i], &fl->fl6_dst));
190 ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6); 191
192 ipv6_addr_copy(&fl_tunnel.fl6_src, __xfrm6_bundle_addr_remote(xfrm[i], &fl->fl6_src));
191 break; 193 break;
192 default: 194 default:
193 BUG_ON(1); 195 BUG_ON(1);
@@ -247,9 +249,9 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
247 x->u.rt6.rt6i_metric = rt0->rt6i_metric; 249 x->u.rt6.rt6i_metric = rt0->rt6i_metric;
248 x->u.rt6.rt6i_node = rt0->rt6i_node; 250 x->u.rt6.rt6i_node = rt0->rt6i_node;
249 x->u.rt6.rt6i_gateway = rt0->rt6i_gateway; 251 x->u.rt6.rt6i_gateway = rt0->rt6i_gateway;
250 memcpy(&x->u.rt6.rt6i_gateway, &rt0->rt6i_gateway, sizeof(x->u.rt6.rt6i_gateway)); 252 memcpy(&x->u.rt6.rt6i_gateway, &rt0->rt6i_gateway, sizeof(x->u.rt6.rt6i_gateway));
251 x->u.rt6.rt6i_dst = rt0->rt6i_dst; 253 x->u.rt6.rt6i_dst = rt0->rt6i_dst;
252 x->u.rt6.rt6i_src = rt0->rt6i_src; 254 x->u.rt6.rt6i_src = rt0->rt6i_src;
253 x->u.rt6.rt6i_idev = rt0->rt6i_idev; 255 x->u.rt6.rt6i_idev = rt0->rt6i_idev;
254 in6_dev_hold(rt0->rt6i_idev); 256 in6_dev_hold(rt0->rt6i_idev);
255 __xfrm6_bundle_len_dec(&header_len, &nfheader_len, x->u.dst.xfrm); 257 __xfrm6_bundle_len_dec(&header_len, &nfheader_len, x->u.dst.xfrm);
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 60ad5f074e0a..baa461b9f74e 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -8,7 +8,7 @@
8 * IPv6 support 8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI 9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific portion 10 * Split up af-specific portion
11 * 11 *
12 */ 12 */
13 13
14#include <net/xfrm.h> 14#include <net/xfrm.h>
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 12e426b9aacd..fb0228772f01 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -5,12 +5,12 @@
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -32,7 +32,7 @@
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33 33
34/* 34/*
35 * xfrm_tunnel_spi things are for allocating unique id ("spi") 35 * xfrm_tunnel_spi things are for allocating unique id ("spi")
36 * per xfrm_address_t. 36 * per xfrm_address_t.
37 */ 37 */
38struct xfrm6_tunnel_spi { 38struct xfrm6_tunnel_spi {
@@ -155,8 +155,8 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
155 155
156 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { 156 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
157 index = xfrm6_tunnel_spi_hash_byspi(spi); 157 index = xfrm6_tunnel_spi_hash_byspi(spi);
158 hlist_for_each_entry(x6spi, pos, 158 hlist_for_each_entry(x6spi, pos,
159 &xfrm6_tunnel_spi_byspi[index], 159 &xfrm6_tunnel_spi_byspi[index],
160 list_byspi) { 160 list_byspi) {
161 if (x6spi->spi == spi) 161 if (x6spi->spi == spi)
162 goto try_next_1; 162 goto try_next_1;
@@ -167,8 +167,8 @@ try_next_1:;
167 } 167 }
168 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) { 168 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
169 index = xfrm6_tunnel_spi_hash_byspi(spi); 169 index = xfrm6_tunnel_spi_hash_byspi(spi);
170 hlist_for_each_entry(x6spi, pos, 170 hlist_for_each_entry(x6spi, pos,
171 &xfrm6_tunnel_spi_byspi[index], 171 &xfrm6_tunnel_spi_byspi[index],
172 list_byspi) { 172 list_byspi) {
173 if (x6spi->spi == spi) 173 if (x6spi->spi == spi)
174 goto try_next_2; 174 goto try_next_2;
@@ -222,7 +222,7 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
222 222
223 write_lock_bh(&xfrm6_tunnel_spi_lock); 223 write_lock_bh(&xfrm6_tunnel_spi_lock);
224 224
225 hlist_for_each_entry_safe(x6spi, pos, n, 225 hlist_for_each_entry_safe(x6spi, pos, n,
226 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 226 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
227 list_byaddr) 227 list_byaddr)
228 { 228 {
@@ -269,9 +269,9 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
269{ 269{
270 /* xfrm6_tunnel native err handling */ 270 /* xfrm6_tunnel native err handling */
271 switch (type) { 271 switch (type) {
272 case ICMPV6_DEST_UNREACH: 272 case ICMPV6_DEST_UNREACH:
273 switch (code) { 273 switch (code) {
274 case ICMPV6_NOROUTE: 274 case ICMPV6_NOROUTE:
275 case ICMPV6_ADM_PROHIBITED: 275 case ICMPV6_ADM_PROHIBITED:
276 case ICMPV6_NOT_NEIGHBOUR: 276 case ICMPV6_NOT_NEIGHBOUR:
277 case ICMPV6_ADDR_UNREACH: 277 case ICMPV6_ADDR_UNREACH:
@@ -287,7 +287,7 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
287 case ICMPV6_EXC_HOPLIMIT: 287 case ICMPV6_EXC_HOPLIMIT:
288 break; 288 break;
289 case ICMPV6_EXC_FRAGTIME: 289 case ICMPV6_EXC_FRAGTIME:
290 default: 290 default:
291 break; 291 break;
292 } 292 }
293 break; 293 break;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 89f283c51dff..cac35a77f069 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -11,7 +11,7 @@
11 * work I am currently employed to do there. 11 * work I am currently employed to do there.
12 * 12 *
13 * All the material in this file is subject to the Gnu license version 2. 13 * All the material in this file is subject to the Gnu license version 2.
14 * Neither Alan Cox nor the Swansea University Computer Society admit 14 * Neither Alan Cox nor the Swansea University Computer Society admit
15 * liability nor provide warranty for any of this software. This material 15 * liability nor provide warranty for any of this software. This material
16 * is provided as is and at no charge. 16 * is provided as is and at no charge.
17 * 17 *
@@ -152,8 +152,8 @@ static void ipx_destroy_socket(struct sock *sk)
152 ipx_remove_socket(sk); 152 ipx_remove_socket(sk);
153 skb_queue_purge(&sk->sk_receive_queue); 153 skb_queue_purge(&sk->sk_receive_queue);
154#ifdef IPX_REFCNT_DEBUG 154#ifdef IPX_REFCNT_DEBUG
155 atomic_dec(&ipx_sock_nr); 155 atomic_dec(&ipx_sock_nr);
156 printk(KERN_DEBUG "IPX socket %p released, %d are still alive\n", sk, 156 printk(KERN_DEBUG "IPX socket %p released, %d are still alive\n", sk,
157 atomic_read(&ipx_sock_nr)); 157 atomic_read(&ipx_sock_nr));
158 if (atomic_read(&sk->sk_refcnt) != 1) 158 if (atomic_read(&sk->sk_refcnt) != 1)
159 printk(KERN_DEBUG "Destruction sock ipx %p delayed, cnt=%d\n", 159 printk(KERN_DEBUG "Destruction sock ipx %p delayed, cnt=%d\n",
@@ -162,7 +162,7 @@ static void ipx_destroy_socket(struct sock *sk)
162 sock_put(sk); 162 sock_put(sk);
163} 163}
164 164
165/* 165/*
166 * The following code is used to support IPX Interfaces (IPXITF). An 166 * The following code is used to support IPX Interfaces (IPXITF). An
167 * IPX interface is defined by a physical device and a frame type. 167 * IPX interface is defined by a physical device and a frame type.
168 */ 168 */
@@ -369,7 +369,7 @@ static __exit void ipxitf_cleanup(void)
369 struct ipx_interface *i, *tmp; 369 struct ipx_interface *i, *tmp;
370 370
371 spin_lock_bh(&ipx_interfaces_lock); 371 spin_lock_bh(&ipx_interfaces_lock);
372 list_for_each_entry_safe(i, tmp, &ipx_interfaces, node) 372 list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
373 __ipxitf_put(i); 373 __ipxitf_put(i);
374 spin_unlock_bh(&ipx_interfaces_lock); 374 spin_unlock_bh(&ipx_interfaces_lock);
375} 375}
@@ -446,10 +446,10 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc,
446 * You might call this a hack, but believe me, you do not want a 446 * You might call this a hack, but believe me, you do not want a
447 * complete NCP layer in the kernel, and this is VERY fast as well. */ 447 * complete NCP layer in the kernel, and this is VERY fast as well. */
448 struct sock *sk = NULL; 448 struct sock *sk = NULL;
449 int connection = 0; 449 int connection = 0;
450 u8 *ncphdr = (u8 *)(ipx + 1); 450 u8 *ncphdr = (u8 *)(ipx + 1);
451 451
452 if (*ncphdr == 0x22 && *(ncphdr + 1) == 0x22) /* NCP request */ 452 if (*ncphdr == 0x22 && *(ncphdr + 1) == 0x22) /* NCP request */
453 connection = (((int) *(ncphdr + 5)) << 8) | (int) *(ncphdr + 3); 453 connection = (((int) *(ncphdr + 5)) << 8) | (int) *(ncphdr + 3);
454 else if (*ncphdr == 0x77 && *(ncphdr + 1) == 0x77) /* BURST packet */ 454 else if (*ncphdr == 0x77 && *(ncphdr + 1) == 0x77) /* BURST packet */
455 connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); 455 connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8);
@@ -482,7 +482,7 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc,
482 482
483 if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451) 483 if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451)
484 sock1 = ncp_connection_hack(intrfc, ipx); 484 sock1 = ncp_connection_hack(intrfc, ipx);
485 if (!sock1) 485 if (!sock1)
486 /* No special socket found, forward the packet the normal way */ 486 /* No special socket found, forward the packet the normal way */
487 sock1 = ipxitf_find_socket(intrfc, ipx->ipx_dest.sock); 487 sock1 = ipxitf_find_socket(intrfc, ipx->ipx_dest.sock);
488 488
@@ -607,22 +607,22 @@ int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node)
607 *last_hop = IPX_SKB_CB(skb)->last_hop.netnum; 607 *last_hop = IPX_SKB_CB(skb)->last_hop.netnum;
608 IPX_SKB_CB(skb)->last_hop.index = -1; 608 IPX_SKB_CB(skb)->last_hop.index = -1;
609 } 609 }
610 610
611 /* 611 /*
612 * We need to know how many skbuffs it will take to send out this 612 * We need to know how many skbuffs it will take to send out this
613 * packet to avoid unnecessary copies. 613 * packet to avoid unnecessary copies.
614 */ 614 */
615 615
616 if (!dl || !dev || dev->flags & IFF_LOOPBACK) 616 if (!dl || !dev || dev->flags & IFF_LOOPBACK)
617 send_to_wire = 0; /* No non looped */ 617 send_to_wire = 0; /* No non looped */
618 618
619 /* 619 /*
620 * See if this should be demuxed to sockets on this interface 620 * See if this should be demuxed to sockets on this interface
621 * 621 *
622 * We want to ensure the original was eaten or that we only use 622 * We want to ensure the original was eaten or that we only use
623 * up clones. 623 * up clones.
624 */ 624 */
625 625
626 if (ipx->ipx_dest.net == intrfc->if_netnum) { 626 if (ipx->ipx_dest.net == intrfc->if_netnum) {
627 /* 627 /*
628 * To our own node, loop and free the original. 628 * To our own node, loop and free the original.
@@ -709,8 +709,8 @@ static int ipxitf_rcv(struct ipx_interface *intrfc, struct sk_buff *skb)
709 709
710 /* See if we should update our network number */ 710 /* See if we should update our network number */
711 if (!intrfc->if_netnum) /* net number of intrfc not known yet */ 711 if (!intrfc->if_netnum) /* net number of intrfc not known yet */
712 ipxitf_discover_netnum(intrfc, skb); 712 ipxitf_discover_netnum(intrfc, skb);
713 713
714 IPX_SKB_CB(skb)->last_hop.index = -1; 714 IPX_SKB_CB(skb)->last_hop.index = -1;
715 if (ipx->ipx_type == IPX_TYPE_PPROP) { 715 if (ipx->ipx_type == IPX_TYPE_PPROP) {
716 rc = ipxitf_pprop(intrfc, skb); 716 rc = ipxitf_pprop(intrfc, skb);
@@ -756,7 +756,7 @@ out_intrfc:
756 756
757static void ipxitf_discover_netnum(struct ipx_interface *intrfc, 757static void ipxitf_discover_netnum(struct ipx_interface *intrfc,
758 struct sk_buff *skb) 758 struct sk_buff *skb)
759{ 759{
760 const struct ipx_cb *cb = IPX_SKB_CB(skb); 760 const struct ipx_cb *cb = IPX_SKB_CB(skb);
761 761
762 /* see if this is an intra packet: source_net == dest_net */ 762 /* see if this is an intra packet: source_net == dest_net */
@@ -793,7 +793,7 @@ static void ipxitf_discover_netnum(struct ipx_interface *intrfc,
793 * it, not even processing it locally, if it has exact %IPX_MAX_PPROP_HOPS we 793 * it, not even processing it locally, if it has exact %IPX_MAX_PPROP_HOPS we
794 * don't broadcast it, but process it locally. See chapter 5 of Novell's "IPX 794 * don't broadcast it, but process it locally. See chapter 5 of Novell's "IPX
795 * RIP and SAP Router Specification", Part Number 107-000029-001. 795 * RIP and SAP Router Specification", Part Number 107-000029-001.
796 * 796 *
797 * If it is valid, check if we have pprop broadcasting enabled by the user, 797 * If it is valid, check if we have pprop broadcasting enabled by the user,
798 * if not, just return zero for local processing. 798 * if not, just return zero for local processing.
799 * 799 *
@@ -820,7 +820,7 @@ static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb)
820 * tctrl <= 15, any data payload... */ 820 * tctrl <= 15, any data payload... */
821 if (IPX_SKB_CB(skb)->ipx_tctrl > IPX_MAX_PPROP_HOPS || 821 if (IPX_SKB_CB(skb)->ipx_tctrl > IPX_MAX_PPROP_HOPS ||
822 ntohs(ipx->ipx_pktsize) < sizeof(struct ipxhdr) + 822 ntohs(ipx->ipx_pktsize) < sizeof(struct ipxhdr) +
823 IPX_MAX_PPROP_HOPS * sizeof(u32)) 823 IPX_MAX_PPROP_HOPS * sizeof(u32))
824 goto out; 824 goto out;
825 /* are we broadcasting this damn thing? */ 825 /* are we broadcasting this damn thing? */
826 rc = 0; 826 rc = 0;
@@ -831,7 +831,7 @@ static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb)
831 * locally. */ 831 * locally. */
832 if (IPX_SKB_CB(skb)->ipx_tctrl == IPX_MAX_PPROP_HOPS) 832 if (IPX_SKB_CB(skb)->ipx_tctrl == IPX_MAX_PPROP_HOPS)
833 goto out; 833 goto out;
834 834
835 c = ((u8 *) ipx) + sizeof(struct ipxhdr); 835 c = ((u8 *) ipx) + sizeof(struct ipxhdr);
836 l = (__be32 *) c; 836 l = (__be32 *) c;
837 837
@@ -851,7 +851,7 @@ static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb)
851 /* Except unconfigured interfaces */ 851 /* Except unconfigured interfaces */
852 if (!ifcs->if_netnum) 852 if (!ifcs->if_netnum)
853 continue; 853 continue;
854 854
855 /* That aren't in the list */ 855 /* That aren't in the list */
856 if (ifcs == intrfc) 856 if (ifcs == intrfc)
857 continue; 857 continue;
@@ -1003,7 +1003,7 @@ static int ipxitf_create(struct ipx_interface_definition *idef)
1003 dlink_type = htons(ETH_P_IPX); 1003 dlink_type = htons(ETH_P_IPX);
1004 datalink = pEII_datalink; 1004 datalink = pEII_datalink;
1005 break; 1005 break;
1006 } else 1006 } else
1007 printk(KERN_WARNING "IPX frame type EtherII over " 1007 printk(KERN_WARNING "IPX frame type EtherII over "
1008 "token-ring is obsolete. Use SNAP " 1008 "token-ring is obsolete. Use SNAP "
1009 "instead.\n"); 1009 "instead.\n");
@@ -1208,14 +1208,14 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg)
1208 rc = 0; 1208 rc = 0;
1209 break; 1209 break;
1210 } 1210 }
1211 case SIOCAIPXITFCRT: 1211 case SIOCAIPXITFCRT:
1212 rc = -EFAULT; 1212 rc = -EFAULT;
1213 if (get_user(val, (unsigned char __user *) arg)) 1213 if (get_user(val, (unsigned char __user *) arg))
1214 break; 1214 break;
1215 rc = 0; 1215 rc = 0;
1216 ipxcfg_auto_create_interfaces = val; 1216 ipxcfg_auto_create_interfaces = val;
1217 break; 1217 break;
1218 case SIOCAIPXPRISLT: 1218 case SIOCAIPXPRISLT:
1219 rc = -EFAULT; 1219 rc = -EFAULT;
1220 if (get_user(val, (unsigned char __user *) arg)) 1220 if (get_user(val, (unsigned char __user *) arg))
1221 break; 1221 break;
@@ -1230,14 +1230,14 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg)
1230/* 1230/*
1231 * Checksum routine for IPX 1231 * Checksum routine for IPX
1232 */ 1232 */
1233 1233
1234/* Note: We assume ipx_tctrl==0 and htons(length)==ipx_pktsize */ 1234/* Note: We assume ipx_tctrl==0 and htons(length)==ipx_pktsize */
1235/* This function should *not* mess with packet contents */ 1235/* This function should *not* mess with packet contents */
1236 1236
1237__be16 ipx_cksum(struct ipxhdr *packet, int length) 1237__be16 ipx_cksum(struct ipxhdr *packet, int length)
1238{ 1238{
1239 /* 1239 /*
1240 * NOTE: sum is a net byte order quantity, which optimizes the 1240 * NOTE: sum is a net byte order quantity, which optimizes the
1241 * loop. This only works on big and little endian machines. (I 1241 * loop. This only works on big and little endian machines. (I
1242 * don't know of a machine that isn't.) 1242 * don't know of a machine that isn't.)
1243 */ 1243 */
@@ -1342,7 +1342,7 @@ static int ipx_getsockopt(struct socket *sock, int level, int optname,
1342 rc = -EINVAL; 1342 rc = -EINVAL;
1343 if(len < 0) 1343 if(len < 0)
1344 goto out; 1344 goto out;
1345 1345
1346 rc = -EFAULT; 1346 rc = -EFAULT;
1347 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 1347 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
1348 goto out; 1348 goto out;
@@ -1372,13 +1372,13 @@ static int ipx_create(struct socket *sock, int protocol)
1372 if (sock->type != SOCK_DGRAM) 1372 if (sock->type != SOCK_DGRAM)
1373 goto out; 1373 goto out;
1374 1374
1375 rc = -ENOMEM; 1375 rc = -ENOMEM;
1376 sk = sk_alloc(PF_IPX, GFP_KERNEL, &ipx_proto, 1); 1376 sk = sk_alloc(PF_IPX, GFP_KERNEL, &ipx_proto, 1);
1377 if (!sk) 1377 if (!sk)
1378 goto out; 1378 goto out;
1379#ifdef IPX_REFCNT_DEBUG 1379#ifdef IPX_REFCNT_DEBUG
1380 atomic_inc(&ipx_sock_nr); 1380 atomic_inc(&ipx_sock_nr);
1381 printk(KERN_DEBUG "IPX socket %p created, now we have %d alive\n", sk, 1381 printk(KERN_DEBUG "IPX socket %p created, now we have %d alive\n", sk,
1382 atomic_read(&ipx_sock_nr)); 1382 atomic_read(&ipx_sock_nr));
1383#endif 1383#endif
1384 sock_init_data(sock, sk); 1384 sock_init_data(sock, sk);
@@ -1561,7 +1561,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
1561 goto out; 1561 goto out;
1562 } 1562 }
1563 1563
1564 /* We can either connect to primary network or somewhere 1564 /* We can either connect to primary network or somewhere
1565 * we can route to */ 1565 * we can route to */
1566 rt = ipxrtr_lookup(addr->sipx_network); 1566 rt = ipxrtr_lookup(addr->sipx_network);
1567 rc = -ENETUNREACH; 1567 rc = -ENETUNREACH;
@@ -1641,10 +1641,10 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
1641 struct ipxhdr *ipx; 1641 struct ipxhdr *ipx;
1642 u16 ipx_pktsize; 1642 u16 ipx_pktsize;
1643 int rc = 0; 1643 int rc = 0;
1644 1644
1645 /* Not ours */ 1645 /* Not ours */
1646 if (skb->pkt_type == PACKET_OTHERHOST) 1646 if (skb->pkt_type == PACKET_OTHERHOST)
1647 goto drop; 1647 goto drop;
1648 1648
1649 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 1649 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1650 goto out; 1650 goto out;
@@ -1653,12 +1653,12 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
1653 goto drop; 1653 goto drop;
1654 1654
1655 ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize); 1655 ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
1656 1656
1657 /* Too small or invalid header? */ 1657 /* Too small or invalid header? */
1658 if (ipx_pktsize < sizeof(struct ipxhdr) || 1658 if (ipx_pktsize < sizeof(struct ipxhdr) ||
1659 !pskb_may_pull(skb, ipx_pktsize)) 1659 !pskb_may_pull(skb, ipx_pktsize))
1660 goto drop; 1660 goto drop;
1661 1661
1662 ipx = ipx_hdr(skb); 1662 ipx = ipx_hdr(skb);
1663 if (ipx->ipx_checksum != IPX_NO_CHECKSUM && 1663 if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
1664 ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize)) 1664 ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
@@ -1786,7 +1786,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1786 if (rc) 1786 if (rc)
1787 goto out; 1787 goto out;
1788 } 1788 }
1789 1789
1790 rc = -ENOTCONN; 1790 rc = -ENOTCONN;
1791 if (sock_flag(sk, SOCK_ZAPPED)) 1791 if (sock_flag(sk, SOCK_ZAPPED))
1792 goto out; 1792 goto out;
@@ -1875,15 +1875,15 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1875 * This socket wants to take care of the NCP connection 1875 * This socket wants to take care of the NCP connection
1876 * handed to us in arg. 1876 * handed to us in arg.
1877 */ 1877 */
1878 rc = -EPERM; 1878 rc = -EPERM;
1879 if (!capable(CAP_NET_ADMIN)) 1879 if (!capable(CAP_NET_ADMIN))
1880 break; 1880 break;
1881 rc = get_user(ipx_sk(sk)->ipx_ncp_conn, 1881 rc = get_user(ipx_sk(sk)->ipx_ncp_conn,
1882 (const unsigned short __user *)argp); 1882 (const unsigned short __user *)argp);
1883 break; 1883 break;
1884 case SIOCGSTAMP: 1884 case SIOCGSTAMP:
1885 rc = -EINVAL; 1885 rc = -EINVAL;
1886 if (sk) 1886 if (sk)
1887 rc = sock_get_timestamp(sk, argp); 1887 rc = sock_get_timestamp(sk, argp);
1888 break; 1888 break;
1889 case SIOCGIFDSTADDR: 1889 case SIOCGIFDSTADDR:
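The af_ipx.c hunks above are whitespace-only, but they pass through ipx_cksum(), whose header comment notes that the running sum is kept as a net byte order quantity and that ipx_tctrl is assumed to be zero. As a rough illustration only (ipx_cksum_sketch is a made-up name and the kernel's actual ipx_cksum algorithm differs in detail), a plain 16-bit end-around-carry sum over an IPX header that skips the checksum word itself could look like this:

/*
 * Illustrative sketch, not ipx_cksum() itself: sum the header as
 * big-endian 16-bit words, skip the leading checksum word, fold the
 * carries back in and return the one's complement of the result.
 */
static __be16 ipx_cksum_sketch(const struct ipxhdr *packet, int length)
{
	const u8 *p = (const u8 *)packet;
	u32 sum = 0;
	int i;

	for (i = 2; i + 1 < length; i += 2)	/* i = 0 is the checksum field */
		sum += (p[i] << 8) | p[i + 1];
	if (i < length)				/* odd trailing byte */
		sum += p[i] << 8;
	while (sum >> 16)			/* end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return htons((u16)~sum);
}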
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index b7463dfca63e..811e4badce81 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -352,7 +352,7 @@ int __init ipx_proc_init(void)
352{ 352{
353 struct proc_dir_entry *p; 353 struct proc_dir_entry *p;
354 int rc = -ENOMEM; 354 int rc = -ENOMEM;
355 355
356 ipx_proc_dir = proc_mkdir("ipx", proc_net); 356 ipx_proc_dir = proc_mkdir("ipx", proc_net);
357 357
358 if (!ipx_proc_dir) 358 if (!ipx_proc_dir)
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 68560ee0d797..8e1cad971f11 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -234,7 +234,7 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
234 if (rc) { 234 if (rc) {
235 kfree_skb(skb); 235 kfree_skb(skb);
236 goto out_put; 236 goto out_put;
237 } 237 }
238 238
239 /* Apply checksum. Not allowed on 802.3 links. */ 239 /* Apply checksum. Not allowed on 802.3 links. */
240 if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023)) 240 if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023))
@@ -242,7 +242,7 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
242 else 242 else
243 ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr)); 243 ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
244 244
245 rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? 245 rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ?
246 rt->ir_router_node : ipx->ipx_dest.node); 246 rt->ir_router_node : ipx->ipx_dest.node);
247out_put: 247out_put:
248 ipxitf_put(intrfc); 248 ipxitf_put(intrfc);
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index fa574735c76f..85ae35fa1e0e 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -34,7 +34,7 @@ static struct ctl_table ipx_dir_table[] = {
34 .procname = "ipx", 34 .procname = "ipx",
35 .mode = 0555, 35 .mode = 0555,
36 .child = ipx_table, 36 .child = ipx_table,
37 }, 37 },
38 { 0 }, 38 { 0 },
39}; 39};
40 40
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 7e1aea89ef05..eabd6838f50a 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -138,7 +138,7 @@ static void irda_disconnect_indication(void *instance, void *sap,
138 sk->sk_shutdown |= SEND_SHUTDOWN; 138 sk->sk_shutdown |= SEND_SHUTDOWN;
139 139
140 sk->sk_state_change(sk); 140 sk->sk_state_change(sk);
141 sock_orphan(sk); 141 sock_orphan(sk);
142 release_sock(sk); 142 release_sock(sk);
143 143
144 /* Close our TSAP. 144 /* Close our TSAP.
@@ -158,7 +158,7 @@ static void irda_disconnect_indication(void *instance, void *sap,
158 irttp_close_tsap(self->tsap); 158 irttp_close_tsap(self->tsap);
159 self->tsap = NULL; 159 self->tsap = NULL;
160 } 160 }
161 } 161 }
162 162
163 /* Note : once we are there, there is not much you want to do 163 /* Note : once we are there, there is not much you want to do
164 * with the socket anymore, apart from closing it. 164 * with the socket anymore, apart from closing it.
@@ -1211,7 +1211,7 @@ static int irda_release(struct socket *sock)
1211 1211
1212 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 1212 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
1213 1213
1214 if (sk == NULL) 1214 if (sk == NULL)
1215 return 0; 1215 return 0;
1216 1216
1217 lock_sock(sk); 1217 lock_sock(sk);
@@ -1259,7 +1259,7 @@ static int irda_release(struct socket *sock)
1259 * memory leak is now gone... - Jean II 1259 * memory leak is now gone... - Jean II
1260 */ 1260 */
1261 1261
1262 return 0; 1262 return 0;
1263} 1263}
1264 1264
1265/* 1265/*
@@ -1312,7 +1312,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1312 len = self->max_data_size; 1312 len = self->max_data_size;
1313 } 1313 }
1314 1314
1315 skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, 1315 skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16,
1316 msg->msg_flags & MSG_DONTWAIT, &err); 1316 msg->msg_flags & MSG_DONTWAIT, &err);
1317 if (!skb) 1317 if (!skb)
1318 return -ENOBUFS; 1318 return -ENOBUFS;
@@ -1714,7 +1714,7 @@ static int irda_shutdown(struct socket *sock, int how)
1714 self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */ 1714 self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */
1715 self->saddr = 0x0; /* so IrLMP assign us any link */ 1715 self->saddr = 0x0; /* so IrLMP assign us any link */
1716 1716
1717 return 0; 1717 return 0;
1718} 1718}
1719 1719
1720/* 1720/*
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 89fd2a2cbca6..789478bc3009 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: discovery.c 3 * Filename: discovery.c
4 * Version: 0.1 4 * Version: 0.1
5 * Description: Routines for handling discoveries at the IrLMP layer 5 * Description: Routines for handling discoveries at the IrLMP layer
@@ -10,24 +10,24 @@
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * Modified at: Fri May 28 3:11 CST 1999 11 * Modified at: Fri May 28 3:11 CST 1999
12 * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl> 12 * Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
13 * 13 *
14 * Copyright (c) 1999 Dag Brattli, All Rights Reserved. 14 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
15 * 15 *
16 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as 17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * This program is distributed in the hope that it will be useful, 21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details. 24 * GNU General Public License for more details.
25 * 25 *
26 * You should have received a copy of the GNU General Public License 26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software 27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA 29 * MA 02111-1307 USA
30 * 30 *
31 ********************************************************************/ 31 ********************************************************************/
32 32
33#include <linux/string.h> 33#include <linux/string.h>
@@ -65,9 +65,9 @@ void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
65 65
66 spin_lock_irqsave(&cachelog->hb_spinlock, flags); 66 spin_lock_irqsave(&cachelog->hb_spinlock, flags);
67 67
68 /* 68 /*
69 * Remove all discoveries of devices that have previously been 69 * Remove all discoveries of devices that have previously been
70 * discovered on the same link with the same name (info), or the 70 * discovered on the same link with the same name (info), or the
71 * same daddr. We do this since some devices (mostly PDAs) change 71 * same daddr. We do this since some devices (mostly PDAs) change
72 * their device address between every discovery. 72 * their device address between every discovery.
73 */ 73 */
@@ -79,10 +79,10 @@ void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
79 discovery = (discovery_t *) hashbin_get_next(cachelog); 79 discovery = (discovery_t *) hashbin_get_next(cachelog);
80 80
81 if ((node->data.saddr == new->data.saddr) && 81 if ((node->data.saddr == new->data.saddr) &&
82 ((node->data.daddr == new->data.daddr) || 82 ((node->data.daddr == new->data.daddr) ||
83 (strcmp(node->data.info, new->data.info) == 0))) 83 (strcmp(node->data.info, new->data.info) == 0)))
84 { 84 {
85 /* This discovery is a previous discovery 85 /* This discovery is a previous discovery
86 * from the same device, so just remove it 86 * from the same device, so just remove it
87 */ 87 */
88 hashbin_remove_this(cachelog, (irda_queue_t *) node); 88 hashbin_remove_this(cachelog, (irda_queue_t *) node);
@@ -134,7 +134,7 @@ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
134 134
135 discovery = (discovery_t *) hashbin_remove_first(log); 135 discovery = (discovery_t *) hashbin_remove_first(log);
136 } 136 }
137 137
138 /* Delete the now empty log */ 138 /* Delete the now empty log */
139 hashbin_delete(log, (FREE_FUNC) kfree); 139 hashbin_delete(log, (FREE_FUNC) kfree);
140} 140}
@@ -232,7 +232,7 @@ void irlmp_dump_discoveries(hashbin_t *log)
232 while (discovery != NULL) { 232 while (discovery != NULL) {
233 IRDA_DEBUG(0, "Discovery:\n"); 233 IRDA_DEBUG(0, "Discovery:\n");
234 IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr); 234 IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr);
235 IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr); 235 IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr);
236 IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info); 236 IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info);
237 237
238 discovery = (discovery_t *) hashbin_get_next(log); 238 discovery = (discovery_t *) hashbin_get_next(log);
@@ -321,26 +321,26 @@ static inline discovery_t *discovery_seq_idx(loff_t pos)
321{ 321{
322 discovery_t *discovery; 322 discovery_t *discovery;
323 323
324 for (discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog); 324 for (discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog);
325 discovery != NULL; 325 discovery != NULL;
326 discovery = (discovery_t *) hashbin_get_next(irlmp->cachelog)) { 326 discovery = (discovery_t *) hashbin_get_next(irlmp->cachelog)) {
327 if (pos-- == 0) 327 if (pos-- == 0)
328 break; 328 break;
329 } 329 }
330 330
331 return discovery; 331 return discovery;
332} 332}
333 333
334static void *discovery_seq_start(struct seq_file *seq, loff_t *pos) 334static void *discovery_seq_start(struct seq_file *seq, loff_t *pos)
335{ 335{
336 spin_lock_irq(&irlmp->cachelog->hb_spinlock); 336 spin_lock_irq(&irlmp->cachelog->hb_spinlock);
337 return *pos ? discovery_seq_idx(*pos - 1) : SEQ_START_TOKEN; 337 return *pos ? discovery_seq_idx(*pos - 1) : SEQ_START_TOKEN;
338} 338}
339 339
340static void *discovery_seq_next(struct seq_file *seq, void *v, loff_t *pos) 340static void *discovery_seq_next(struct seq_file *seq, void *v, loff_t *pos)
341{ 341{
342 ++*pos; 342 ++*pos;
343 return (v == SEQ_START_TOKEN) 343 return (v == SEQ_START_TOKEN)
344 ? (void *) hashbin_get_first(irlmp->cachelog) 344 ? (void *) hashbin_get_first(irlmp->cachelog)
345 : (void *) hashbin_get_next(irlmp->cachelog); 345 : (void *) hashbin_get_next(irlmp->cachelog);
346} 346}
@@ -357,9 +357,9 @@ static int discovery_seq_show(struct seq_file *seq, void *v)
357 else { 357 else {
358 const discovery_t *discovery = v; 358 const discovery_t *discovery = v;
359 359
360 seq_printf(seq, "nickname: %s, hint: 0x%02x%02x", 360 seq_printf(seq, "nickname: %s, hint: 0x%02x%02x",
361 discovery->data.info, 361 discovery->data.info,
362 discovery->data.hints[0], 362 discovery->data.hints[0],
363 discovery->data.hints[1]); 363 discovery->data.hints[1]);
364#if 0 364#if 0
365 if ( discovery->data.hints[0] & HINT_PNP) 365 if ( discovery->data.hints[0] & HINT_PNP)
@@ -376,20 +376,20 @@ static int discovery_seq_show(struct seq_file *seq, void *v)
376 seq_puts(seq, "Fax "); 376 seq_puts(seq, "Fax ");
377 if ( discovery->data.hints[0] & HINT_LAN) 377 if ( discovery->data.hints[0] & HINT_LAN)
378 seq_puts(seq, "LAN Access "); 378 seq_puts(seq, "LAN Access ");
379 379
380 if ( discovery->data.hints[1] & HINT_TELEPHONY) 380 if ( discovery->data.hints[1] & HINT_TELEPHONY)
381 seq_puts(seq, "Telephony "); 381 seq_puts(seq, "Telephony ");
382 if ( discovery->data.hints[1] & HINT_FILE_SERVER) 382 if ( discovery->data.hints[1] & HINT_FILE_SERVER)
383 seq_puts(seq, "File Server "); 383 seq_puts(seq, "File Server ");
384 if ( discovery->data.hints[1] & HINT_COMM) 384 if ( discovery->data.hints[1] & HINT_COMM)
385 seq_puts(seq, "IrCOMM "); 385 seq_puts(seq, "IrCOMM ");
386 if ( discovery->data.hints[1] & HINT_OBEX) 386 if ( discovery->data.hints[1] & HINT_OBEX)
387 seq_puts(seq, "IrOBEX "); 387 seq_puts(seq, "IrOBEX ");
388#endif 388#endif
389 seq_printf(seq,", saddr: 0x%08x, daddr: 0x%08x\n\n", 389 seq_printf(seq,", saddr: 0x%08x, daddr: 0x%08x\n\n",
390 discovery->data.saddr, 390 discovery->data.saddr,
391 discovery->data.daddr); 391 discovery->data.daddr);
392 392
393 seq_putc(seq, '\n'); 393 seq_putc(seq, '\n');
394 } 394 }
395 return 0; 395 return 0;
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index ad6b6af3dd97..c28ee7bce26a 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: ircomm_core.c 3 * Filename: ircomm_core.c
4 * Version: 1.0 4 * Version: 1.0
5 * Description: IrCOMM service interface 5 * Description: IrCOMM service interface
@@ -8,25 +8,25 @@
8 * Created at: Sun Jun 6 20:37:34 1999 8 * Created at: Sun Jun 6 20:37:34 1999
9 * Modified at: Tue Dec 21 13:26:41 1999 9 * Modified at: Tue Dec 21 13:26:41 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved. 12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> 13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as 16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * This program is distributed in the hope that it will be useful, 20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details. 23 * GNU General Public License for more details.
24 * 24 *
25 * You should have received a copy of the GNU General Public License 25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software 26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
28 * MA 02111-1307 USA 28 * MA 02111-1307 USA
29 * 29 *
30 ********************************************************************/ 30 ********************************************************************/
31 31
32#include <linux/module.h> 32#include <linux/module.h>
@@ -49,7 +49,7 @@
49#include <net/irda/ircomm_core.h> 49#include <net/irda/ircomm_core.h>
50 50
51static int __ircomm_close(struct ircomm_cb *self); 51static int __ircomm_close(struct ircomm_cb *self);
52static void ircomm_control_indication(struct ircomm_cb *self, 52static void ircomm_control_indication(struct ircomm_cb *self,
53 struct sk_buff *skb, int clen); 53 struct sk_buff *skb, int clen);
54 54
55#ifdef CONFIG_PROC_FS 55#ifdef CONFIG_PROC_FS
@@ -69,22 +69,22 @@ hashbin_t *ircomm = NULL;
69 69
70static int __init ircomm_init(void) 70static int __init ircomm_init(void)
71{ 71{
72 ircomm = hashbin_new(HB_LOCK); 72 ircomm = hashbin_new(HB_LOCK);
73 if (ircomm == NULL) { 73 if (ircomm == NULL) {
74 IRDA_ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__); 74 IRDA_ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__);
75 return -ENOMEM; 75 return -ENOMEM;
76 } 76 }
77 77
78#ifdef CONFIG_PROC_FS 78#ifdef CONFIG_PROC_FS
79 { struct proc_dir_entry *ent; 79 { struct proc_dir_entry *ent;
80 ent = create_proc_entry("ircomm", 0, proc_irda); 80 ent = create_proc_entry("ircomm", 0, proc_irda);
81 if (ent) 81 if (ent)
82 ent->proc_fops = &ircomm_proc_fops; 82 ent->proc_fops = &ircomm_proc_fops;
83 } 83 }
84#endif /* CONFIG_PROC_FS */ 84#endif /* CONFIG_PROC_FS */
85 85
86 IRDA_MESSAGE("IrCOMM protocol (Dag Brattli)\n"); 86 IRDA_MESSAGE("IrCOMM protocol (Dag Brattli)\n");
87 87
88 return 0; 88 return 0;
89} 89}
90 90
@@ -139,7 +139,7 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
139 139
140 hashbin_insert(ircomm, (irda_queue_t *) self, line, NULL); 140 hashbin_insert(ircomm, (irda_queue_t *) self, line, NULL);
141 141
142 ircomm_next_state(self, IRCOMM_IDLE); 142 ircomm_next_state(self, IRCOMM_IDLE);
143 143
144 return self; 144 return self;
145} 145}
@@ -195,8 +195,8 @@ int ircomm_close(struct ircomm_cb *self)
195 entry = hashbin_remove(ircomm, self->line, NULL); 195 entry = hashbin_remove(ircomm, self->line, NULL);
196 196
197 IRDA_ASSERT(entry == self, return -1;); 197 IRDA_ASSERT(entry == self, return -1;);
198 198
199 return __ircomm_close(self); 199 return __ircomm_close(self);
200} 200}
201 201
202EXPORT_SYMBOL(ircomm_close); 202EXPORT_SYMBOL(ircomm_close);
@@ -206,9 +206,9 @@ EXPORT_SYMBOL(ircomm_close);
206 * 206 *
207 * Impl. of this function differs from that of the reference. This 207 * Impl. of this function differs from that of the reference. This
208 * function does discovery as well as sending connect request 208 * function does discovery as well as sending connect request
209 * 209 *
210 */ 210 */
211int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel, 211int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel,
212 __u32 saddr, __u32 daddr, struct sk_buff *skb, 212 __u32 saddr, __u32 daddr, struct sk_buff *skb,
213 __u8 service_type) 213 __u8 service_type)
214{ 214{
@@ -243,20 +243,20 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
243 struct ircomm_info *info) 243 struct ircomm_info *info)
244{ 244{
245 int clen = 0; 245 int clen = 0;
246 246
247 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 247 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
248 248
249 /* Check if the packet contains data on the control channel */ 249 /* Check if the packet contains data on the control channel */
250 if (skb->len > 0) 250 if (skb->len > 0)
251 clen = skb->data[0]; 251 clen = skb->data[0];
252 252
253 /* 253 /*
254 * If there is any data hiding in the control channel, we must 254 * If there is any data hiding in the control channel, we must
255 * deliver it first. The side effect is that the control channel 255 * deliver it first. The side effect is that the control channel
256 * will be removed from the skb 256 * will be removed from the skb
257 */ 257 */
258 if (self->notify.connect_indication) 258 if (self->notify.connect_indication)
259 self->notify.connect_indication(self->notify.instance, self, 259 self->notify.connect_indication(self->notify.instance, self,
260 info->qos, info->max_data_size, 260 info->qos, info->max_data_size,
261 info->max_header_size, skb); 261 info->max_header_size, skb);
262 else { 262 else {
@@ -282,7 +282,7 @@ int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata)
282 ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL); 282 ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL);
283 283
284 return ret; 284 return ret;
285} 285}
286 286
287EXPORT_SYMBOL(ircomm_connect_response); 287EXPORT_SYMBOL(ircomm_connect_response);
288 288
@@ -299,7 +299,7 @@ void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
299 299
300 if (self->notify.connect_confirm ) 300 if (self->notify.connect_confirm )
301 self->notify.connect_confirm(self->notify.instance, 301 self->notify.connect_confirm(self->notify.instance,
302 self, info->qos, 302 self, info->qos,
303 info->max_data_size, 303 info->max_data_size,
304 info->max_header_size, skb); 304 info->max_header_size, skb);
305 else { 305 else {
@@ -322,7 +322,7 @@ int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb)
322 IRDA_ASSERT(self != NULL, return -EFAULT;); 322 IRDA_ASSERT(self != NULL, return -EFAULT;);
323 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); 323 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
324 IRDA_ASSERT(skb != NULL, return -EFAULT;); 324 IRDA_ASSERT(skb != NULL, return -EFAULT;);
325 325
326 ret = ircomm_do_event(self, IRCOMM_DATA_REQUEST, skb, NULL); 326 ret = ircomm_do_event(self, IRCOMM_DATA_REQUEST, skb, NULL);
327 327
328 return ret; 328 return ret;
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(ircomm_data_request);
337 * 337 *
338 */ 338 */
339void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb) 339void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb)
340{ 340{
341 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 341 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
342 342
343 IRDA_ASSERT(skb->len > 0, return;); 343 IRDA_ASSERT(skb->len > 0, return;);
@@ -363,9 +363,9 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
363 363
364 clen = skb->data[0]; 364 clen = skb->data[0];
365 365
366 /* 366 /*
367 * If there is any data hiding in the control channel, we must 367 * If there is any data hiding in the control channel, we must
368 * deliver it first. The side effect is that the control channel 368 * deliver it first. The side effect is that the control channel
369 * will be removed from the skb 369 * will be removed from the skb
370 */ 370 */
371 if (clen > 0) 371 if (clen > 0)
@@ -375,7 +375,7 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
375 skb_pull(skb, clen+1); 375 skb_pull(skb, clen+1);
376 376
377 if (skb->len) 377 if (skb->len)
378 ircomm_data_indication(self, skb); 378 ircomm_data_indication(self, skb);
379 else { 379 else {
380 IRDA_DEBUG(4, "%s(), data was control info only!\n", 380 IRDA_DEBUG(4, "%s(), data was control info only!\n",
381 __FUNCTION__ ); 381 __FUNCTION__ );
@@ -391,13 +391,13 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
391int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb) 391int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb)
392{ 392{
393 int ret; 393 int ret;
394 394
395 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 395 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
396 396
397 IRDA_ASSERT(self != NULL, return -EFAULT;); 397 IRDA_ASSERT(self != NULL, return -EFAULT;);
398 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); 398 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
399 IRDA_ASSERT(skb != NULL, return -EFAULT;); 399 IRDA_ASSERT(skb != NULL, return -EFAULT;);
400 400
401 ret = ircomm_do_event(self, IRCOMM_CONTROL_REQUEST, skb, NULL); 401 ret = ircomm_do_event(self, IRCOMM_CONTROL_REQUEST, skb, NULL);
402 402
403 return ret; 403 return ret;
@@ -411,10 +411,10 @@ EXPORT_SYMBOL(ircomm_control_request);
411 * Data has arrived on the control channel 411 * Data has arrived on the control channel
412 * 412 *
413 */ 413 */
414static void ircomm_control_indication(struct ircomm_cb *self, 414static void ircomm_control_indication(struct ircomm_cb *self,
415 struct sk_buff *skb, int clen) 415 struct sk_buff *skb, int clen)
416{ 416{
417 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 417 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
418 418
419 /* Use udata for delivering data on the control channel */ 419 /* Use udata for delivering data on the control channel */
420 if (self->notify.udata_indication) { 420 if (self->notify.udata_indication) {
@@ -427,8 +427,8 @@ static void ircomm_control_indication(struct ircomm_cb *self,
427 427
428 /* Remove data channel from control channel */ 428 /* Remove data channel from control channel */
429 skb_trim(ctrl_skb, clen+1); 429 skb_trim(ctrl_skb, clen+1);
430 430
431 self->notify.udata_indication(self->notify.instance, self, 431 self->notify.udata_indication(self->notify.instance, self,
432 ctrl_skb); 432 ctrl_skb);
433 433
434 /* Drop reference count - 434 /* Drop reference count -
@@ -455,7 +455,7 @@ int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata)
455 IRDA_ASSERT(self != NULL, return -1;); 455 IRDA_ASSERT(self != NULL, return -1;);
456 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); 456 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
457 457
458 ret = ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, userdata, 458 ret = ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, userdata,
459 &info); 459 &info);
460 return ret; 460 return ret;
461} 461}
@@ -472,7 +472,7 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
472 struct ircomm_info *info) 472 struct ircomm_info *info)
473{ 473{
474 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 474 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
475 475
476 IRDA_ASSERT(info != NULL, return;); 476 IRDA_ASSERT(info != NULL, return;);
477 477
478 if (self->notify.disconnect_indication) { 478 if (self->notify.disconnect_indication) {
@@ -486,7 +486,7 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
486/* 486/*
487 * Function ircomm_flow_request (self, flow) 487 * Function ircomm_flow_request (self, flow)
488 * 488 *
489 * 489 *
490 * 490 *
491 */ 491 */
492void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow) 492void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow)
@@ -517,7 +517,7 @@ static void *ircomm_seq_start(struct seq_file *seq, loff_t *pos)
517 self = (struct ircomm_cb *) hashbin_get_next(ircomm)) { 517 self = (struct ircomm_cb *) hashbin_get_next(ircomm)) {
518 if (off++ == *pos) 518 if (off++ == *pos)
519 break; 519 break;
520 520
521 } 521 }
522 return self; 522 return self;
523} 523}
@@ -535,7 +535,7 @@ static void ircomm_seq_stop(struct seq_file *seq, void *v)
535} 535}
536 536
537static int ircomm_seq_show(struct seq_file *seq, void *v) 537static int ircomm_seq_show(struct seq_file *seq, void *v)
538{ 538{
539 const struct ircomm_cb *self = v; 539 const struct ircomm_cb *self = v;
540 540
541 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EINVAL; ); 541 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EINVAL; );
@@ -548,7 +548,7 @@ static int ircomm_seq_show(struct seq_file *seq, void *v)
548 seq_printf(seq, 548 seq_printf(seq,
549 " state: %s, slsap_sel: %#02x, dlsap_sel: %#02x, mode:", 549 " state: %s, slsap_sel: %#02x, dlsap_sel: %#02x, mode:",
550 ircomm_state[ self->state], 550 ircomm_state[ self->state],
551 self->slsap_sel, self->dlsap_sel); 551 self->slsap_sel, self->dlsap_sel);
552 552
553 if(self->service_type & IRCOMM_3_WIRE_RAW) 553 if(self->service_type & IRCOMM_3_WIRE_RAW)
554 seq_printf(seq, " 3-wire-raw"); 554 seq_printf(seq, " 3-wire-raw");
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c
index 01f4e801a1ba..23d0468794e2 100644
--- a/net/irda/ircomm/ircomm_event.c
+++ b/net/irda/ircomm/ircomm_event.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: ircomm_event.c 3 * Filename: ircomm_event.c
4 * Version: 1.0 4 * Version: 1.0
5 * Description: IrCOMM layer state machine 5 * Description: IrCOMM layer state machine
@@ -8,24 +8,24 @@
8 * Created at: Sun Jun 6 20:33:11 1999 8 * Created at: Sun Jun 6 20:33:11 1999
9 * Modified at: Sun Dec 12 13:44:32 1999 9 * Modified at: Sun Dec 12 13:44:32 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved. 12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
13 * 13 *
14 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as 15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * This program is distributed in the hope that it will be useful, 19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details. 22 * GNU General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA 27 * MA 02111-1307 USA
28 * 28 *
29 ********************************************************************/ 29 ********************************************************************/
30 30
31#include <linux/sched.h> 31#include <linux/sched.h>
@@ -41,13 +41,13 @@
41#include <net/irda/ircomm_core.h> 41#include <net/irda/ircomm_core.h>
42#include <net/irda/ircomm_event.h> 42#include <net/irda/ircomm_event.h>
43 43
44static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event, 44static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
45 struct sk_buff *skb, struct ircomm_info *info); 45 struct sk_buff *skb, struct ircomm_info *info);
46static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event, 46static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
47 struct sk_buff *skb, struct ircomm_info *info); 47 struct sk_buff *skb, struct ircomm_info *info);
48static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, 48static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
49 struct sk_buff *skb, struct ircomm_info *info); 49 struct sk_buff *skb, struct ircomm_info *info);
50static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, 50static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
51 struct sk_buff *skb, struct ircomm_info *info); 51 struct sk_buff *skb, struct ircomm_info *info);
52 52
53char *ircomm_state[] = { 53char *ircomm_state[] = {
@@ -60,26 +60,26 @@ char *ircomm_state[] = {
60#ifdef CONFIG_IRDA_DEBUG 60#ifdef CONFIG_IRDA_DEBUG
61static char *ircomm_event[] = { 61static char *ircomm_event[] = {
62 "IRCOMM_CONNECT_REQUEST", 62 "IRCOMM_CONNECT_REQUEST",
63 "IRCOMM_CONNECT_RESPONSE", 63 "IRCOMM_CONNECT_RESPONSE",
64 "IRCOMM_TTP_CONNECT_INDICATION", 64 "IRCOMM_TTP_CONNECT_INDICATION",
65 "IRCOMM_LMP_CONNECT_INDICATION", 65 "IRCOMM_LMP_CONNECT_INDICATION",
66 "IRCOMM_TTP_CONNECT_CONFIRM", 66 "IRCOMM_TTP_CONNECT_CONFIRM",
67 "IRCOMM_LMP_CONNECT_CONFIRM", 67 "IRCOMM_LMP_CONNECT_CONFIRM",
68 68
69 "IRCOMM_LMP_DISCONNECT_INDICATION", 69 "IRCOMM_LMP_DISCONNECT_INDICATION",
70 "IRCOMM_TTP_DISCONNECT_INDICATION", 70 "IRCOMM_TTP_DISCONNECT_INDICATION",
71 "IRCOMM_DISCONNECT_REQUEST", 71 "IRCOMM_DISCONNECT_REQUEST",
72 72
73 "IRCOMM_TTP_DATA_INDICATION", 73 "IRCOMM_TTP_DATA_INDICATION",
74 "IRCOMM_LMP_DATA_INDICATION", 74 "IRCOMM_LMP_DATA_INDICATION",
75 "IRCOMM_DATA_REQUEST", 75 "IRCOMM_DATA_REQUEST",
76 "IRCOMM_CONTROL_REQUEST", 76 "IRCOMM_CONTROL_REQUEST",
77 "IRCOMM_CONTROL_INDICATION", 77 "IRCOMM_CONTROL_INDICATION",
78}; 78};
79#endif /* CONFIG_IRDA_DEBUG */ 79#endif /* CONFIG_IRDA_DEBUG */
80 80
81static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event, 81static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event,
82 struct sk_buff *skb, struct ircomm_info *info) = 82 struct sk_buff *skb, struct ircomm_info *info) =
83{ 83{
84 ircomm_state_idle, 84 ircomm_state_idle,
85 ircomm_state_waiti, 85 ircomm_state_waiti,
@@ -93,14 +93,14 @@ static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event,
93 * IrCOMM is currently idle 93 * IrCOMM is currently idle
94 * 94 *
95 */ 95 */
96static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event, 96static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
97 struct sk_buff *skb, struct ircomm_info *info) 97 struct sk_buff *skb, struct ircomm_info *info)
98{ 98{
99 int ret = 0; 99 int ret = 0;
100 100
101 switch (event) { 101 switch (event) {
102 case IRCOMM_CONNECT_REQUEST: 102 case IRCOMM_CONNECT_REQUEST:
103 ircomm_next_state(self, IRCOMM_WAITI); 103 ircomm_next_state(self, IRCOMM_WAITI);
104 ret = self->issue.connect_request(self, skb, info); 104 ret = self->issue.connect_request(self, skb, info);
105 break; 105 break;
106 case IRCOMM_TTP_CONNECT_INDICATION: 106 case IRCOMM_TTP_CONNECT_INDICATION:
@@ -119,10 +119,10 @@ static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
119/* 119/*
120 * Function ircomm_state_waiti (self, event, skb) 120 * Function ircomm_state_waiti (self, event, skb)
121 * 121 *
122 * The IrCOMM user has requested an IrCOMM connection to the remote 122 * The IrCOMM user has requested an IrCOMM connection to the remote
123 * device and is awaiting confirmation 123 * device and is awaiting confirmation
124 */ 124 */
125static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event, 125static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
126 struct sk_buff *skb, struct ircomm_info *info) 126 struct sk_buff *skb, struct ircomm_info *info)
127{ 127{
128 int ret = 0; 128 int ret = 0;
@@ -152,8 +152,8 @@ static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
152 * IrCOMM has received an incoming connection request and is awaiting 152 * IrCOMM has received an incoming connection request and is awaiting
153 * response from the user 153 * response from the user
154 */ 154 */
155static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, 155static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
156 struct sk_buff *skb, struct ircomm_info *info) 156 struct sk_buff *skb, struct ircomm_info *info)
157{ 157{
158 int ret = 0; 158 int ret = 0;
159 159
@@ -185,7 +185,7 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
185 * IrCOMM is connected to the peer IrCOMM device 185 * IrCOMM is connected to the peer IrCOMM device
186 * 186 *
187 */ 187 */
188static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, 188static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
189 struct sk_buff *skb, struct ircomm_info *info) 189 struct sk_buff *skb, struct ircomm_info *info)
190{ 190{
191 int ret = 0; 191 int ret = 0;
@@ -228,7 +228,7 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
228 * 228 *
229 */ 229 */
230int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, 230int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
231 struct sk_buff *skb, struct ircomm_info *info) 231 struct sk_buff *skb, struct ircomm_info *info)
232{ 232{
233 IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __FUNCTION__ , 233 IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __FUNCTION__ ,
234 ircomm_state[self->state], ircomm_event[event]); 234 ircomm_state[self->state], ircomm_event[event]);
@@ -245,7 +245,7 @@ int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
245void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state) 245void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state)
246{ 246{
247 self->state = state; 247 self->state = state;
248 248
249 IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __FUNCTION__ , 249 IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __FUNCTION__ ,
250 ircomm_state[self->state], self->service_type); 250 ircomm_state[self->state], self->service_type);
251} 251}
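The ircomm_event.c hunks above only adjust whitespace, but the code they touch is a classic table-driven state machine: ircomm_do_event() indexes the state[] array of handler functions by self->state, and ircomm_next_state() simply stores the new state. A stripped-down sketch of that dispatch shape, using hypothetical EX_* names rather than the real IrCOMM states and events:

/* Hypothetical, minimal version of the state[]-table dispatch above. */
struct ex_cb { int state; };
enum { EX_IDLE, EX_CONN };
enum { EX_CONNECT_REQUEST, EX_DATA_REQUEST };

static int ex_state_idle(struct ex_cb *self, int event, struct sk_buff *skb)
{
	if (event == EX_CONNECT_REQUEST)
		self->state = EX_CONN;	/* advance state, as ircomm_next_state() does */
	return 0;
}

static int ex_state_conn(struct ex_cb *self, int event, struct sk_buff *skb)
{
	/* data and disconnect handling would live here */
	return 0;
}

static int (*ex_state[])(struct ex_cb *, int, struct sk_buff *) = {
	[EX_IDLE] = ex_state_idle,
	[EX_CONN] = ex_state_conn,
};

static int ex_do_event(struct ex_cb *self, int event, struct sk_buff *skb)
{
	return ex_state[self->state](self, event, skb);
}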
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index c8e0d89ee11f..22bd75299104 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: ircomm_lmp.c 3 * Filename: ircomm_lmp.c
4 * Version: 1.0 4 * Version: 1.0
5 * Description: Interface between IrCOMM and IrLMP 5 * Description: Interface between IrCOMM and IrLMP
@@ -9,25 +9,25 @@
9 * Modified at: Sun Dec 12 13:44:17 1999 9 * Modified at: Sun Dec 12 13:44:17 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * Sources: Previous IrLPT work by Thomas Davis 11 * Sources: Previous IrLPT work by Thomas Davis
12 * 12 *
13 * Copyright (c) 1999 Dag Brattli, All Rights Reserved. 13 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
14 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> 14 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
15 * 15 *
16 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as 17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * This program is distributed in the hope that it will be useful, 21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details. 24 * GNU General Public License for more details.
25 * 25 *
26 * You should have received a copy of the GNU General Public License 26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software 27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA 29 * MA 02111-1307 USA
30 * 30 *
31 ********************************************************************/ 31 ********************************************************************/
32 32
33#include <linux/sched.h> 33#include <linux/sched.h>
@@ -45,11 +45,11 @@
45/* 45/*
46 * Function ircomm_lmp_connect_request (self, userdata) 46 * Function ircomm_lmp_connect_request (self, userdata)
47 * 47 *
48 * 48 *
49 * 49 *
50 */ 50 */
51static int ircomm_lmp_connect_request(struct ircomm_cb *self, 51static int ircomm_lmp_connect_request(struct ircomm_cb *self,
52 struct sk_buff *userdata, 52 struct sk_buff *userdata,
53 struct ircomm_info *info) 53 struct ircomm_info *info)
54{ 54{
55 int ret = 0; 55 int ret = 0;
@@ -61,14 +61,14 @@ static int ircomm_lmp_connect_request(struct ircomm_cb *self,
61 skb_get(userdata); 61 skb_get(userdata);
62 62
63 ret = irlmp_connect_request(self->lsap, info->dlsap_sel, 63 ret = irlmp_connect_request(self->lsap, info->dlsap_sel,
64 info->saddr, info->daddr, NULL, userdata); 64 info->saddr, info->daddr, NULL, userdata);
65 return ret; 65 return ret;
66} 66}
67 67
68/* 68/*
69 * Function ircomm_lmp_connect_response (self, skb) 69 * Function ircomm_lmp_connect_response (self, skb)
70 * 70 *
71 * 71 *
72 * 72 *
73 */ 73 */
74static int ircomm_lmp_connect_response(struct ircomm_cb *self, 74static int ircomm_lmp_connect_response(struct ircomm_cb *self,
@@ -78,7 +78,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
78 int ret; 78 int ret;
79 79
80 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 80 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
81 81
82 /* Any userdata supplied? */ 82 /* Any userdata supplied? */
83 if (userdata == NULL) { 83 if (userdata == NULL) {
84 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); 84 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
@@ -88,8 +88,8 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
88 /* Reserve space for MUX and LAP header */ 88 /* Reserve space for MUX and LAP header */
89 skb_reserve(tx_skb, LMP_MAX_HEADER); 89 skb_reserve(tx_skb, LMP_MAX_HEADER);
90 } else { 90 } else {
91 /* 91 /*
92 * Check that the client has reserved enough space for 92 * Check that the client has reserved enough space for
93 * headers 93 * headers
94 */ 94 */
95 IRDA_ASSERT(skb_headroom(userdata) >= LMP_MAX_HEADER, 95 IRDA_ASSERT(skb_headroom(userdata) >= LMP_MAX_HEADER,
@@ -105,22 +105,22 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
105 return 0; 105 return 0;
106} 106}
107 107
108static int ircomm_lmp_disconnect_request(struct ircomm_cb *self, 108static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
109 struct sk_buff *userdata, 109 struct sk_buff *userdata,
110 struct ircomm_info *info) 110 struct ircomm_info *info)
111{ 111{
112 struct sk_buff *tx_skb; 112 struct sk_buff *tx_skb;
113 int ret; 113 int ret;
114 114
115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
116 116
117 if (!userdata) { 117 if (!userdata) {
118 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); 118 tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
119 if (!tx_skb) 119 if (!tx_skb)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
122 /* Reserve space for MUX and LAP header */ 122 /* Reserve space for MUX and LAP header */
123 skb_reserve(tx_skb, LMP_MAX_HEADER); 123 skb_reserve(tx_skb, LMP_MAX_HEADER);
124 userdata = tx_skb; 124 userdata = tx_skb;
125 } else { 125 } else {
126 /* Don't forget to refcount it - should be NULL anyway */ 126 /* Don't forget to refcount it - should be NULL anyway */
@@ -136,7 +136,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
136 * Function ircomm_lmp_flow_control (skb) 136 * Function ircomm_lmp_flow_control (skb)
137 * 137 *
138 * This function is called when a data frame we have sent to IrLAP has 138 * This function is called when a data frame we have sent to IrLAP has
139 * been deallocated. We do this to make sure we don't flood IrLAP with 139 * been deallocated. We do this to make sure we don't flood IrLAP with
140 * frames, since we are not using the IrTTP flow control mechanism 140 * frames, since we are not using the IrTTP flow control mechanism
141 */ 141 */
142static void ircomm_lmp_flow_control(struct sk_buff *skb) 142static void ircomm_lmp_flow_control(struct sk_buff *skb)
@@ -150,29 +150,29 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb)
150 cb = (struct irda_skb_cb *) skb->cb; 150 cb = (struct irda_skb_cb *) skb->cb;
151 151
152 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 152 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
153 153
154 line = cb->line; 154 line = cb->line;
155 155
156 self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL); 156 self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL);
157 if (!self) { 157 if (!self) {
158 IRDA_DEBUG(2, "%s(), didn't find myself\n", __FUNCTION__ ); 158 IRDA_DEBUG(2, "%s(), didn't find myself\n", __FUNCTION__ );
159 return; 159 return;
160 } 160 }
161 161
162 IRDA_ASSERT(self != NULL, return;); 162 IRDA_ASSERT(self != NULL, return;);
163 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); 163 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
164 164
165 self->pkt_count--; 165 self->pkt_count--;
166 166
167 if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { 167 if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) {
168 IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __FUNCTION__ ); 168 IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __FUNCTION__ );
169 self->flow_status = FLOW_START; 169 self->flow_status = FLOW_START;
170 if (self->notify.flow_indication) 170 if (self->notify.flow_indication)
171 self->notify.flow_indication(self->notify.instance, 171 self->notify.flow_indication(self->notify.instance,
172 self, FLOW_START); 172 self, FLOW_START);
173 } 173 }
174} 174}
175 175
176/* 176/*
177 * Function ircomm_lmp_data_request (self, userdata) 177 * Function ircomm_lmp_data_request (self, userdata)
178 * 178 *
@@ -180,7 +180,7 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb)
180 * 180 *
181 */ 181 */
182static int ircomm_lmp_data_request(struct ircomm_cb *self, 182static int ircomm_lmp_data_request(struct ircomm_cb *self,
183 struct sk_buff *skb, 183 struct sk_buff *skb,
184 int not_used) 184 int not_used)
185{ 185{
186 struct irda_skb_cb *cb; 186 struct irda_skb_cb *cb;
@@ -189,8 +189,8 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
189 IRDA_ASSERT(skb != NULL, return -1;); 189 IRDA_ASSERT(skb != NULL, return -1;);
190 190
191 cb = (struct irda_skb_cb *) skb->cb; 191 cb = (struct irda_skb_cb *) skb->cb;
192 192
193 cb->line = self->line; 193 cb->line = self->line;
194 194
195 IRDA_DEBUG(4, "%s(), sending frame\n", __FUNCTION__ ); 195 IRDA_DEBUG(4, "%s(), sending frame\n", __FUNCTION__ );
196 196
@@ -199,13 +199,13 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
199 199
200 skb->destructor = ircomm_lmp_flow_control; 200 skb->destructor = ircomm_lmp_flow_control;
201 201
202 if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { 202 if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
203 IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __FUNCTION__ ); 203 IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __FUNCTION__ );
204 self->flow_status = FLOW_STOP; 204 self->flow_status = FLOW_STOP;
205 if (self->notify.flow_indication) 205 if (self->notify.flow_indication)
206 self->notify.flow_indication(self->notify.instance, 206 self->notify.flow_indication(self->notify.instance,
207 self, FLOW_STOP); 207 self, FLOW_STOP);
208 } 208 }
209 ret = irlmp_data_request(self->lsap, skb); 209 ret = irlmp_data_request(self->lsap, skb);
210 if (ret) { 210 if (ret) {
211 IRDA_ERROR("%s(), failed\n", __FUNCTION__); 211 IRDA_ERROR("%s(), failed\n", __FUNCTION__);
@@ -227,11 +227,11 @@ static int ircomm_lmp_data_indication(void *instance, void *sap,
227 struct ircomm_cb *self = (struct ircomm_cb *) instance; 227 struct ircomm_cb *self = (struct ircomm_cb *) instance;
228 228
229 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 229 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
230 230
231 IRDA_ASSERT(self != NULL, return -1;); 231 IRDA_ASSERT(self != NULL, return -1;);
232 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); 232 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
233 IRDA_ASSERT(skb != NULL, return -1;); 233 IRDA_ASSERT(skb != NULL, return -1;);
234 234
235 ircomm_do_event(self, IRCOMM_LMP_DATA_INDICATION, skb, NULL); 235 ircomm_do_event(self, IRCOMM_LMP_DATA_INDICATION, skb, NULL);
236 236
237 /* Drop reference count - see ircomm_tty_data_indication(). */ 237 /* Drop reference count - see ircomm_tty_data_indication(). */
@@ -241,15 +241,15 @@ static int ircomm_lmp_data_indication(void *instance, void *sap,
241} 241}
242 242
243/* 243/*
244 * Function ircomm_lmp_connect_confirm (instance, sap, qos, max_sdu_size, 244 * Function ircomm_lmp_connect_confirm (instance, sap, qos, max_sdu_size,
245 * max_header_size, skb) 245 * max_header_size, skb)
246 * 246 *
247 * Connection has been confirmed by peer device 247 * Connection has been confirmed by peer device
248 * 248 *
249 */ 249 */
250static void ircomm_lmp_connect_confirm(void *instance, void *sap, 250static void ircomm_lmp_connect_confirm(void *instance, void *sap,
251 struct qos_info *qos, 251 struct qos_info *qos,
252 __u32 max_seg_size, 252 __u32 max_seg_size,
253 __u8 max_header_size, 253 __u8 max_header_size,
254 struct sk_buff *skb) 254 struct sk_buff *skb)
255{ 255{
@@ -312,7 +312,7 @@ static void ircomm_lmp_connect_indication(void *instance, void *sap,
312 * Peer device has closed the connection, or the link went down for some 312 * Peer device has closed the connection, or the link went down for some
313 * other reason 313 * other reason
314 */ 314 */
315static void ircomm_lmp_disconnect_indication(void *instance, void *sap, 315static void ircomm_lmp_disconnect_indication(void *instance, void *sap,
316 LM_REASON reason, 316 LM_REASON reason,
317 struct sk_buff *skb) 317 struct sk_buff *skb)
318{ 318{
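The ircomm_lmp.c hunks above brush past the IrLMP flow-control path: ircomm_lmp_data_request() counts frames handed down and asks the TTY to slow down above a high watermark, while the skb destructor ircomm_lmp_flow_control() decrements the count and restarts the TTY once it drops below a low watermark. A bare-bones sketch of that watermark scheme, with made-up names (struct tx_flow, TX_HIGH_WATER, TX_LOW_WATER) and the same thresholds as the hunks above (>7 stop, <2 start):

#define TX_HIGH_WATER	7
#define TX_LOW_WATER	2

struct tx_flow {
	int outstanding;	/* frames currently queued in the layer below */
	int stopped;		/* producer has been told to stop */
};

/* Called when a frame is handed to the lower layer. */
static void tx_flow_on_send(struct tx_flow *f)
{
	if (f->outstanding++ > TX_HIGH_WATER && !f->stopped)
		f->stopped = 1;		/* ask the TTY to slow down */
}

/* Called from the frame's destructor once the lower layer frees it. */
static void tx_flow_on_free(struct tx_flow *f)
{
	if (--f->outstanding < TX_LOW_WATER && f->stopped)
		f->stopped = 0;		/* ask the TTY to start again */
}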
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index a39f5735a90b..fbac13e95b28 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename: ircomm_param.c
4 * Version: 1.0
5 * Description: Parameter handling for the IrCOMM protocol
@@ -8,24 +8,24 @@
8 * Created at: Mon Jun 7 10:25:11 1999
9 * Modified at: Sun Jan 30 14:32:03 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA
28 *
29 ********************************************************************/
30
31#include <linux/sched.h>
@@ -41,23 +41,23 @@
41
42#include <net/irda/ircomm_param.h>
43
44static int ircomm_param_service_type(void *instance, irda_param_t *param,
45 int get);
46static int ircomm_param_port_type(void *instance, irda_param_t *param,
47 int get);
48static int ircomm_param_port_name(void *instance, irda_param_t *param,
49 int get);
50static int ircomm_param_service_type(void *instance, irda_param_t *param,
51 int get);
52static int ircomm_param_data_rate(void *instance, irda_param_t *param,
53 int get);
54static int ircomm_param_data_format(void *instance, irda_param_t *param,
55 int get);
56static int ircomm_param_flow_control(void *instance, irda_param_t *param,
57 int get);
58static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get);
59static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get);
60static int ircomm_param_line_status(void *instance, irda_param_t *param,
61 int get);
62static int ircomm_param_dte(void *instance, irda_param_t *param, int get);
63static int ircomm_param_dce(void *instance, irda_param_t *param, int get);
@@ -85,7 +85,7 @@ static pi_minor_info_t pi_minor_call_table_9_wire[] = {
85static pi_major_info_t pi_major_call_table[] = {
86 { pi_minor_call_table_common, 3 },
87 { pi_minor_call_table_non_raw, 6 },
88 { pi_minor_call_table_9_wire, 3 }
89/* { pi_minor_call_table_centronics } */
90};
91
@@ -119,20 +119,20 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
119
120 spin_lock_irqsave(&self->spinlock, flags);
121
122 skb = self->ctrl_skb;
123 if (!skb) {
124 skb = alloc_skb(256, GFP_ATOMIC);
125 if (!skb) {
126 spin_unlock_irqrestore(&self->spinlock, flags);
127 return -ENOMEM;
128 }
129
130 skb_reserve(skb, self->max_header_size);
131 self->ctrl_skb = skb;
132 }
133 /*
134 * Inserting is a little bit tricky since we don't know how much
135 * room we will need. But this should hopefully work OK
136 */
137 count = irda_param_insert(self, pi, skb->tail, skb_tailroom(skb),
138 &ircomm_param_info);
@@ -162,7 +162,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
162 * query and then the remote device sends its initial parameters
163 *
164 */
165static int ircomm_param_service_type(void *instance, irda_param_t *param,
166 int get)
167{
168 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
@@ -179,7 +179,7 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
179 /* Find all common service types */
180 service_type &= self->service_type;
181 if (!service_type) {
182 IRDA_DEBUG(2,
183 "%s(), No common service type to use!\n", __FUNCTION__ );
184 return -1;
185 }
@@ -198,12 +198,12 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
198 else if (service_type & IRCOMM_3_WIRE_RAW)
199 self->settings.service_type = IRCOMM_3_WIRE_RAW;
200
201 IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __FUNCTION__ ,
202 self->settings.service_type);
203
204 /*
205 * Now the line is ready for some communication. Check if we are a
206 * server, and send over some initial parameters.
207 * Client do it in ircomm_tty_state_setup().
208 * Note : we may get called from ircomm_tty_getvalue_confirm(),
209 * therefore before we even have open any socket. And self->client
@@ -235,13 +235,13 @@ static int ircomm_param_port_type(void *instance, irda_param_t *param, int get)
235
236 IRDA_ASSERT(self != NULL, return -1;);
237 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
238
239 if (get)
240 param->pv.i = IRCOMM_SERIAL;
241 else {
242 self->settings.port_type = (__u8) param->pv.i;
243
244 IRDA_DEBUG(0, "%s(), port type=%d\n", __FUNCTION__ ,
245 self->settings.port_type);
246 }
247 return 0;
@@ -256,7 +256,7 @@ static int ircomm_param_port_type(void *instance, irda_param_t *param, int get)
256static int ircomm_param_port_name(void *instance, irda_param_t *param, int get)
257{
258 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
259
260 IRDA_ASSERT(self != NULL, return -1;);
261 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
262
@@ -279,7 +279,7 @@ static int ircomm_param_port_name(void *instance, irda_param_t *param, int get)
279static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get)
280{
281 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
282
283 IRDA_ASSERT(self != NULL, return -1;);
284 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
285
@@ -287,7 +287,7 @@ static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get)
287 param->pv.i = self->settings.data_rate;
288 else
289 self->settings.data_rate = param->pv.i;
290
291 IRDA_DEBUG(2, "%s(), data rate = %d\n", __FUNCTION__ , param->pv.i);
292
293 return 0;
@@ -299,7 +299,7 @@ static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get)
299 * Exchange data format to be used in this settings
300 *
301 */
302static int ircomm_param_data_format(void *instance, irda_param_t *param,
303 int get)
304{
305 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
@@ -311,7 +311,7 @@ static int ircomm_param_data_format(void *instance, irda_param_t *param,
311 param->pv.i = self->settings.data_format;
312 else
313 self->settings.data_format = (__u8) param->pv.i;
314
315 return 0;
316}
317
@@ -321,14 +321,14 @@ static int ircomm_param_data_format(void *instance, irda_param_t *param,
321 * Exchange flow control settings to be used in this settings
322 *
323 */
324static int ircomm_param_flow_control(void *instance, irda_param_t *param,
325 int get)
326{
327 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
328
329 IRDA_ASSERT(self != NULL, return -1;);
330 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
331
332 if (get)
333 param->pv.i = self->settings.flow_control;
334 else
@@ -351,7 +351,7 @@ static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get)
351
352 IRDA_ASSERT(self != NULL, return -1;);
353 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
354
355 if (get) {
356 param->pv.i = self->settings.xonxoff[0];
357 param->pv.i |= self->settings.xonxoff[1] << 8;
@@ -360,7 +360,7 @@ static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get)
360 self->settings.xonxoff[1] = (__u16) param->pv.i >> 8;
361 }
362
363 IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __FUNCTION__ ,
364 param->pv.i & 0xff, param->pv.i >> 8);
365
366 return 0;
@@ -378,7 +378,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get)
378
379 IRDA_ASSERT(self != NULL, return -1;);
380 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
381
382 if (get) {
383 param->pv.i = self->settings.enqack[0];
384 param->pv.i |= self->settings.enqack[1] << 8;
@@ -396,10 +396,10 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get)
396/*
397 * Function ircomm_param_line_status (self, param)
398 *
399 *
400 *
401 */
402static int ircomm_param_line_status(void *instance, irda_param_t *param,
403 int get)
404{
405 IRDA_DEBUG(2, "%s(), not impl.\n", __FUNCTION__ );
@@ -427,7 +427,7 @@ static int ircomm_param_dte(void *instance, irda_param_t *param, int get)
427 dte = (__u8) param->pv.i;
428
429 self->settings.dce = 0;
430
431 if (dte & IRCOMM_DELTA_DTR)
432 self->settings.dce |= (IRCOMM_DELTA_DSR|
433 IRCOMM_DELTA_RI |
@@ -436,7 +436,7 @@ static int ircomm_param_dte(void *instance, irda_param_t *param, int get)
436 self->settings.dce |= (IRCOMM_DSR|
437 IRCOMM_RI |
438 IRCOMM_CD);
439
440 if (dte & IRCOMM_DELTA_RTS)
441 self->settings.dce |= IRCOMM_DELTA_CTS;
442 if (dte & IRCOMM_RTS)
@@ -455,7 +455,7 @@ static int ircomm_param_dte(void *instance, irda_param_t *param, int get)
455/*
456 * Function ircomm_param_dce (instance, param)
457 *
458 *
459 *
460 */
461static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
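For context: the parameter callbacks declared and tabulated above all share one shape — they are dispatched from the pi_major/pi_minor tables with a "get" flag that selects between reporting the local value and storing the value pushed by the peer. A minimal sketch of that pattern, modelled on the ircomm_param_data_rate() hunks shown above; the handler name ircomm_param_example is hypothetical and not part of this file:

/* Sketch only, modelled on ircomm_param_data_rate() above.
 * The handler name is hypothetical. */
static int ircomm_param_example(void *instance, irda_param_t *param, int get)
{
	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);

	if (get)
		param->pv.i = self->settings.data_rate;	/* peer queries our value */
	else
		self->settings.data_rate = param->pv.i;	/* peer pushes its value */

	return 0;
}

Handlers of this form are reached through irda_param_insert() when building the control skb (see ircomm_param_request() above) and through irda_param_extract_all() when decoding received parameters.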
diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c
index d98bf3570d29..bb06ebaadd16 100644
--- a/net/irda/ircomm/ircomm_ttp.c
+++ b/net/irda/ircomm/ircomm_ttp.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename: ircomm_ttp.c
4 * Version: 1.0
5 * Description: Interface between IrCOMM and IrTTP
@@ -8,25 +8,25 @@
8 * Created at: Sun Jun 6 20:48:27 1999
9 * Modified at: Mon Dec 13 11:35:13 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
28 * MA 02111-1307 USA
29 *
30 ********************************************************************/
31
32#include <linux/sched.h>
@@ -43,8 +43,8 @@
43static int ircomm_ttp_data_indication(void *instance, void *sap,
44 struct sk_buff *skb);
45static void ircomm_ttp_connect_confirm(void *instance, void *sap,
46 struct qos_info *qos,
47 __u32 max_sdu_size,
48 __u8 max_header_size,
49 struct sk_buff *skb);
50static void ircomm_ttp_connect_indication(void *instance, void *sap,
@@ -54,25 +54,25 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap,
54 struct sk_buff *skb);
55static void ircomm_ttp_flow_indication(void *instance, void *sap,
56 LOCAL_FLOW cmd);
57static void ircomm_ttp_disconnect_indication(void *instance, void *sap,
58 LM_REASON reason,
59 struct sk_buff *skb);
60static int ircomm_ttp_data_request(struct ircomm_cb *self,
61 struct sk_buff *skb,
62 int clen);
63static int ircomm_ttp_connect_request(struct ircomm_cb *self,
64 struct sk_buff *userdata,
65 struct ircomm_info *info);
66static int ircomm_ttp_connect_response(struct ircomm_cb *self,
67 struct sk_buff *userdata);
68static int ircomm_ttp_disconnect_request(struct ircomm_cb *self,
69 struct sk_buff *userdata,
70 struct ircomm_info *info);
71
72/*
73 * Function ircomm_open_tsap (self)
74 *
75 *
76 *
77 */
78int ircomm_open_tsap(struct ircomm_cb *self)
@@ -113,11 +113,11 @@ int ircomm_open_tsap(struct ircomm_cb *self)
113/*
114 * Function ircomm_ttp_connect_request (self, userdata)
115 *
116 *
117 *
118 */
119static int ircomm_ttp_connect_request(struct ircomm_cb *self,
120 struct sk_buff *userdata,
121 struct ircomm_info *info)
122{
123 int ret = 0;
@@ -129,16 +129,16 @@ static int ircomm_ttp_connect_request(struct ircomm_cb *self,
129 skb_get(userdata);
130
131 ret = irttp_connect_request(self->tsap, info->dlsap_sel,
132 info->saddr, info->daddr, NULL,
133 TTP_SAR_DISABLE, userdata);
134
135 return ret;
136}
137
138/*
139 * Function ircomm_ttp_connect_response (self, skb)
140 *
141 *
142 *
143 */
144static int ircomm_ttp_connect_response(struct ircomm_cb *self,
@@ -147,7 +147,7 @@ static int ircomm_ttp_connect_response(struct ircomm_cb *self,
147 int ret;
148
149 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
150
151 /* Don't forget to refcount it - should be NULL anyway */
152 if(userdata)
153 skb_get(userdata);
@@ -160,14 +160,14 @@ static int ircomm_ttp_connect_response(struct ircomm_cb *self,
160/*
161 * Function ircomm_ttp_data_request (self, userdata)
162 *
163 * Send IrCOMM data to IrTTP layer. Currently we do not try to combine
164 * control data with pure data, so they will be sent as separate frames.
165 * Should not be a big problem though, since control frames are rare. But
166 * some of them are sent after connection establishment, so this can
167 * increase the latency a bit.
168 */
169static int ircomm_ttp_data_request(struct ircomm_cb *self,
170 struct sk_buff *skb,
171 int clen)
172{
173 int ret;
@@ -176,7 +176,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self,
176
177 IRDA_DEBUG(2, "%s(), clen=%d\n", __FUNCTION__ , clen);
178
179 /*
180 * Insert clen field, currently we either send data only, or control
181 * only frames, to make things easier and avoid queueing
182 */
@@ -210,7 +210,7 @@ static int ircomm_ttp_data_indication(void *instance, void *sap,
210 struct ircomm_cb *self = (struct ircomm_cb *) instance;
211
212 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
213
214 IRDA_ASSERT(self != NULL, return -1;);
215 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
216 IRDA_ASSERT(skb != NULL, return -1;);
@@ -224,8 +224,8 @@ static int ircomm_ttp_data_indication(void *instance, void *sap,
224}
225
226static void ircomm_ttp_connect_confirm(void *instance, void *sap,
227 struct qos_info *qos,
228 __u32 max_sdu_size,
229 __u8 max_header_size,
230 struct sk_buff *skb)
231{
@@ -261,7 +261,7 @@ out:
261 * Function ircomm_ttp_connect_indication (instance, sap, qos, max_sdu_size,
262 * max_header_size, skb)
263 *
264 *
265 *
266 */
267static void ircomm_ttp_connect_indication(void *instance, void *sap,
@@ -301,11 +301,11 @@ out:
301/*
302 * Function ircomm_ttp_disconnect_request (self, userdata, info)
303 *
304 *
305 *
306 */
307static int ircomm_ttp_disconnect_request(struct ircomm_cb *self,
308 struct sk_buff *userdata,
309 struct ircomm_info *info)
310{
311 int ret;
@@ -322,10 +322,10 @@ static int ircomm_ttp_disconnect_request(struct ircomm_cb *self,
322/*
323 * Function ircomm_ttp_disconnect_indication (instance, sap, reason, skb)
324 *
325 *
326 *
327 */
328static void ircomm_ttp_disconnect_indication(void *instance, void *sap,
329 LM_REASON reason,
330 struct sk_buff *skb)
331{
@@ -361,7 +361,7 @@ static void ircomm_ttp_flow_indication(void *instance, void *sap,
361
362 IRDA_ASSERT(self != NULL, return;);
363 IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
364
365 if (self->notify.flow_indication)
366 self->notify.flow_indication(self->notify.instance, self, cmd);
367}
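One convention visible in the hunks above is the skb reference counting around the IrTTP calls: before a buffer is handed down, the caller takes an extra reference with skb_get(), and the matching dev_kfree_skb() happens later on the completion path (see the "Don't forget to refcount it" and "Drop reference count" comments). A minimal sketch of that ownership rule, mirroring the irttp_connect_request() call shown above; the wrapper name example_connect is hypothetical:

/* Sketch only; mirrors ircomm_ttp_connect_request() above.
 * The wrapper name is hypothetical. */
static int example_connect(struct ircomm_cb *self, struct sk_buff *userdata,
			   struct ircomm_info *info)
{
	/* Keep our own reference; the one passed down is consumed below us. */
	if (userdata)
		skb_get(userdata);

	return irttp_connect_request(self->tsap, info->dlsap_sel,
				     info->saddr, info->daddr, NULL,
				     TTP_SAR_DISABLE, userdata);
}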
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 262bda808d96..3d241e415a2a 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename: ircomm_tty.c
4 * Version: 1.0
5 * Description: IrCOMM serial TTY driver
@@ -9,25 +9,25 @@
9 * Modified at: Wed Feb 23 00:09:02 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * Sources: serial.c and previous IrCOMM work by Takahide Higuchi
12 *
13 * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
14 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA
30 *
31 ********************************************************************/
32
33#include <linux/init.h>
@@ -69,7 +69,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
69 struct sk_buff *skb);
70static int ircomm_tty_control_indication(void *instance, void *sap,
71 struct sk_buff *skb);
72static void ircomm_tty_flow_indication(void *instance, void *sap,
73 LOCAL_FLOW cmd);
74#ifdef CONFIG_PROC_FS
75static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
@@ -113,7 +113,7 @@ static int __init ircomm_tty_init(void)
113 driver = alloc_tty_driver(IRCOMM_TTY_PORTS);
114 if (!driver)
115 return -ENOMEM;
116 ircomm_tty = hashbin_new(HB_LOCK);
117 if (ircomm_tty == NULL) {
118 IRDA_ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__);
119 put_tty_driver(driver);
@@ -163,11 +163,11 @@ static void __exit ircomm_tty_cleanup(void)
163{
164 int ret;
165
166 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
167
168 ret = tty_unregister_driver(driver);
169 if (ret) {
170 IRDA_ERROR("%s(), failed to unregister driver\n",
171 __FUNCTION__);
172 return;
173 }
@@ -179,7 +179,7 @@ static void __exit ircomm_tty_cleanup(void)
179/*
180 * Function ircomm_startup (self)
181 *
182 *
183 *
184 */
185static int ircomm_tty_startup(struct ircomm_tty_cb *self)
@@ -203,17 +203,17 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
203 /* These callbacks we must handle ourselves */
204 notify.data_indication = ircomm_tty_data_indication;
205 notify.udata_indication = ircomm_tty_control_indication;
206 notify.flow_indication = ircomm_tty_flow_indication;
207
208 /* Use the ircomm_tty interface for these ones */
209 notify.disconnect_indication = ircomm_tty_disconnect_indication;
210 notify.connect_confirm = ircomm_tty_connect_confirm;
211 notify.connect_indication = ircomm_tty_connect_indication;
212 strlcpy(notify.name, "ircomm_tty", sizeof(notify.name));
213 notify.instance = self;
214
215 if (!self->ircomm) {
216 self->ircomm = ircomm_open(&notify, self->service_type,
217 self->line);
218 }
219 if (!self->ircomm)
@@ -237,10 +237,10 @@ err:
237/*
238 * Function ircomm_block_til_ready (self, filp)
239 *
240 *
241 *
242 */
243static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
244 struct file *filp)
245{
246 DECLARE_WAITQUEUE(wait, current);
@@ -248,7 +248,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
248 int do_clocal = 0, extra_count = 0;
249 unsigned long flags;
250 struct tty_struct *tty;
251
252 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
253
254 tty = self->tty;
@@ -256,7 +256,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
256 /*
257 * If non-blocking mode is set, or the port is not enabled,
258 * then make the check up front and then exit.
259 */
260 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
261 /* nonblock mode is set or port is not enabled */
262 self->flags |= ASYNC_NORMAL_ACTIVE;
@@ -268,17 +268,17 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
268 IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __FUNCTION__ );
269 do_clocal = 1;
270 }
271
272 /* Wait for carrier detect and the line to become
273 * free (i.e., not in use by the callout). While we are in
274 * this loop, self->open_count is dropped by one, so that
275 * mgsl_close() knows when to free things. We restore it upon
276 * exit, either normal or abnormal.
277 */
278
279 retval = 0;
280 add_wait_queue(&self->open_wait, &wait);
281
282 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
283 __FILE__,__LINE__, tty->driver->name, self->open_count );
284
@@ -290,7 +290,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
290 }
291 spin_unlock_irqrestore(&self->spinlock, flags);
292 self->blocked_open++;
293
294 while (1) {
295 if (tty->termios->c_cflag & CBAUD) {
296 /* Here, we use to lock those two guys, but
@@ -298,45 +298,45 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
298 * I don't see the point (and I see the deadlock).
299 * Jean II */
300 self->settings.dte |= IRCOMM_RTS + IRCOMM_DTR;
301
302 ircomm_param_request(self, IRCOMM_DTE, TRUE);
303 }
304
305 current->state = TASK_INTERRUPTIBLE;
306
307 if (tty_hung_up_p(filp) ||
308 !test_bit(ASYNC_B_INITIALIZED, &self->flags)) {
309 retval = (self->flags & ASYNC_HUP_NOTIFY) ?
310 -EAGAIN : -ERESTARTSYS;
311 break;
312 }
313
314 /*
315 * Check if link is ready now. Even if CLOCAL is
316 * specified, we cannot return before the IrCOMM link is
317 * ready
318 */
319 if (!test_bit(ASYNC_B_CLOSING, &self->flags) &&
320 (do_clocal || (self->settings.dce & IRCOMM_CD)) &&
321 self->state == IRCOMM_TTY_READY)
322 {
323 break;
324 }
325
326 if (signal_pending(current)) {
327 retval = -ERESTARTSYS;
328 break;
329 }
330
331 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
332 __FILE__,__LINE__, tty->driver->name, self->open_count );
333
334 schedule();
335 }
336
337 __set_current_state(TASK_RUNNING);
338 remove_wait_queue(&self->open_wait, &wait);
339
340 if (extra_count) {
341 /* ++ is not atomic, so this should be protected - Jean II */
342 spin_lock_irqsave(&self->spinlock, flags);
@@ -344,14 +344,14 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
344 spin_unlock_irqrestore(&self->spinlock, flags);
345 }
346 self->blocked_open--;
347
348 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
349 __FILE__,__LINE__, tty->driver->name, self->open_count);
350
351 if (!retval)
352 self->flags |= ASYNC_NORMAL_ACTIVE;
353
354 return retval;
355}
356
357/*
@@ -384,7 +384,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__);
385 return -ENOMEM;
386 }
387
388 self->magic = IRCOMM_TTY_MAGIC;
389 self->flow = FLOW_STOP;
390
@@ -398,13 +398,13 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
398 /* Init some important stuff */
399 init_timer(&self->watchdog_timer);
400 init_waitqueue_head(&self->open_wait);
401 init_waitqueue_head(&self->close_wait);
402 spin_lock_init(&self->spinlock);
403
404 /*
405 * Force TTY into raw mode by default which is usually what
406 * we want for IrCOMM and IrLPT. This way applications will
407 * not have to twiddle with printcap etc.
408 */
409 tty->termios->c_iflag = 0;
410 tty->termios->c_oflag = 0;
@@ -420,7 +420,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
420 self->tty = tty;
421 spin_unlock_irqrestore(&self->spinlock, flags);
422
423 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __FUNCTION__ , tty->driver->name,
424 self->line, self->open_count);
425
426 /* Not really used by us, but lets do it anyway */
@@ -473,7 +473,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
473
474 ret = ircomm_tty_block_til_ready(self, filp);
475 if (ret) {
476 IRDA_DEBUG(2,
477 "%s(), returning after block_til_ready with %d\n", __FUNCTION__ ,
478 ret);
479
@@ -519,7 +519,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
519 * serial port won't be shutdown.
520 */
521 IRDA_DEBUG(0, "%s(), bad serial port count; "
522 "tty->count is 1, state->count is %d\n", __FUNCTION__ ,
523 self->open_count);
524 self->open_count = 1;
525 }
@@ -546,7 +546,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
546 spin_unlock_irqrestore(&self->spinlock, flags);
547
548 /*
549 * Now we wait for the transmit buffer to clear; and we notify
550 * the line discipline to only process XON/XOFF characters.
551 */
552 tty->closing = 1;
@@ -576,7 +576,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
576/*
577 * Function ircomm_tty_flush_buffer (tty)
578 *
579 *
580 *
581 */
582static void ircomm_tty_flush_buffer(struct tty_struct *tty)
@@ -586,9 +586,9 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
586 IRDA_ASSERT(self != NULL, return;);
587 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
588
589 /*
590 * Let do_softint() do this to avoid race condition with
591 * do_softint() ;-)
592 */
593 schedule_work(&self->tqueue);
594}
@@ -597,7 +597,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
597 * Function ircomm_tty_do_softint (work)
598 *
599 * We use this routine to give the write wakeup to the user at at a
600 * safe time (as fast as possible after write have completed). This
601 * can be compared to the Tx interrupt.
602 */
603static void ircomm_tty_do_softint(struct work_struct *work)
@@ -638,7 +638,7 @@ static void ircomm_tty_do_softint(struct work_struct *work)
638
639 /* Unlink transmit buffer */
640 spin_lock_irqsave(&self->spinlock, flags);
641
642 skb = self->tx_skb;
643 self->tx_skb = NULL;
644
@@ -650,9 +650,9 @@ static void ircomm_tty_do_softint(struct work_struct *work)
650 /* Drop reference count - see ircomm_ttp_data_request(). */
651 dev_kfree_skb(skb);
652 }
653
654 /* Check if user (still) wants to be waken up */
655 if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
656 tty->ldisc.write_wakeup)
657 {
658 (tty->ldisc.write_wakeup)(tty);
@@ -717,12 +717,12 @@ static int ircomm_tty_write(struct tty_struct *tty,
717 /* Fetch current transmit buffer */
718 skb = self->tx_skb;
719
720 /*
721 * Send out all the data we get, possibly as multiple fragmented
722 * frames, but this will only happen if the data is larger than the
723 * max data size. The normal case however is just the opposite, and
724 * this function may be called multiple times, and will then actually
725 * defragment the data and send it out as one packet as soon as
726 * possible, but at a safer point in time
727 */
728 while (count) {
@@ -731,16 +731,16 @@ static int ircomm_tty_write(struct tty_struct *tty,
731 /* Adjust data size to the max data size */
732 if (size > self->max_data_size)
733 size = self->max_data_size;
734
735 /*
736 * Do we already have a buffer ready for transmit, or do
737 * we need to allocate a new frame
738 */
739 if (skb) {
740 /*
741 * Any room for more data at the end of the current
742 * transmit buffer? Cannot use skb_tailroom, since
743 * dev_alloc_skb gives us a larger skb than we
744 * requested
745 * Note : use tx_data_size, because max_data_size
746 * may have changed and we don't want to overwrite
@@ -751,8 +751,8 @@ static int ircomm_tty_write(struct tty_struct *tty,
751 if (size > tailroom)
752 size = tailroom;
753 } else {
754 /*
755 * Current transmit frame is full, so break
756 * out, so we can send it as soon as possible
757 */
758 break;
@@ -782,15 +782,15 @@ static int ircomm_tty_write(struct tty_struct *tty,
782
783 spin_unlock_irqrestore(&self->spinlock, flags);
784
785 /*
786 * Schedule a new thread which will transmit the frame as soon
787 * as possible, but at a safe point in time. We do this so the
788 * "user" can give us data multiple times, as PPP does (because of
789 * its 256 byte tx buffer). We will then defragment and send out
790 * all this data as one single packet.
791 */
792 schedule_work(&self->tqueue);
793
794 return len;
795}
796
@@ -846,7 +846,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
846 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
847 unsigned long orig_jiffies, poll_time;
848 unsigned long flags;
849
850 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
851
852 IRDA_ASSERT(self != NULL, return;);
@@ -876,7 +876,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
876 *
877 * This routine notifies the tty driver that input buffers for the line
878 * discipline are close to full, and it should somehow signal that no
879 * more characters should be sent to the tty.
880 */
881static void ircomm_tty_throttle(struct tty_struct *tty)
882{
@@ -890,16 +890,16 @@ static void ircomm_tty_throttle(struct tty_struct *tty)
890 /* Software flow control? */
891 if (I_IXOFF(tty))
892 ircomm_tty_send_xchar(tty, STOP_CHAR(tty));
893
894 /* Hardware flow control? */
895 if (tty->termios->c_cflag & CRTSCTS) {
896 self->settings.dte &= ~IRCOMM_RTS;
897 self->settings.dte |= IRCOMM_DELTA_RTS;
898
899 ircomm_param_request(self, IRCOMM_DTE, TRUE);
900 }
901
902 ircomm_flow_request(self->ircomm, FLOW_STOP);
903}
904
905/* 905/*
@@ -930,7 +930,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty)
930 ircomm_param_request(self, IRCOMM_DTE, TRUE);
931 IRDA_DEBUG(1, "%s(), FLOW_START\n", __FUNCTION__ );
932 }
933 ircomm_flow_request(self->ircomm, FLOW_START);
934}
935
936/*
@@ -975,7 +975,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
975 spin_lock_irqsave(&self->spinlock, flags);
976
977 del_timer(&self->watchdog_timer);
978
979 /* Free parameter buffer */
980 if (self->ctrl_skb) {
981 dev_kfree_skb(self->ctrl_skb);
@@ -1001,7 +1001,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
1001 *
1002 * This routine notifies the tty driver that it should hangup the tty
1003 * device.
1004 *
1005 */
1006static void ircomm_tty_hangup(struct tty_struct *tty)
1007{
@@ -1044,7 +1044,7 @@ static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch)
1044 * Function ircomm_tty_start (tty)
1045 *
1046 * This routine notifies the tty driver that it resume sending
1047 * characters to the tty device.
1048 */
1049void ircomm_tty_start(struct tty_struct *tty)
1050{
@@ -1057,9 +1057,9 @@ void ircomm_tty_start(struct tty_struct *tty)
1057 * Function ircomm_tty_stop (tty)
1058 *
1059 * This routine notifies the tty driver that it should stop outputting
1060 * characters to the tty device.
1061 */
1062static void ircomm_tty_stop(struct tty_struct *tty)
1063{
1064 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
1065
@@ -1094,14 +1094,14 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
1094 /*wake_up_interruptible(&self->delta_msr_wait);*/
1095 }
1096 if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) {
1097 IRDA_DEBUG(2,
1098 "%s(), ircomm%d CD now %s...\n", __FUNCTION__ , self->line,
1099 (status & IRCOMM_CD) ? "on" : "off");
1100
1101 if (status & IRCOMM_CD) {
1102 wake_up_interruptible(&self->open_wait);
1103 } else {
1104 IRDA_DEBUG(2,
1105 "%s(), Doing serial hangup..\n", __FUNCTION__ );
1106 if (tty)
1107 tty_hangup(tty);
@@ -1113,10 +1113,10 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
1113 if (self->flags & ASYNC_CTS_FLOW) {
1114 if (tty->hw_stopped) {
1115 if (status & IRCOMM_CTS) {
1116 IRDA_DEBUG(2,
1117 "%s(), CTS tx start...\n", __FUNCTION__ );
1118 tty->hw_stopped = 0;
1119
1120 /* Wake up processes blocked on open */
1121 wake_up_interruptible(&self->open_wait);
1122
@@ -1125,7 +1125,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
1125 }
1126 } else {
1127 if (!(status & IRCOMM_CTS)) {
1128 IRDA_DEBUG(2,
1129 "%s(), CTS tx stop...\n", __FUNCTION__ );
1130 tty->hw_stopped = 1;
1131 }
@@ -1145,7 +1145,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
1145 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
1146
1147 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
1148
1149 IRDA_ASSERT(self != NULL, return -1;);
1150 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
1151 IRDA_ASSERT(skb != NULL, return -1;);
@@ -1155,10 +1155,10 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
1155 return 0; 1155 return 0;
1156 } 1156 }
1157 1157
1158 /* 1158 /*
1159 * If we receive data when hardware is stopped then something is wrong. 1159 * If we receive data when hardware is stopped then something is wrong.
1160 * We try to poll the peer's line settings to check if we are up to date. 1160 * We try to poll the peer's line settings to check if we are up to date.
1161 * Devices like WinCE can do this, and since they don't send any 1161 * Devices like WinCE can do this, and since they don't send any
1162 * params, we can just as well declare the hardware for running. 1162 * params, we can just as well declare the hardware for running.
1163 */ 1163 */
1164 if (self->tty->hw_stopped && (self->flow == FLOW_START)) { 1164 if (self->tty->hw_stopped && (self->flow == FLOW_START)) {
@@ -1170,9 +1170,9 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
1170 ircomm_tty_link_established(self); 1170 ircomm_tty_link_established(self);
1171 } 1171 }
1172 1172
1173 /* 1173 /*
1174 * Just give it over to the line discipline. There is no need to 1174 * Just give it over to the line discipline. There is no need to
1175 * involve the flip buffers, since we are not running in an interrupt 1175 * involve the flip buffers, since we are not running in an interrupt
1176 * handler 1176 * handler
1177 */ 1177 */
1178 self->tty->ldisc.receive_buf(self->tty, skb->data, NULL, skb->len); 1178 self->tty->ldisc.receive_buf(self->tty, skb->data, NULL, skb->len);
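
The hunk above only reindents ircomm_tty_data_indication(), but the flow it touches is worth spelling out: if data arrives while the port is marked hw_stopped and the last flow indication was FLOW_START, the driver assumes the peer simply never sent its line settings, re-queries them, declares the link established, and then hands the payload straight to the line discipline. The following is a minimal user-space sketch of that decision path; the struct fields and helper names are simplified stand-ins, not the kernel API.

/* Model of the ircomm_tty_data_indication() decision path.
 * Types and helpers are simplified stand-ins for the kernel ones. */
#include <stdio.h>

enum flow { FLOW_STOP, FLOW_START };

struct port {
    int hw_stopped;        /* 1 while CTS flow control holds output */
    enum flow flow;        /* last flow indication from the transport */
};

static void poll_peer_settings(void) { puts("re-query peer line settings"); }
static void link_established(struct port *p) { p->hw_stopped = 0; puts("declare hardware running"); }
static void deliver_to_ldisc(const unsigned char *buf, int len)
{
    (void)buf;
    printf("deliver %d bytes to line discipline\n", len);
}

static int data_indication(struct port *p, const unsigned char *buf, int len)
{
    /* Data while "stopped" means the peer never sent params (e.g. WinCE):
     * poll once and treat the hardware as running. */
    if (p->hw_stopped && p->flow == FLOW_START) {
        poll_peer_settings();
        link_established(p);
    }

    /* No flip buffers needed: this is not interrupt context. */
    deliver_to_ldisc(buf, len);
    return 0;
}

int main(void)
{
    struct port p = { .hw_stopped = 1, .flow = FLOW_START };
    unsigned char payload[] = "hello";
    return data_indication(&p, payload, sizeof(payload) - 1);
}
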
@@ -1195,14 +1195,14 @@ static int ircomm_tty_control_indication(void *instance, void *sap,
1195 int clen; 1195 int clen;
1196 1196
1197 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 1197 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
1198 1198
1199 IRDA_ASSERT(self != NULL, return -1;); 1199 IRDA_ASSERT(self != NULL, return -1;);
1200 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 1200 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
1201 IRDA_ASSERT(skb != NULL, return -1;); 1201 IRDA_ASSERT(skb != NULL, return -1;);
1202 1202
1203 clen = skb->data[0]; 1203 clen = skb->data[0];
1204 1204
1205 irda_param_extract_all(self, skb->data+1, IRDA_MIN(skb->len-1, clen), 1205 irda_param_extract_all(self, skb->data+1, IRDA_MIN(skb->len-1, clen),
1206 &ircomm_param_info); 1206 &ircomm_param_info);
1207 1207
1208 /* No need to kfree_skb - see ircomm_control_indication() */ 1208 /* No need to kfree_skb - see ircomm_control_indication() */
@@ -1217,7 +1217,7 @@ static int ircomm_tty_control_indication(void *instance, void *sap,
1217 * transmission of data. We just mark the hardware as stopped, and wait 1217 * transmission of data. We just mark the hardware as stopped, and wait
1218 * for IrTTP to notify us that things are OK again. 1218 * for IrTTP to notify us that things are OK again.
1219 */ 1219 */
1220static void ircomm_tty_flow_indication(void *instance, void *sap, 1220static void ircomm_tty_flow_indication(void *instance, void *sap,
1221 LOCAL_FLOW cmd) 1221 LOCAL_FLOW cmd)
1222{ 1222{
1223 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 1223 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
@@ -1247,7 +1247,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
1247 1247
1248static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf) 1248static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
1249{ 1249{
1250 int ret=0; 1250 int ret=0;
1251 1251
1252 ret += sprintf(buf+ret, "State: %s\n", ircomm_tty_state[self->state]); 1252 ret += sprintf(buf+ret, "State: %s\n", ircomm_tty_state[self->state]);
1253 1253
@@ -1260,37 +1260,37 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
1260 ret += sprintf(buf+ret, "3_WIRE_RAW"); 1260 ret += sprintf(buf+ret, "3_WIRE_RAW");
1261 else 1261 else
1262 ret += sprintf(buf+ret, "No common service type!\n"); 1262 ret += sprintf(buf+ret, "No common service type!\n");
1263 ret += sprintf(buf+ret, "\n"); 1263 ret += sprintf(buf+ret, "\n");
1264 1264
1265 ret += sprintf(buf+ret, "Port name: %s\n", self->settings.port_name); 1265 ret += sprintf(buf+ret, "Port name: %s\n", self->settings.port_name);
1266 1266
1267 ret += sprintf(buf+ret, "DTE status: "); 1267 ret += sprintf(buf+ret, "DTE status: ");
1268 if (self->settings.dte & IRCOMM_RTS) 1268 if (self->settings.dte & IRCOMM_RTS)
1269 ret += sprintf(buf+ret, "RTS|"); 1269 ret += sprintf(buf+ret, "RTS|");
1270 if (self->settings.dte & IRCOMM_DTR) 1270 if (self->settings.dte & IRCOMM_DTR)
1271 ret += sprintf(buf+ret, "DTR|"); 1271 ret += sprintf(buf+ret, "DTR|");
1272 if (self->settings.dte) 1272 if (self->settings.dte)
1273 ret--; /* remove the last | */ 1273 ret--; /* remove the last | */
1274 ret += sprintf(buf+ret, "\n"); 1274 ret += sprintf(buf+ret, "\n");
1275 1275
1276 ret += sprintf(buf+ret, "DCE status: "); 1276 ret += sprintf(buf+ret, "DCE status: ");
1277 if (self->settings.dce & IRCOMM_CTS) 1277 if (self->settings.dce & IRCOMM_CTS)
1278 ret += sprintf(buf+ret, "CTS|"); 1278 ret += sprintf(buf+ret, "CTS|");
1279 if (self->settings.dce & IRCOMM_DSR) 1279 if (self->settings.dce & IRCOMM_DSR)
1280 ret += sprintf(buf+ret, "DSR|"); 1280 ret += sprintf(buf+ret, "DSR|");
1281 if (self->settings.dce & IRCOMM_CD) 1281 if (self->settings.dce & IRCOMM_CD)
1282 ret += sprintf(buf+ret, "CD|"); 1282 ret += sprintf(buf+ret, "CD|");
1283 if (self->settings.dce & IRCOMM_RI) 1283 if (self->settings.dce & IRCOMM_RI)
1284 ret += sprintf(buf+ret, "RI|"); 1284 ret += sprintf(buf+ret, "RI|");
1285 if (self->settings.dce) 1285 if (self->settings.dce)
1286 ret--; /* remove the last | */ 1286 ret--; /* remove the last | */
1287 ret += sprintf(buf+ret, "\n"); 1287 ret += sprintf(buf+ret, "\n");
1288 1288
1289 ret += sprintf(buf+ret, "Configuration: "); 1289 ret += sprintf(buf+ret, "Configuration: ");
1290 if (!self->settings.null_modem) 1290 if (!self->settings.null_modem)
1291 ret += sprintf(buf+ret, "DTE <-> DCE\n"); 1291 ret += sprintf(buf+ret, "DTE <-> DCE\n");
1292 else 1292 else
1293 ret += sprintf(buf+ret, 1293 ret += sprintf(buf+ret,
1294 "DTE <-> DTE (null modem emulation)\n"); 1294 "DTE <-> DTE (null modem emulation)\n");
1295 1295
1296 ret += sprintf(buf+ret, "Data rate: %d\n", self->settings.data_rate); 1296 ret += sprintf(buf+ret, "Data rate: %d\n", self->settings.data_rate);
@@ -1314,7 +1314,7 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
1314 ret += sprintf(buf+ret, "ENQ_ACK_OUT|"); 1314 ret += sprintf(buf+ret, "ENQ_ACK_OUT|");
1315 if (self->settings.flow_control) 1315 if (self->settings.flow_control)
1316 ret--; /* remove the last | */ 1316 ret--; /* remove the last | */
1317 ret += sprintf(buf+ret, "\n"); 1317 ret += sprintf(buf+ret, "\n");
1318 1318
1319 ret += sprintf(buf+ret, "Flags: "); 1319 ret += sprintf(buf+ret, "Flags: ");
1320 if (self->flags & ASYNC_CTS_FLOW) 1320 if (self->flags & ASYNC_CTS_FLOW)
@@ -1333,25 +1333,25 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
1333 ret--; /* remove the last | */ 1333 ret--; /* remove the last | */
1334 ret += sprintf(buf+ret, "\n"); 1334 ret += sprintf(buf+ret, "\n");
1335 1335
1336 ret += sprintf(buf+ret, "Role: %s\n", self->client ? 1336 ret += sprintf(buf+ret, "Role: %s\n", self->client ?
1337 "client" : "server"); 1337 "client" : "server");
1338 ret += sprintf(buf+ret, "Open count: %d\n", self->open_count); 1338 ret += sprintf(buf+ret, "Open count: %d\n", self->open_count);
1339 ret += sprintf(buf+ret, "Max data size: %d\n", self->max_data_size); 1339 ret += sprintf(buf+ret, "Max data size: %d\n", self->max_data_size);
1340 ret += sprintf(buf+ret, "Max header size: %d\n", self->max_header_size); 1340 ret += sprintf(buf+ret, "Max header size: %d\n", self->max_header_size);
1341 1341
1342 if (self->tty) 1342 if (self->tty)
1343 ret += sprintf(buf+ret, "Hardware: %s\n", 1343 ret += sprintf(buf+ret, "Hardware: %s\n",
1344 self->tty->hw_stopped ? "Stopped" : "Running"); 1344 self->tty->hw_stopped ? "Stopped" : "Running");
1345 1345
1346 ret += sprintf(buf+ret, "\n"); 1346 ret += sprintf(buf+ret, "\n");
1347 return ret; 1347 return ret;
1348} 1348}
1349 1349
1350 1350
1351/* 1351/*
1352 * Function ircomm_tty_read_proc (buf, start, offset, len, eof, unused) 1352 * Function ircomm_tty_read_proc (buf, start, offset, len, eof, unused)
1353 * 1353 *
1354 * 1354 *
1355 * 1355 *
1356 */ 1356 */
1357#ifdef CONFIG_PROC_FS 1357#ifdef CONFIG_PROC_FS
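
ircomm_tty_line_info() above relies on a small idiom: each active flag appends "NAME|" with sprintf, and if at least one flag was printed, ret-- backs up over the final '|' so the next sprintf overwrites it. A compact user-space sketch of the same pattern, with made-up flag names, is:

/* Sketch of the "append FLAG| then drop the trailing |" idiom. */
#include <stdio.h>

#define F_CTS 0x01
#define F_DSR 0x02
#define F_CD  0x04

static int dump_flags(char *buf, unsigned int flags)
{
    int ret = 0;

    ret += sprintf(buf + ret, "DCE status: ");
    if (flags & F_CTS) ret += sprintf(buf + ret, "CTS|");
    if (flags & F_DSR) ret += sprintf(buf + ret, "DSR|");
    if (flags & F_CD)  ret += sprintf(buf + ret, "CD|");
    if (flags)
        ret--;                       /* step back over the last '|' ...  */
    ret += sprintf(buf + ret, "\n"); /* ...so the newline overwrites it */
    return ret;
}

int main(void)
{
    char buf[64];
    dump_flags(buf, F_CTS | F_CD);
    fputs(buf, stdout);              /* prints: DCE status: CTS|CD */
    return 0;
}
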
@@ -1359,8 +1359,8 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
1359 int *eof, void *unused) 1359 int *eof, void *unused)
1360{ 1360{
1361 struct ircomm_tty_cb *self; 1361 struct ircomm_tty_cb *self;
1362 int count = 0, l; 1362 int count = 0, l;
1363 off_t begin = 0; 1363 off_t begin = 0;
1364 unsigned long flags; 1364 unsigned long flags;
1365 1365
1366 spin_lock_irqsave(&ircomm_tty->hb_spinlock, flags); 1366 spin_lock_irqsave(&ircomm_tty->hb_spinlock, flags);
@@ -1370,25 +1370,25 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
1370 if (self->magic != IRCOMM_TTY_MAGIC) 1370 if (self->magic != IRCOMM_TTY_MAGIC)
1371 break; 1371 break;
1372 1372
1373 l = ircomm_tty_line_info(self, buf + count); 1373 l = ircomm_tty_line_info(self, buf + count);
1374 count += l; 1374 count += l;
1375 if (count+begin > offset+len) 1375 if (count+begin > offset+len)
1376 goto done; 1376 goto done;
1377 if (count+begin < offset) { 1377 if (count+begin < offset) {
1378 begin += count; 1378 begin += count;
1379 count = 0; 1379 count = 0;
1380 } 1380 }
1381 1381
1382 self = (struct ircomm_tty_cb *) hashbin_get_next(ircomm_tty); 1382 self = (struct ircomm_tty_cb *) hashbin_get_next(ircomm_tty);
1383 } 1383 }
1384 *eof = 1; 1384 *eof = 1;
1385done: 1385done:
1386 spin_unlock_irqrestore(&ircomm_tty->hb_spinlock, flags); 1386 spin_unlock_irqrestore(&ircomm_tty->hb_spinlock, flags);
1387 1387
1388 if (offset >= count+begin) 1388 if (offset >= count+begin)
1389 return 0; 1389 return 0;
1390 *start = buf + (offset-begin); 1390 *start = buf + (offset-begin);
1391 return ((len < begin+count-offset) ? len : begin+count-offset); 1391 return ((len < begin+count-offset) ? len : begin+count-offset);
1392} 1392}
1393#endif /* CONFIG_PROC_FS */ 1393#endif /* CONFIG_PROC_FS */
1394 1394
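
ircomm_tty_read_proc() above follows the classic pre-seq_file read_proc contract: generate text for each port into buf and use the count/begin bookkeeping to return only the window the caller asked for (offset..offset+len). Below is a hedged user-space model of just that slicing logic, with the hashbin walk replaced by a plain loop; it is a sketch of the pattern, not the kernel interface.

/* Model of the old read_proc offset/begin/count slicing.
 * The per-item formatting stands in for ircomm_tty_line_info(). */
#include <stdio.h>

static int line_info(char *buf, int item)
{
    return sprintf(buf, "port %d: some state\n", item);
}

static int read_proc(char *buf, char **start, long offset, int len, int *eof)
{
    int count = 0, l, i;
    long begin = 0;

    for (i = 0; i < 4; i++) {
        l = line_info(buf + count, i);
        count += l;
        if (count + begin > offset + len)
            goto done;              /* window full, stop generating */
        if (count + begin < offset) {
            begin += count;         /* everything so far is before the window */
            count = 0;              /* reuse the buffer from the top */
        }
    }
    *eof = 1;
done:
    if (offset >= count + begin)
        return 0;
    *start = buf + (offset - begin);
    return (len < begin + count - offset) ? len : begin + count - offset;
}

int main(void)
{
    char buf[256], *start = buf;
    int eof = 0;
    int n = read_proc(buf, &start, 0, (int)sizeof(buf), &eof);
    fwrite(start, 1, n, stdout);
    return 0;
}
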
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index 99f5eddbb4b7..8d7ba93e4e09 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -1,32 +1,32 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: ircomm_tty_attach.c 3 * Filename: ircomm_tty_attach.c
4 * Version: 4 * Version:
5 * Description: Code for attaching the serial driver to IrCOMM 5 * Description: Code for attaching the serial driver to IrCOMM
6 * Status: Experimental. 6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no> 7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sat Jun 5 17:42:00 1999 8 * Created at: Sat Jun 5 17:42:00 1999
9 * Modified at: Tue Jan 4 14:20:49 2000 9 * Modified at: Tue Jan 4 14:20:49 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. 12 * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> 13 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as 16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * This program is distributed in the hope that it will be useful, 20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details. 23 * GNU General Public License for more details.
24 * 24 *
25 * You should have received a copy of the GNU General Public License 25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software 26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
28 * MA 02111-1307 USA 28 * MA 02111-1307 USA
29 * 29 *
30 ********************************************************************/ 30 ********************************************************************/
31 31
32#include <linux/sched.h> 32#include <linux/sched.h>
@@ -50,35 +50,35 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self);
50static void ircomm_tty_discovery_indication(discinfo_t *discovery, 50static void ircomm_tty_discovery_indication(discinfo_t *discovery,
51 DISCOVERY_MODE mode, 51 DISCOVERY_MODE mode,
52 void *priv); 52 void *priv);
53static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, 53static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
54 struct ias_value *value, void *priv); 54 struct ias_value *value, void *priv);
55static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self, 55static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
56 int timeout); 56 int timeout);
57static void ircomm_tty_watchdog_timer_expired(void *data); 57static void ircomm_tty_watchdog_timer_expired(void *data);
58 58
59static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, 59static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
60 IRCOMM_TTY_EVENT event, 60 IRCOMM_TTY_EVENT event,
61 struct sk_buff *skb, 61 struct sk_buff *skb,
62 struct ircomm_tty_info *info); 62 struct ircomm_tty_info *info);
63static int ircomm_tty_state_search(struct ircomm_tty_cb *self, 63static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
64 IRCOMM_TTY_EVENT event, 64 IRCOMM_TTY_EVENT event,
65 struct sk_buff *skb, 65 struct sk_buff *skb,
66 struct ircomm_tty_info *info); 66 struct ircomm_tty_info *info);
67static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, 67static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
68 IRCOMM_TTY_EVENT event, 68 IRCOMM_TTY_EVENT event,
69 struct sk_buff *skb, 69 struct sk_buff *skb,
70 struct ircomm_tty_info *info); 70 struct ircomm_tty_info *info);
71static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, 71static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
72 IRCOMM_TTY_EVENT event, 72 IRCOMM_TTY_EVENT event,
73 struct sk_buff *skb, 73 struct sk_buff *skb,
74 struct ircomm_tty_info *info); 74 struct ircomm_tty_info *info);
75static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, 75static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
76 IRCOMM_TTY_EVENT event, 76 IRCOMM_TTY_EVENT event,
77 struct sk_buff *skb, 77 struct sk_buff *skb,
78 struct ircomm_tty_info *info); 78 struct ircomm_tty_info *info);
79static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, 79static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
80 IRCOMM_TTY_EVENT event, 80 IRCOMM_TTY_EVENT event,
81 struct sk_buff *skb, 81 struct sk_buff *skb,
82 struct ircomm_tty_info *info); 82 struct ircomm_tty_info *info);
83 83
84char *ircomm_tty_state[] = { 84char *ircomm_tty_state[] = {
@@ -111,7 +111,7 @@ static char *ircomm_tty_event[] = {
111#endif /* CONFIG_IRDA_DEBUG */ 111#endif /* CONFIG_IRDA_DEBUG */
112 112
113static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, 113static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
114 struct sk_buff *skb, struct ircomm_tty_info *info) = 114 struct sk_buff *skb, struct ircomm_tty_info *info) =
115{ 115{
116 ircomm_tty_state_idle, 116 ircomm_tty_state_idle,
117 ircomm_tty_state_search, 117 ircomm_tty_state_search,
@@ -125,7 +125,7 @@ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
125 * Function ircomm_tty_attach_cable (driver) 125 * Function ircomm_tty_attach_cable (driver)
126 * 126 *
127 * Try to attach cable (IrCOMM link). This function will only return 127 * Try to attach cable (IrCOMM link). This function will only return
128 * when the link has been connected, or if an error condition occurs. 128 * when the link has been connected, or if an error condition occurs.
 129 * On success, the return value is the resulting service type. 129 * On success, the return value is the resulting service type.
130 */ 130 */
131int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) 131int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
@@ -135,7 +135,7 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
135 IRDA_ASSERT(self != NULL, return -1;); 135 IRDA_ASSERT(self != NULL, return -1;);
136 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 136 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
137 137
138 /* Check if somebody has already connected to us */ 138 /* Check if somebody has already connected to us */
139 if (ircomm_is_connected(self->ircomm)) { 139 if (ircomm_is_connected(self->ircomm)) {
140 IRDA_DEBUG(0, "%s(), already connected!\n", __FUNCTION__ ); 140 IRDA_DEBUG(0, "%s(), already connected!\n", __FUNCTION__ );
141 return 0; 141 return 0;
@@ -177,7 +177,7 @@ void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
177 self->skey = NULL; 177 self->skey = NULL;
178 } 178 }
179 179
180 if (self->iriap) { 180 if (self->iriap) {
181 iriap_close(self->iriap); 181 iriap_close(self->iriap);
182 self->iriap = NULL; 182 self->iriap = NULL;
183 } 183 }
@@ -212,7 +212,7 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
212 212
213 IRDA_ASSERT(self != NULL, return;); 213 IRDA_ASSERT(self != NULL, return;);
214 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 214 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
215 215
216 /* Compute hint bits based on service */ 216 /* Compute hint bits based on service */
217 hints = irlmp_service_to_hint(S_COMM); 217 hints = irlmp_service_to_hint(S_COMM);
218 if (self->service_type & IRCOMM_3_WIRE_RAW) 218 if (self->service_type & IRCOMM_3_WIRE_RAW)
@@ -234,19 +234,19 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
234 if (self->service_type & IRCOMM_3_WIRE_RAW) { 234 if (self->service_type & IRCOMM_3_WIRE_RAW) {
235 /* Register IrLPT with LM-IAS */ 235 /* Register IrLPT with LM-IAS */
236 self->obj = irias_new_object("IrLPT", IAS_IRLPT_ID); 236 self->obj = irias_new_object("IrLPT", IAS_IRLPT_ID);
237 irias_add_integer_attrib(self->obj, "IrDA:IrLMP:LsapSel", 237 irias_add_integer_attrib(self->obj, "IrDA:IrLMP:LsapSel",
238 self->slsap_sel, IAS_KERNEL_ATTR); 238 self->slsap_sel, IAS_KERNEL_ATTR);
239 } else { 239 } else {
240 /* Register IrCOMM with LM-IAS */ 240 /* Register IrCOMM with LM-IAS */
241 self->obj = irias_new_object("IrDA:IrCOMM", IAS_IRCOMM_ID); 241 self->obj = irias_new_object("IrDA:IrCOMM", IAS_IRCOMM_ID);
242 irias_add_integer_attrib(self->obj, "IrDA:TinyTP:LsapSel", 242 irias_add_integer_attrib(self->obj, "IrDA:TinyTP:LsapSel",
243 self->slsap_sel, IAS_KERNEL_ATTR); 243 self->slsap_sel, IAS_KERNEL_ATTR);
244 244
245 /* Code the parameters into the buffer */ 245 /* Code the parameters into the buffer */
246 irda_param_pack(oct_seq, "bbbbbb", 246 irda_param_pack(oct_seq, "bbbbbb",
247 IRCOMM_SERVICE_TYPE, 1, self->service_type, 247 IRCOMM_SERVICE_TYPE, 1, self->service_type,
248 IRCOMM_PORT_TYPE, 1, IRCOMM_SERIAL); 248 IRCOMM_PORT_TYPE, 1, IRCOMM_SERIAL);
249 249
250 /* Register parameters with LM-IAS */ 250 /* Register parameters with LM-IAS */
251 irias_add_octseq_attrib(self->obj, "Parameters", oct_seq, 6, 251 irias_add_octseq_attrib(self->obj, "Parameters", oct_seq, 6,
252 IAS_KERNEL_ATTR); 252 IAS_KERNEL_ATTR);
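
In ircomm_tty_ias_register() above, the IrCOMM "Parameters" attribute is a six-byte octet sequence produced by irda_param_pack(oct_seq, "bbbbbb", ...): three bytes (PI, PL, PV) for the service type followed by three for the port type. Here is a minimal sketch of that packing; the IRCOMM_* constant values are placeholders, not the kernel definitions.

/* Sketch: packing the 6-byte IrCOMM "Parameters" octet sequence as
 * PI (parameter id), PL (length), PV (value) triplets.
 * Constant values below are placeholders, not the kernel definitions. */
#include <stdio.h>

#define IRCOMM_SERVICE_TYPE 0x00   /* placeholder PI */
#define IRCOMM_PORT_TYPE    0x01   /* placeholder PI */
#define IRCOMM_SERIAL       0x01   /* placeholder PV */

static int pack_params(unsigned char *seq, unsigned char service_type)
{
    int n = 0;

    seq[n++] = IRCOMM_SERVICE_TYPE; seq[n++] = 1; seq[n++] = service_type;
    seq[n++] = IRCOMM_PORT_TYPE;    seq[n++] = 1; seq[n++] = IRCOMM_SERIAL;
    return n;                       /* 6 bytes, registered as an octseq attribute */
}

int main(void)
{
    unsigned char oct_seq[6];
    int i, len = pack_params(oct_seq, 0x02 /* e.g. a 3-wire service */);

    for (i = 0; i < len; i++)
        printf("%02x ", oct_seq[i]);
    putchar('\n');
    return 0;
}
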
@@ -302,23 +302,23 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
302 IRDA_ASSERT(self != NULL, return -1;); 302 IRDA_ASSERT(self != NULL, return -1;);
303 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 303 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
304 304
305 if (self->service_type & IRCOMM_3_WIRE_RAW) 305 if (self->service_type & IRCOMM_3_WIRE_RAW)
306 return 0; 306 return 0;
307 307
308 /* 308 /*
309 * Set default values, but only if the application for some reason 309 * Set default values, but only if the application for some reason
 310 * hasn't set them already 310 * hasn't set them already
311 */ 311 */
312 IRDA_DEBUG(2, "%s(), data-rate = %d\n", __FUNCTION__ , 312 IRDA_DEBUG(2, "%s(), data-rate = %d\n", __FUNCTION__ ,
313 self->settings.data_rate); 313 self->settings.data_rate);
314 if (!self->settings.data_rate) 314 if (!self->settings.data_rate)
315 self->settings.data_rate = 9600; 315 self->settings.data_rate = 9600;
316 IRDA_DEBUG(2, "%s(), data-format = %d\n", __FUNCTION__ , 316 IRDA_DEBUG(2, "%s(), data-format = %d\n", __FUNCTION__ ,
317 self->settings.data_format); 317 self->settings.data_format);
318 if (!self->settings.data_format) 318 if (!self->settings.data_format)
319 self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ 319 self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */
320 320
321 IRDA_DEBUG(2, "%s(), flow-control = %d\n", __FUNCTION__ , 321 IRDA_DEBUG(2, "%s(), flow-control = %d\n", __FUNCTION__ ,
322 self->settings.flow_control); 322 self->settings.flow_control);
323 /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ 323 /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/
324 324
@@ -330,7 +330,7 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
330 ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE); 330 ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE);
331 ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE); 331 ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
332 ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE); 332 ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
333 333
334 /* For a 3 wire service, we just flush the last parameter and return */ 334 /* For a 3 wire service, we just flush the last parameter and return */
335 if (self->settings.service_type == IRCOMM_3_WIRE) { 335 if (self->settings.service_type == IRCOMM_3_WIRE) {
336 ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE); 336 ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
@@ -342,10 +342,10 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
342#if 0 342#if 0
343 ircomm_param_request(self, IRCOMM_XON_XOFF, FALSE); 343 ircomm_param_request(self, IRCOMM_XON_XOFF, FALSE);
344 ircomm_param_request(self, IRCOMM_ENQ_ACK, FALSE); 344 ircomm_param_request(self, IRCOMM_ENQ_ACK, FALSE);
345#endif 345#endif
346 /* Notify peer that we are ready to receive data */ 346 /* Notify peer that we are ready to receive data */
347 ircomm_param_request(self, IRCOMM_DTE, TRUE); 347 ircomm_param_request(self, IRCOMM_DTE, TRUE);
348 348
349 return 0; 349 return 0;
350} 350}
351 351
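
The three hunks above are whitespace-only, but the surrounding function encodes a small protocol: fill in 9600/8N1 defaults if the application has not chosen anything, queue SERVICE_TYPE, DATA_RATE and DATA_FORMAT without flushing, and let the final request (FLOW_CONTROL for a 3-wire service, DTE otherwise) flush the queued parameters to the peer. A rough sketch of that ordering, with the parameter machinery reduced to a print and the enum values purely illustrative:

/* Model of the "queue parameters, flush with the last one" ordering in
 * ircomm_tty_send_initial_parameters(). Enum values are illustrative. */
#include <stdio.h>

enum param { P_SERVICE_TYPE, P_DATA_RATE, P_DATA_FORMAT, P_FLOW_CONTROL, P_DTE };

static void param_request(enum param p, int flush)
{
    printf("param %d %s\n", p, flush ? "(flush queued params now)" : "(queued)");
}

static void send_initial_parameters(int data_rate, int data_format, int three_wire)
{
    if (!data_rate)
        data_rate = 9600;           /* default speed */
    if (!data_format)
        data_format = 0x08;         /* stand-in for IRCOMM_WSIZE_8, i.e. 8N1 */

    param_request(P_SERVICE_TYPE, 0);
    param_request(P_DATA_RATE, 0);
    param_request(P_DATA_FORMAT, 0);

    if (three_wire) {               /* 3-wire: flush with the flow-control param */
        param_request(P_FLOW_CONTROL, 1);
        return;
    }
    param_request(P_DTE, 1);        /* otherwise: DTE lines flush and say "ready" */
}

int main(void) { send_initial_parameters(0, 0, 0); return 0; }
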
@@ -388,8 +388,8 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery,
388 self = (struct ircomm_tty_cb *) hashbin_get_first(ircomm_tty); 388 self = (struct ircomm_tty_cb *) hashbin_get_first(ircomm_tty);
389 while (self != NULL) { 389 while (self != NULL) {
390 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 390 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
391 391
392 ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION, 392 ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION,
393 NULL, &info); 393 NULL, &info);
394 394
395 self = (struct ircomm_tty_cb *) hashbin_get_next(ircomm_tty); 395 self = (struct ircomm_tty_cb *) hashbin_get_next(ircomm_tty);
@@ -402,7 +402,7 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery,
402 * Link disconnected 402 * Link disconnected
403 * 403 *
404 */ 404 */
405void ircomm_tty_disconnect_indication(void *instance, void *sap, 405void ircomm_tty_disconnect_indication(void *instance, void *sap,
406 LM_REASON reason, 406 LM_REASON reason,
407 struct sk_buff *skb) 407 struct sk_buff *skb)
408{ 408{
@@ -422,7 +422,7 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap,
422 /* Stop data transfers */ 422 /* Stop data transfers */
423 self->tty->hw_stopped = 1; 423 self->tty->hw_stopped = 1;
424 424
425 ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL, 425 ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL,
426 NULL); 426 NULL);
427} 427}
428 428
@@ -432,8 +432,8 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap,
432 * Got result from the IAS query we make 432 * Got result from the IAS query we make
433 * 433 *
434 */ 434 */
435static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, 435static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
436 struct ias_value *value, 436 struct ias_value *value,
437 void *priv) 437 void *priv)
438{ 438{
439 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; 439 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv;
@@ -454,18 +454,18 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
454 } 454 }
455 455
456 switch (value->type) { 456 switch (value->type) {
457 case IAS_OCT_SEQ: 457 case IAS_OCT_SEQ:
458 IRDA_DEBUG(2, "%s(), got octet sequence\n", __FUNCTION__ ); 458 IRDA_DEBUG(2, "%s(), got octet sequence\n", __FUNCTION__ );
459 459
460 irda_param_extract_all(self, value->t.oct_seq, value->len, 460 irda_param_extract_all(self, value->t.oct_seq, value->len,
461 &ircomm_param_info); 461 &ircomm_param_info);
462 462
463 ircomm_tty_do_event(self, IRCOMM_TTY_GOT_PARAMETERS, NULL, 463 ircomm_tty_do_event(self, IRCOMM_TTY_GOT_PARAMETERS, NULL,
464 NULL); 464 NULL);
465 break; 465 break;
466 case IAS_INTEGER: 466 case IAS_INTEGER:
467 /* Got LSAP selector */ 467 /* Got LSAP selector */
468 IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __FUNCTION__ , 468 IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __FUNCTION__ ,
469 value->t.integer); 469 value->t.integer);
470 470
471 if (value->t.integer == -1) { 471 if (value->t.integer == -1) {
@@ -491,10 +491,10 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
491 * Connection confirmed 491 * Connection confirmed
492 * 492 *
493 */ 493 */
494void ircomm_tty_connect_confirm(void *instance, void *sap, 494void ircomm_tty_connect_confirm(void *instance, void *sap,
495 struct qos_info *qos, 495 struct qos_info *qos,
496 __u32 max_data_size, 496 __u32 max_data_size,
497 __u8 max_header_size, 497 __u8 max_header_size,
498 struct sk_buff *skb) 498 struct sk_buff *skb)
499{ 499{
500 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 500 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
@@ -515,16 +515,16 @@ void ircomm_tty_connect_confirm(void *instance, void *sap,
515} 515}
516 516
517/* 517/*
518 * Function ircomm_tty_connect_indication (instance, sap, qos, max_sdu_size, 518 * Function ircomm_tty_connect_indication (instance, sap, qos, max_sdu_size,
519 * skb) 519 * skb)
520 * 520 *
 521 * We are discovered and being requested to connect by a remote device! 521 * We are discovered and being requested to connect by a remote device!
522 * 522 *
523 */ 523 */
524void ircomm_tty_connect_indication(void *instance, void *sap, 524void ircomm_tty_connect_indication(void *instance, void *sap,
525 struct qos_info *qos, 525 struct qos_info *qos,
526 __u32 max_data_size, 526 __u32 max_data_size,
527 __u8 max_header_size, 527 __u8 max_header_size,
528 struct sk_buff *skb) 528 struct sk_buff *skb)
529{ 529{
530 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; 530 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
@@ -542,8 +542,8 @@ void ircomm_tty_connect_indication(void *instance, void *sap,
542 542
543 clen = skb->data[0]; 543 clen = skb->data[0];
544 if (clen) 544 if (clen)
545 irda_param_extract_all(self, skb->data+1, 545 irda_param_extract_all(self, skb->data+1,
546 IRDA_MIN(skb->len, clen), 546 IRDA_MIN(skb->len, clen),
547 &ircomm_param_info); 547 &ircomm_param_info);
548 548
549 ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_INDICATION, NULL, NULL); 549 ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_INDICATION, NULL, NULL);
@@ -566,14 +566,14 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
566 566
567 if (!self->tty) 567 if (!self->tty)
568 return; 568 return;
569 569
570 del_timer(&self->watchdog_timer); 570 del_timer(&self->watchdog_timer);
571 571
572 /* 572 /*
573 * IrCOMM link is now up, and if we are not using hardware 573 * IrCOMM link is now up, and if we are not using hardware
574 * flow-control, then declare the hardware as running. Otherwise we 574 * flow-control, then declare the hardware as running. Otherwise we
575 * will have to wait for the peer device (DCE) to raise the CTS 575 * will have to wait for the peer device (DCE) to raise the CTS
576 * line. 576 * line.
577 */ 577 */
578 if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) { 578 if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) {
579 IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __FUNCTION__ ); 579 IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __FUNCTION__ );
@@ -582,7 +582,7 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
582 IRDA_DEBUG(1, "%s(), starting hardware!\n", __FUNCTION__ ); 582 IRDA_DEBUG(1, "%s(), starting hardware!\n", __FUNCTION__ );
583 583
584 self->tty->hw_stopped = 0; 584 self->tty->hw_stopped = 0;
585 585
586 /* Wake up processes blocked on open */ 586 /* Wake up processes blocked on open */
587 wake_up_interruptible(&self->open_wait); 587 wake_up_interruptible(&self->open_wait);
588 } 588 }
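
ircomm_tty_link_established() above captures the CTS handshake in one branch: with ASYNC_CTS_FLOW set and the DCE's CTS bit still low, the port stays hw_stopped until ircomm_tty_check_modem_status() sees CTS rise; otherwise the hardware is declared running and anyone sleeping in open() is woken. A small model of that gate follows; the flag bit values are placeholders for the kernel's ASYNC_/IRCOMM_ constants.

/* Model of the CTS gate in ircomm_tty_link_established().
 * Bit values are placeholders for the kernel's ASYNC_/IRCOMM_ flags. */
#include <stdio.h>

#define ASYNC_CTS_FLOW 0x01
#define IRCOMM_CTS     0x10

struct port {
    unsigned int flags;   /* async flags */
    unsigned int dce;     /* last DCE line state from the peer */
    int hw_stopped;
};

static void wake_openers(void) { puts("wake up processes blocked on open()"); }

static void link_established(struct port *p)
{
    if ((p->flags & ASYNC_CTS_FLOW) && !(p->dce & IRCOMM_CTS)) {
        puts("waiting for CTS ...");   /* stay hw_stopped until CTS rises */
        return;
    }
    p->hw_stopped = 0;                 /* start the "hardware" */
    wake_openers();
}

int main(void)
{
    struct port p = { .flags = ASYNC_CTS_FLOW, .dce = 0, .hw_stopped = 1 };
    link_established(&p);              /* waits */
    p.dce |= IRCOMM_CTS;
    link_established(&p);              /* starts */
    return 0;
}
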
@@ -593,8 +593,8 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
593/* 593/*
594 * Function ircomm_tty_start_watchdog_timer (self, timeout) 594 * Function ircomm_tty_start_watchdog_timer (self, timeout)
595 * 595 *
 596 * Start the watchdog timer. This timer makes sure that any 596 * Start the watchdog timer. This timer makes sure that any
 597 * connection attempt succeeds; if it does not, we retry after 597 * connection attempt succeeds; if it does not, we retry after
598 * the timeout 598 * the timeout
599 */ 599 */
600static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self, 600static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
@@ -616,7 +616,7 @@ static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
616static void ircomm_tty_watchdog_timer_expired(void *data) 616static void ircomm_tty_watchdog_timer_expired(void *data)
617{ 617{
618 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; 618 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
619 619
620 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 620 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
621 621
622 IRDA_ASSERT(self != NULL, return;); 622 IRDA_ASSERT(self != NULL, return;);
@@ -633,14 +633,14 @@ static void ircomm_tty_watchdog_timer_expired(void *data)
633 * 633 *
634 */ 634 */
635int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, 635int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
636 struct sk_buff *skb, struct ircomm_tty_info *info) 636 struct sk_buff *skb, struct ircomm_tty_info *info)
637{ 637{
638 IRDA_ASSERT(self != NULL, return -1;); 638 IRDA_ASSERT(self != NULL, return -1;);
639 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); 639 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
640 640
641 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , 641 IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ ,
642 ircomm_tty_state[self->state], ircomm_tty_event[event]); 642 ircomm_tty_state[self->state], ircomm_tty_event[event]);
643 643
644 return (*state[self->state])(self, event, skb, info); 644 return (*state[self->state])(self, event, skb, info);
645} 645}
646 646
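
ircomm_tty_do_event() above is the whole engine of the attach state machine: it indexes the static state[] array of function pointers by self->state and calls the handler with the event, so adding a state means adding one function and one table slot. A self-contained sketch of the same dispatch pattern, with invented states and events:

/* Sketch of the function-pointer state table used by ircomm_tty_do_event(). */
#include <stdio.h>

enum st { ST_IDLE, ST_SEARCH, ST_READY, ST_MAX };
enum ev { EV_ATTACH_CABLE, EV_DISCOVERY, EV_DETACH_CABLE };

struct ctx { enum st state; };

static int state_idle(struct ctx *c, enum ev e)
{
    if (e == EV_ATTACH_CABLE) { puts("idle: start discovery"); c->state = ST_SEARCH; }
    return 0;
}
static int state_search(struct ctx *c, enum ev e)
{
    if (e == EV_DISCOVERY) { puts("search: peer found"); c->state = ST_READY; }
    return 0;
}
static int state_ready(struct ctx *c, enum ev e)
{
    if (e == EV_DETACH_CABLE) { puts("ready: disconnect"); c->state = ST_IDLE; }
    return 0;
}

/* One handler per state, indexed by the current state. */
static int (*state[ST_MAX])(struct ctx *, enum ev) = {
    state_idle, state_search, state_ready,
};

static int do_event(struct ctx *c, enum ev e)
{
    return (*state[c->state])(c, e);   /* same shape as ircomm_tty_do_event() */
}

int main(void)
{
    struct ctx c = { ST_IDLE };
    do_event(&c, EV_ATTACH_CABLE);
    do_event(&c, EV_DISCOVERY);
    do_event(&c, EV_DETACH_CABLE);
    return 0;
}
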
@@ -656,7 +656,7 @@ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_
656 IRDA_ASSERT(self != NULL, return;); 656 IRDA_ASSERT(self != NULL, return;);
657 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 657 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
658 658
659 IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __FUNCTION__ , 659 IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __FUNCTION__ ,
660 ircomm_tty_state[self->state], self->service_type); 660 ircomm_tty_state[self->state], self->service_type);
661 */ 661 */
662 self->state = state; 662 self->state = state;
@@ -668,9 +668,9 @@ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_
668 * Just hanging around 668 * Just hanging around
669 * 669 *
670 */ 670 */
671static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, 671static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
672 IRCOMM_TTY_EVENT event, 672 IRCOMM_TTY_EVENT event,
673 struct sk_buff *skb, 673 struct sk_buff *skb,
674 struct ircomm_tty_info *info) 674 struct ircomm_tty_info *info)
675{ 675{
676 int ret = 0; 676 int ret = 0;
@@ -679,10 +679,10 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
679 ircomm_tty_state[self->state], ircomm_tty_event[event]); 679 ircomm_tty_state[self->state], ircomm_tty_event[event]);
680 switch (event) { 680 switch (event) {
681 case IRCOMM_TTY_ATTACH_CABLE: 681 case IRCOMM_TTY_ATTACH_CABLE:
682 /* Try to discover any remote devices */ 682 /* Try to discover any remote devices */
683 ircomm_tty_start_watchdog_timer(self, 3*HZ); 683 ircomm_tty_start_watchdog_timer(self, 3*HZ);
684 ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); 684 ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
685 685
686 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS); 686 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
687 break; 687 break;
688 case IRCOMM_TTY_DISCOVERY_INDICATION: 688 case IRCOMM_TTY_DISCOVERY_INDICATION:
@@ -701,7 +701,7 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
701 iriap_getvaluebyclass_request(self->iriap, 701 iriap_getvaluebyclass_request(self->iriap,
702 self->saddr, self->daddr, 702 self->saddr, self->daddr,
703 "IrDA:IrCOMM", "Parameters"); 703 "IrDA:IrCOMM", "Parameters");
704 704
705 ircomm_tty_start_watchdog_timer(self, 3*HZ); 705 ircomm_tty_start_watchdog_timer(self, 3*HZ);
706 ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS); 706 ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
707 break; 707 break;
@@ -732,9 +732,9 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
732 * Trying to discover an IrCOMM device 732 * Trying to discover an IrCOMM device
733 * 733 *
734 */ 734 */
735static int ircomm_tty_state_search(struct ircomm_tty_cb *self, 735static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
736 IRCOMM_TTY_EVENT event, 736 IRCOMM_TTY_EVENT event,
737 struct sk_buff *skb, 737 struct sk_buff *skb,
738 struct ircomm_tty_info *info) 738 struct ircomm_tty_info *info)
739{ 739{
740 int ret = 0; 740 int ret = 0;
@@ -752,19 +752,19 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
752 __FUNCTION__); 752 __FUNCTION__);
753 return -EBUSY; 753 return -EBUSY;
754 } 754 }
755 755
756 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, 756 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
757 ircomm_tty_getvalue_confirm); 757 ircomm_tty_getvalue_confirm);
758 758
759 if (self->service_type == IRCOMM_3_WIRE_RAW) { 759 if (self->service_type == IRCOMM_3_WIRE_RAW) {
760 iriap_getvaluebyclass_request(self->iriap, self->saddr, 760 iriap_getvaluebyclass_request(self->iriap, self->saddr,
761 self->daddr, "IrLPT", 761 self->daddr, "IrLPT",
762 "IrDA:IrLMP:LsapSel"); 762 "IrDA:IrLMP:LsapSel");
763 ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL); 763 ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL);
764 } else { 764 } else {
765 iriap_getvaluebyclass_request(self->iriap, self->saddr, 765 iriap_getvaluebyclass_request(self->iriap, self->saddr,
766 self->daddr, 766 self->daddr,
767 "IrDA:IrCOMM", 767 "IrDA:IrCOMM",
768 "Parameters"); 768 "Parameters");
769 769
770 ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS); 770 ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
@@ -783,7 +783,7 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
783#if 1 783#if 1
784 /* Give up */ 784 /* Give up */
785#else 785#else
786 /* Try to discover any remote devices */ 786 /* Try to discover any remote devices */
787 ircomm_tty_start_watchdog_timer(self, 3*HZ); 787 ircomm_tty_start_watchdog_timer(self, 3*HZ);
788 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS); 788 irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
789#endif 789#endif
@@ -805,9 +805,9 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
805 * Querying the remote LM-IAS for IrCOMM parameters 805 * Querying the remote LM-IAS for IrCOMM parameters
806 * 806 *
807 */ 807 */
808static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, 808static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
809 IRCOMM_TTY_EVENT event, 809 IRCOMM_TTY_EVENT event,
810 struct sk_buff *skb, 810 struct sk_buff *skb,
811 struct ircomm_tty_info *info) 811 struct ircomm_tty_info *info)
812{ 812{
813 int ret = 0; 813 int ret = 0;
@@ -822,12 +822,12 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
822 __FUNCTION__); 822 __FUNCTION__);
823 return -EBUSY; 823 return -EBUSY;
824 } 824 }
825 825
826 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, 826 self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
827 ircomm_tty_getvalue_confirm); 827 ircomm_tty_getvalue_confirm);
828 828
829 iriap_getvaluebyclass_request(self->iriap, self->saddr, 829 iriap_getvaluebyclass_request(self->iriap, self->saddr,
830 self->daddr, "IrDA:IrCOMM", 830 self->daddr, "IrDA:IrCOMM",
831 "IrDA:TinyTP:LsapSel"); 831 "IrDA:TinyTP:LsapSel");
832 832
833 ircomm_tty_start_watchdog_timer(self, 3*HZ); 833 ircomm_tty_start_watchdog_timer(self, 3*HZ);
@@ -836,7 +836,7 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
836 case IRCOMM_TTY_WD_TIMER_EXPIRED: 836 case IRCOMM_TTY_WD_TIMER_EXPIRED:
837 /* Go back to search mode */ 837 /* Go back to search mode */
838 ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); 838 ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
839 ircomm_tty_start_watchdog_timer(self, 3*HZ); 839 ircomm_tty_start_watchdog_timer(self, 3*HZ);
840 break; 840 break;
841 case IRCOMM_TTY_CONNECT_INDICATION: 841 case IRCOMM_TTY_CONNECT_INDICATION:
842 del_timer(&self->watchdog_timer); 842 del_timer(&self->watchdog_timer);
@@ -863,9 +863,9 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
863 * Query remote LM-IAS for the LSAP selector which we can connect to 863 * Query remote LM-IAS for the LSAP selector which we can connect to
864 * 864 *
865 */ 865 */
866static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, 866static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
867 IRCOMM_TTY_EVENT event, 867 IRCOMM_TTY_EVENT event,
868 struct sk_buff *skb, 868 struct sk_buff *skb,
869 struct ircomm_tty_info *info) 869 struct ircomm_tty_info *info)
870{ 870{
871 int ret = 0; 871 int ret = 0;
@@ -877,7 +877,7 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
877 case IRCOMM_TTY_GOT_LSAPSEL: 877 case IRCOMM_TTY_GOT_LSAPSEL:
878 /* Connect to remote device */ 878 /* Connect to remote device */
879 ret = ircomm_connect_request(self->ircomm, self->dlsap_sel, 879 ret = ircomm_connect_request(self->ircomm, self->dlsap_sel,
880 self->saddr, self->daddr, 880 self->saddr, self->daddr,
881 NULL, self->service_type); 881 NULL, self->service_type);
882 ircomm_tty_start_watchdog_timer(self, 3*HZ); 882 ircomm_tty_start_watchdog_timer(self, 3*HZ);
883 ircomm_tty_next_state(self, IRCOMM_TTY_SETUP); 883 ircomm_tty_next_state(self, IRCOMM_TTY_SETUP);
@@ -912,9 +912,9 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
912 * Trying to connect 912 * Trying to connect
913 * 913 *
914 */ 914 */
915static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, 915static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
916 IRCOMM_TTY_EVENT event, 916 IRCOMM_TTY_EVENT event,
917 struct sk_buff *skb, 917 struct sk_buff *skb,
918 struct ircomm_tty_info *info) 918 struct ircomm_tty_info *info)
919{ 919{
920 int ret = 0; 920 int ret = 0;
@@ -926,10 +926,10 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
926 case IRCOMM_TTY_CONNECT_CONFIRM: 926 case IRCOMM_TTY_CONNECT_CONFIRM:
927 del_timer(&self->watchdog_timer); 927 del_timer(&self->watchdog_timer);
928 ircomm_tty_ias_unregister(self); 928 ircomm_tty_ias_unregister(self);
929 929
930 /* 930 /*
931 * Send initial parameters. This will also send out queued 931 * Send initial parameters. This will also send out queued
932 * parameters waiting for the connection to come up 932 * parameters waiting for the connection to come up
933 */ 933 */
934 ircomm_tty_send_initial_parameters(self); 934 ircomm_tty_send_initial_parameters(self);
935 ircomm_tty_link_established(self); 935 ircomm_tty_link_established(self);
@@ -938,7 +938,7 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
938 case IRCOMM_TTY_CONNECT_INDICATION: 938 case IRCOMM_TTY_CONNECT_INDICATION:
939 del_timer(&self->watchdog_timer); 939 del_timer(&self->watchdog_timer);
940 ircomm_tty_ias_unregister(self); 940 ircomm_tty_ias_unregister(self);
941 941
942 /* Accept connection */ 942 /* Accept connection */
943 ircomm_connect_response(self->ircomm, NULL); 943 ircomm_connect_response(self->ircomm, NULL);
944 ircomm_tty_next_state(self, IRCOMM_TTY_READY); 944 ircomm_tty_next_state(self, IRCOMM_TTY_READY);
@@ -966,9 +966,9 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
966 * IrCOMM is now connected 966 * IrCOMM is now connected
967 * 967 *
968 */ 968 */
969static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, 969static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
970 IRCOMM_TTY_EVENT event, 970 IRCOMM_TTY_EVENT event,
971 struct sk_buff *skb, 971 struct sk_buff *skb,
972 struct ircomm_tty_info *info) 972 struct ircomm_tty_info *info)
973{ 973{
974 int ret = 0; 974 int ret = 0;
@@ -976,7 +976,7 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
976 switch (event) { 976 switch (event) {
977 case IRCOMM_TTY_DATA_REQUEST: 977 case IRCOMM_TTY_DATA_REQUEST:
978 ret = ircomm_data_request(self->ircomm, skb); 978 ret = ircomm_data_request(self->ircomm, skb);
979 break; 979 break;
980 case IRCOMM_TTY_DETACH_CABLE: 980 case IRCOMM_TTY_DETACH_CABLE:
981 ircomm_disconnect_request(self->ircomm, NULL); 981 ircomm_disconnect_request(self->ircomm, NULL);
982 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 982 ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 75e39ea599d8..a5174e6e7ad3 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -1,31 +1,31 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: ircomm_tty_ioctl.c 3 * Filename: ircomm_tty_ioctl.c
4 * Version: 4 * Version:
5 * Description: 5 * Description:
6 * Status: Experimental. 6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no> 7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Thu Jun 10 14:39:09 1999 8 * Created at: Thu Jun 10 14:39:09 1999
9 * Modified at: Wed Jan 5 14:45:43 2000 9 * Modified at: Wed Jan 5 14:45:43 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. 12 * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
13 * 13 *
14 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as 15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * This program is distributed in the hope that it will be useful, 19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details. 22 * GNU General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA 27 * MA 02111-1307 USA
28 * 28 *
29 ********************************************************************/ 29 ********************************************************************/
30 30
31#include <linux/init.h> 31#include <linux/init.h>
@@ -75,7 +75,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
75 } 75 }
76 if (cflag & CSTOPB) 76 if (cflag & CSTOPB)
77 cval |= IRCOMM_2_STOP_BIT; 77 cval |= IRCOMM_2_STOP_BIT;
78 78
79 if (cflag & PARENB) 79 if (cflag & PARENB)
80 cval |= IRCOMM_PARITY_ENABLE; 80 cval |= IRCOMM_PARITY_ENABLE;
81 if (!(cflag & PARODD)) 81 if (!(cflag & PARODD))
@@ -88,7 +88,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
88 88
89 self->settings.data_rate = baud; 89 self->settings.data_rate = baud;
90 ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE); 90 ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
91 91
92 /* CTS flow control flag and modem status interrupts */ 92 /* CTS flow control flag and modem status interrupts */
93 if (cflag & CRTSCTS) { 93 if (cflag & CRTSCTS) {
94 self->flags |= ASYNC_CTS_FLOW; 94 self->flags |= ASYNC_CTS_FLOW;
@@ -104,7 +104,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
104 self->flags &= ~ASYNC_CHECK_CD; 104 self->flags &= ~ASYNC_CHECK_CD;
105 else 105 else
106 self->flags |= ASYNC_CHECK_CD; 106 self->flags |= ASYNC_CHECK_CD;
107#if 0 107#if 0
108 /* 108 /*
109 * Set up parity check flag 109 * Set up parity check flag
110 */ 110 */
@@ -113,7 +113,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
113 driver->read_status_mask |= LSR_FE | LSR_PE; 113 driver->read_status_mask |= LSR_FE | LSR_PE;
114 if (I_BRKINT(driver->tty) || I_PARMRK(driver->tty)) 114 if (I_BRKINT(driver->tty) || I_PARMRK(driver->tty))
115 driver->read_status_mask |= LSR_BI; 115 driver->read_status_mask |= LSR_BI;
116 116
117 /* 117 /*
118 * Characters to ignore 118 * Characters to ignore
119 */ 119 */
@@ -124,17 +124,17 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
124 if (I_IGNBRK(self->tty)) { 124 if (I_IGNBRK(self->tty)) {
125 self->ignore_status_mask |= LSR_BI; 125 self->ignore_status_mask |= LSR_BI;
126 /* 126 /*
 127 * If we're ignoring parity and break indicators, ignore 127 * If we're ignoring parity and break indicators, ignore
128 * overruns too. (For real raw support). 128 * overruns too. (For real raw support).
129 */ 129 */
130 if (I_IGNPAR(self->tty)) 130 if (I_IGNPAR(self->tty))
131 self->ignore_status_mask |= LSR_OE; 131 self->ignore_status_mask |= LSR_OE;
132 } 132 }
133#endif 133#endif
134 self->settings.data_format = cval; 134 self->settings.data_format = cval;
135 135
136 ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE); 136 ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
137 ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE); 137 ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
138} 138}
139 139
140/* 140/*
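
ircomm_tty_change_speed() above distills the termios c_cflag into the IrCOMM wire format: word size from CSIZE, a stop-bit flag from CSTOPB, parity from PARENB/PARODD, and CRTSCTS decides whether ASYNC_CTS_FLOW is set. A user-space sketch of the cflag-to-format part follows; the IRCOMM_* bit values are illustrative, the real ones live in the kernel's irda headers.

/* Sketch of the termios -> IrCOMM data_format mapping done by
 * ircomm_tty_change_speed(). IRCOMM_* values are illustrative. */
#include <stdio.h>
#include <termios.h>

#define IRCOMM_WSIZE_7       0x02
#define IRCOMM_WSIZE_8       0x03
#define IRCOMM_2_STOP_BIT    0x04
#define IRCOMM_PARITY_ENABLE 0x08
#define IRCOMM_PARITY_EVEN   0x10

static unsigned char cflag_to_format(tcflag_t cflag)
{
    unsigned char cval;

    switch (cflag & CSIZE) {
    case CS7: cval = IRCOMM_WSIZE_7; break;
    default:  cval = IRCOMM_WSIZE_8; break;   /* CS8 and anything else */
    }
    if (cflag & CSTOPB)
        cval |= IRCOMM_2_STOP_BIT;
    if (cflag & PARENB)
        cval |= IRCOMM_PARITY_ENABLE;
    if (!(cflag & PARODD))
        cval |= IRCOMM_PARITY_EVEN;
    return cval;
}

int main(void)
{
    printf("8N1 -> 0x%02x\n", cflag_to_format(CS8));
    printf("7E1 -> 0x%02x\n", cflag_to_format(CS7 | PARENB));
    return 0;
}
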
@@ -145,7 +145,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
145 * should be prepared to accept the case where old == NULL, and try to 145 * should be prepared to accept the case where old == NULL, and try to
146 * do something rational. 146 * do something rational.
147 */ 147 */
148void ircomm_tty_set_termios(struct tty_struct *tty, 148void ircomm_tty_set_termios(struct tty_struct *tty,
149 struct ktermios *old_termios) 149 struct ktermios *old_termios)
150{ 150{
151 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 151 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
@@ -153,8 +153,8 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
153 153
154 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 154 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
155 155
156 if ((cflag == old_termios->c_cflag) && 156 if ((cflag == old_termios->c_cflag) &&
157 (RELEVANT_IFLAG(tty->termios->c_iflag) == 157 (RELEVANT_IFLAG(tty->termios->c_iflag) ==
158 RELEVANT_IFLAG(old_termios->c_iflag))) 158 RELEVANT_IFLAG(old_termios->c_iflag)))
159 { 159 {
160 return; 160 return;
@@ -168,21 +168,21 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
168 self->settings.dte &= ~(IRCOMM_DTR|IRCOMM_RTS); 168 self->settings.dte &= ~(IRCOMM_DTR|IRCOMM_RTS);
169 ircomm_param_request(self, IRCOMM_DTE, TRUE); 169 ircomm_param_request(self, IRCOMM_DTE, TRUE);
170 } 170 }
171 171
172 /* Handle transition away from B0 status */ 172 /* Handle transition away from B0 status */
173 if (!(old_termios->c_cflag & CBAUD) && 173 if (!(old_termios->c_cflag & CBAUD) &&
174 (cflag & CBAUD)) { 174 (cflag & CBAUD)) {
175 self->settings.dte |= IRCOMM_DTR; 175 self->settings.dte |= IRCOMM_DTR;
176 if (!(tty->termios->c_cflag & CRTSCTS) || 176 if (!(tty->termios->c_cflag & CRTSCTS) ||
177 !test_bit(TTY_THROTTLED, &tty->flags)) { 177 !test_bit(TTY_THROTTLED, &tty->flags)) {
178 self->settings.dte |= IRCOMM_RTS; 178 self->settings.dte |= IRCOMM_RTS;
179 } 179 }
180 ircomm_param_request(self, IRCOMM_DTE, TRUE); 180 ircomm_param_request(self, IRCOMM_DTE, TRUE);
181 } 181 }
182 182
183 /* Handle turning off CRTSCTS */ 183 /* Handle turning off CRTSCTS */
184 if ((old_termios->c_cflag & CRTSCTS) && 184 if ((old_termios->c_cflag & CRTSCTS) &&
185 !(tty->termios->c_cflag & CRTSCTS)) 185 !(tty->termios->c_cflag & CRTSCTS))
186 { 186 {
187 tty->hw_stopped = 0; 187 tty->hw_stopped = 0;
188 ircomm_tty_start(tty); 188 ircomm_tty_start(tty);
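
The set_termios hunk above is the standard serial-driver treatment of B0: dropping the speed to B0 clears DTR and RTS, leaving B0 raises DTR (and RTS unless the port is throttled under CRTSCTS), and turning CRTSCTS off un-stops the hardware and restarts output. A compact model of those three transitions, with the DTE signalling reduced to prints and fallback values for CBAUD/CRTSCTS in case the libc headers hide them:

/* Model of the B0 / CRTSCTS transitions handled in ircomm_tty_set_termios(). */
#include <stdio.h>
#include <termios.h>

#ifndef CBAUD
#define CBAUD   0010017            /* fallback; normally from <termios.h> */
#endif
#ifndef CRTSCTS
#define CRTSCTS 020000000000       /* fallback; normally from <termios.h> */
#endif

#define DTR 0x01
#define RTS 0x02

struct port { unsigned int dte; int throttled; int hw_stopped; };

static void set_termios(struct port *p, tcflag_t old_cflag, tcflag_t new_cflag)
{
    /* Into B0: hang up the modem lines. */
    if ((old_cflag & CBAUD) && !(new_cflag & CBAUD)) {
        p->dte &= ~(DTR | RTS);
        puts("B0: drop DTR and RTS");
    }
    /* Out of B0: raise DTR, and RTS unless throttled under CRTSCTS. */
    if (!(old_cflag & CBAUD) && (new_cflag & CBAUD)) {
        p->dte |= DTR;
        if (!(new_cflag & CRTSCTS) || !p->throttled)
            p->dte |= RTS;
        puts("leaving B0: raise DTR (and maybe RTS)");
    }
    /* CRTSCTS switched off: nothing can hold output any more. */
    if ((old_cflag & CRTSCTS) && !(new_cflag & CRTSCTS)) {
        p->hw_stopped = 0;
        puts("CRTSCTS off: restart output");
    }
}

int main(void)
{
    struct port p = { 0, 0, 1 };
    set_termios(&p, B9600 | CRTSCTS, B9600);   /* turn hardware flow off */
    set_termios(&p, B9600, 0);                 /* drop to B0 */
    set_termios(&p, 0, B9600);                 /* back up from B0 */
    return 0;
}
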
@@ -192,7 +192,7 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
192/* 192/*
193 * Function ircomm_tty_tiocmget (tty, file) 193 * Function ircomm_tty_tiocmget (tty, file)
194 * 194 *
195 * 195 *
196 * 196 *
197 */ 197 */
198int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file) 198int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file)
@@ -217,12 +217,12 @@ int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file)
217/* 217/*
218 * Function ircomm_tty_tiocmset (tty, file, set, clear) 218 * Function ircomm_tty_tiocmset (tty, file, set, clear)
219 * 219 *
220 * 220 *
221 * 221 *
222 */ 222 */
223int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file, 223int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file,
224 unsigned int set, unsigned int clear) 224 unsigned int set, unsigned int clear)
225{ 225{
226 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 226 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
227 227
228 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 228 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
@@ -249,21 +249,21 @@ int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file,
249 self->settings.dte |= IRCOMM_DELTA_DTR; 249 self->settings.dte |= IRCOMM_DELTA_DTR;
250 250
251 ircomm_param_request(self, IRCOMM_DTE, TRUE); 251 ircomm_param_request(self, IRCOMM_DTE, TRUE);
252 252
253 return 0; 253 return 0;
254} 254}
255 255
256/* 256/*
257 * Function get_serial_info (driver, retinfo) 257 * Function get_serial_info (driver, retinfo)
258 * 258 *
259 * 259 *
260 * 260 *
261 */ 261 */
262static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self, 262static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
263 struct serial_struct __user *retinfo) 263 struct serial_struct __user *retinfo)
264{ 264{
265 struct serial_struct info; 265 struct serial_struct info;
266 266
267 if (!retinfo) 267 if (!retinfo)
268 return -EFAULT; 268 return -EFAULT;
269 269
@@ -277,11 +277,11 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
277 info.closing_wait = self->closing_wait; 277 info.closing_wait = self->closing_wait;
278 278
279 /* For compatibility */ 279 /* For compatibility */
280 info.type = PORT_16550A; 280 info.type = PORT_16550A;
281 info.port = 0; 281 info.port = 0;
282 info.irq = 0; 282 info.irq = 0;
283 info.xmit_fifo_size = 0; 283 info.xmit_fifo_size = 0;
284 info.hub6 = 0; 284 info.hub6 = 0;
285 info.custom_divisor = 0; 285 info.custom_divisor = 0;
286 286
287 if (copy_to_user(retinfo, &info, sizeof(*retinfo))) 287 if (copy_to_user(retinfo, &info, sizeof(*retinfo)))
@@ -293,7 +293,7 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
293/* 293/*
294 * Function set_serial_info (driver, new_info) 294 * Function set_serial_info (driver, new_info)
295 * 295 *
296 * 296 *
297 * 297 *
298 */ 298 */
299static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self, 299static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
@@ -311,7 +311,7 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
311 311
312 state = self 312 state = self
313 old_state = *self; 313 old_state = *self;
314 314
315 if (!capable(CAP_SYS_ADMIN)) { 315 if (!capable(CAP_SYS_ADMIN)) {
316 if ((new_serial.baud_base != state->settings.data_rate) || 316 if ((new_serial.baud_base != state->settings.data_rate) ||
317 (new_serial.close_delay != state->close_delay) || 317 (new_serial.close_delay != state->close_delay) ||
@@ -368,10 +368,10 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
368/* 368/*
369 * Function ircomm_tty_ioctl (tty, file, cmd, arg) 369 * Function ircomm_tty_ioctl (tty, file, cmd, arg)
370 * 370 *
371 * 371 *
372 * 372 *
373 */ 373 */
374int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file, 374int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
375 unsigned int cmd, unsigned long arg) 375 unsigned int cmd, unsigned long arg)
376{ 376{
377 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; 377 struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
@@ -416,7 +416,7 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file,
416 put_user(cnow.brk, &p_cuser->brk) || 416 put_user(cnow.brk, &p_cuser->brk) ||
417 put_user(cnow.buf_overrun, &p_cuser->buf_overrun)) 417 put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
418 return -EFAULT; 418 return -EFAULT;
419#endif 419#endif
420 return 0; 420 return 0;
421 default: 421 default:
422 ret = -ENOIOCTLCMD; /* ioctls which we must ignore */ 422 ret = -ENOIOCTLCMD; /* ioctls which we must ignore */
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 7e7a31798d8d..e717801b38f9 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -341,11 +341,11 @@ static void irda_task_timer_expired(void *data)
341 */ 341 */
342static void irda_device_setup(struct net_device *dev) 342static void irda_device_setup(struct net_device *dev)
343{ 343{
344 dev->hard_header_len = 0; 344 dev->hard_header_len = 0;
345 dev->addr_len = LAP_ALEN; 345 dev->addr_len = LAP_ALEN;
346 346
347 dev->type = ARPHRD_IRDA; 347 dev->type = ARPHRD_IRDA;
348 dev->tx_queue_len = 8; /* Window size + 1 s-frame */ 348 dev->tx_queue_len = 8; /* Window size + 1 s-frame */
349 349
350 memset(dev->broadcast, 0xff, LAP_ALEN); 350 memset(dev->broadcast, 0xff, LAP_ALEN);
351 351
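
irda_device_setup() above is the IrDA analogue of ether_setup(): a setup callback handed to the netdev allocator that fills in the link-type fields (ARPHRD_IRDA, a LAP_ALEN-byte address, an all-ones broadcast, a small tx queue) before alloc_irdadev() hands the device back. A hedged user-space sketch of the same shape, with a stand-in struct instead of struct net_device and the constant values treated as assumptions:

/* Sketch of the allocator/setup-callback pattern behind alloc_irdadev().
 * The struct below stands in for struct net_device; LAP_ALEN and ARPHRD_IRDA
 * values are assumptions, not taken from the kernel headers. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define LAP_ALEN    4      /* assumed 32-bit IrLAP device address */
#define ARPHRD_IRDA 783    /* assumed value; defined in <linux/if_arp.h> */

struct fake_netdev {
    int type;
    int addr_len;
    int hard_header_len;
    int tx_queue_len;
    unsigned char broadcast[8];
};

static void irda_setup(struct fake_netdev *dev)
{
    dev->hard_header_len = 0;
    dev->addr_len        = LAP_ALEN;
    dev->type            = ARPHRD_IRDA;
    dev->tx_queue_len    = 8;                 /* window size + 1 s-frame */
    memset(dev->broadcast, 0xff, LAP_ALEN);   /* IrLAP broadcast address */
}

static struct fake_netdev *alloc_dev(void (*setup)(struct fake_netdev *))
{
    struct fake_netdev *dev = calloc(1, sizeof(*dev));
    if (dev)
        setup(dev);                           /* the allocator calls setup likewise */
    return dev;
}

int main(void)
{
    struct fake_netdev *dev = alloc_dev(irda_setup);
    printf("type=%d addr_len=%d qlen=%d\n", dev->type, dev->addr_len, dev->tx_queue_len);
    free(dev);
    return 0;
}
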
@@ -354,7 +354,7 @@ static void irda_device_setup(struct net_device *dev)
354} 354}
355 355
356/* 356/*
 357 * Function alloc_irdadev 357 * Function alloc_irdadev
358 * Allocates and sets up an IRDA device in a manner similar to 358 * Allocates and sets up an IRDA device in a manner similar to
359 * alloc_etherdev. 359 * alloc_etherdev.
360 */ 360 */
@@ -386,9 +386,9 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
386 /* Try to load the module needed */ 386 /* Try to load the module needed */
387 if (!reg && capable(CAP_SYS_MODULE)) { 387 if (!reg && capable(CAP_SYS_MODULE)) {
388 spin_unlock(&dongles->hb_spinlock); 388 spin_unlock(&dongles->hb_spinlock);
389 389
390 request_module("irda-dongle-%d", type); 390 request_module("irda-dongle-%d", type);
391 391
392 spin_lock(&dongles->hb_spinlock); 392 spin_lock(&dongles->hb_spinlock);
393 reg = hashbin_find(dongles, type, NULL); 393 reg = hashbin_find(dongles, type, NULL);
394 } 394 }
@@ -438,15 +438,15 @@ int irda_device_register_dongle(struct dongle_reg *new)
438 spin_lock(&dongles->hb_spinlock); 438 spin_lock(&dongles->hb_spinlock);
439 /* Check if this dongle has been registered before */ 439 /* Check if this dongle has been registered before */
440 if (hashbin_find(dongles, new->type, NULL)) { 440 if (hashbin_find(dongles, new->type, NULL)) {
441 IRDA_MESSAGE("%s: Dongle type %x already registered\n", 441 IRDA_MESSAGE("%s: Dongle type %x already registered\n",
442 __FUNCTION__, new->type); 442 __FUNCTION__, new->type);
443 } else { 443 } else {
444 /* Insert IrDA dongle into hashbin */ 444 /* Insert IrDA dongle into hashbin */
445 hashbin_insert(dongles, (irda_queue_t *) new, new->type, NULL); 445 hashbin_insert(dongles, (irda_queue_t *) new, new->type, NULL);
446 } 446 }
447 spin_unlock(&dongles->hb_spinlock); 447 spin_unlock(&dongles->hb_spinlock);
448 448
449 return 0; 449 return 0;
450} 450}
451EXPORT_SYMBOL(irda_device_register_dongle); 451EXPORT_SYMBOL(irda_device_register_dongle);
452 452
@@ -462,7 +462,7 @@ void irda_device_unregister_dongle(struct dongle_reg *dongle)
462 462
463 spin_lock(&dongles->hb_spinlock); 463 spin_lock(&dongles->hb_spinlock);
464 node = hashbin_remove(dongles, dongle->type, NULL); 464 node = hashbin_remove(dongles, dongle->type, NULL);
465 if (!node) 465 if (!node)
466 IRDA_ERROR("%s: dongle not found!\n", __FUNCTION__); 466 IRDA_ERROR("%s: dongle not found!\n", __FUNCTION__);
467 spin_unlock(&dongles->hb_spinlock); 467 spin_unlock(&dongles->hb_spinlock);
468} 468}
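The alloc_irdadev() comment above only says it works "in a manner similar to alloc_etherdev", so here is a sketch of the typical call pattern an IrDA driver of this era would use (my_priv, my_open, my_hard_xmit and my_probe are made-up names; alloc_irdadev(), register_netdev() and free_netdev() are the real calls, and irda_device_setup() above is what fills in dev->type, addr_len, tx_queue_len and the broadcast address).

	/* needs <linux/netdevice.h> and <net/irda/irda_device.h> */
	struct my_priv {
		int speed;			/* example private state */
	};

	static int my_open(struct net_device *dev);
	static int my_hard_xmit(struct sk_buff *skb, struct net_device *dev);

	static int my_probe(void)
	{
		struct net_device *dev;
		int err;

		dev = alloc_irdadev(sizeof(struct my_priv));
		if (!dev)
			return -ENOMEM;

		dev->open            = my_open;		/* pre-net_device_ops (2.6.20) style */
		dev->hard_start_xmit = my_hard_xmit;

		err = register_netdev(dev);
		if (err)
			free_netdev(dev);
		return err;
	}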
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 8f1c6d65b247..98b0fa965790 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -79,10 +79,10 @@ static int iriap_data_indication(void *instance, void *sap,
79 79
80static void iriap_watchdog_timer_expired(void *data); 80static void iriap_watchdog_timer_expired(void *data);
81 81
82static inline void iriap_start_watchdog_timer(struct iriap_cb *self, 82static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
83 int timeout) 83 int timeout)
84{ 84{
85 irda_start_timer(&self->watchdog_timer, timeout, self, 85 irda_start_timer(&self->watchdog_timer, timeout, self,
86 iriap_watchdog_timer_expired); 86 iriap_watchdog_timer_expired);
87} 87}
88 88
@@ -674,7 +674,7 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
674 if (attrib == NULL) { 674 if (attrib == NULL) {
675 IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr); 675 IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr);
676 iriap_getvaluebyclass_response(self, obj->id, 676 iriap_getvaluebyclass_response(self, obj->id,
677 IAS_ATTRIB_UNKNOWN, 677 IAS_ATTRIB_UNKNOWN,
678 &irias_missing); 678 &irias_missing);
679 return; 679 return;
680 } 680 }
@@ -971,7 +971,7 @@ static const char *ias_value_types[] = {
971 "IAS_STRING" 971 "IAS_STRING"
972}; 972};
973 973
974static inline struct ias_object *irias_seq_idx(loff_t pos) 974static inline struct ias_object *irias_seq_idx(loff_t pos)
975{ 975{
976 struct ias_object *obj; 976 struct ias_object *obj;
977 977
@@ -980,7 +980,7 @@ static inline struct ias_object *irias_seq_idx(loff_t pos)
980 if (pos-- == 0) 980 if (pos-- == 0)
981 break; 981 break;
982 } 982 }
983 983
984 return obj; 984 return obj;
985} 985}
986 986
@@ -995,7 +995,7 @@ static void *irias_seq_next(struct seq_file *seq, void *v, loff_t *pos)
995{ 995{
996 ++*pos; 996 ++*pos;
997 997
998 return (v == SEQ_START_TOKEN) 998 return (v == SEQ_START_TOKEN)
999 ? (void *) hashbin_get_first(irias_objects) 999 ? (void *) hashbin_get_first(irias_objects)
1000 : (void *) hashbin_get_next(irias_objects); 1000 : (void *) hashbin_get_next(irias_objects);
1001} 1001}
@@ -1027,7 +1027,7 @@ static int irias_seq_show(struct seq_file *seq, void *v)
1027 for (attrib = (struct ias_attrib *) hashbin_get_first(obj->attribs); 1027 for (attrib = (struct ias_attrib *) hashbin_get_first(obj->attribs);
1028 attrib != NULL; 1028 attrib != NULL;
1029 attrib = (struct ias_attrib *) hashbin_get_next(obj->attribs)) { 1029 attrib = (struct ias_attrib *) hashbin_get_next(obj->attribs)) {
1030 1030
1031 IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC, 1031 IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC,
1032 goto outloop; ); 1032 goto outloop; );
1033 1033
@@ -1046,14 +1046,14 @@ static int irias_seq_show(struct seq_file *seq, void *v)
1046 attrib->value->t.string); 1046 attrib->value->t.string);
1047 break; 1047 break;
1048 case IAS_OCT_SEQ: 1048 case IAS_OCT_SEQ:
1049 seq_printf(seq, "octet sequence (%d bytes)\n", 1049 seq_printf(seq, "octet sequence (%d bytes)\n",
1050 attrib->value->len); 1050 attrib->value->len);
1051 break; 1051 break;
1052 case IAS_MISSING: 1052 case IAS_MISSING:
1053 seq_puts(seq, "missing\n"); 1053 seq_puts(seq, "missing\n");
1054 break; 1054 break;
1055 default: 1055 default:
1056 seq_printf(seq, "type %d?\n", 1056 seq_printf(seq, "type %d?\n",
1057 attrib->value->type); 1057 attrib->value->type);
1058 } 1058 }
1059 seq_putc(seq, '\n'); 1059 seq_putc(seq, '\n');
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 2a571b43ebec..4adaae242b9e 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -57,8 +57,8 @@ static char *strndup(char *str, size_t max)
57 len = max; 57 len = max;
58 58
59 /* Allocate new string */ 59 /* Allocate new string */
60 new_str = kmalloc(len + 1, GFP_ATOMIC); 60 new_str = kmalloc(len + 1, GFP_ATOMIC);
61 if (new_str == NULL) { 61 if (new_str == NULL) {
62 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 62 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
63 return NULL; 63 return NULL;
64 } 64 }
@@ -78,7 +78,7 @@ static char *strndup(char *str, size_t max)
78 */ 78 */
79struct ias_object *irias_new_object( char *name, int id) 79struct ias_object *irias_new_object( char *name, int id)
80{ 80{
81 struct ias_object *obj; 81 struct ias_object *obj;
82 82
83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
84 84
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index 95cf1234ea17..a4c1c9545827 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_client.c 3 * Filename: irlan_client.c
4 * Version: 0.9 4 * Version: 0.9
5 * Description: IrDA LAN Access Protocol (IrLAN) Client 5 * Description: IrDA LAN Access Protocol (IrLAN) Client
@@ -11,17 +11,17 @@
11 * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov> 11 * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
12 * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk> 12 * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
13 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> 13 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
14 * 14 *
15 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, 15 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
16 * All Rights Reserved. 16 * All Rights Reserved.
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License as 19 * modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of 20 * published by the Free Software Foundation; either version 2 of
21 * the License, or (at your option) any later version. 21 * the License, or (at your option) any later version.
22 * 22 *
23 * Neither Dag Brattli nor University of Tromsø admit liability nor 23 * Neither Dag Brattli nor University of Tromsø admit liability nor
24 * provide warranty for any of this software. This material is 24 * provide warranty for any of this software. This material is
25 * provided "AS-IS" and at no charge. 25 * provided "AS-IS" and at no charge.
26 * 26 *
27 ********************************************************************/ 27 ********************************************************************/
@@ -54,35 +54,35 @@
54 54
55#undef CONFIG_IRLAN_GRATUITOUS_ARP 55#undef CONFIG_IRLAN_GRATUITOUS_ARP
56 56
57static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap, 57static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
58 LM_REASON reason, 58 LM_REASON reason,
59 struct sk_buff *); 59 struct sk_buff *);
60static int irlan_client_ctrl_data_indication(void *instance, void *sap, 60static int irlan_client_ctrl_data_indication(void *instance, void *sap,
61 struct sk_buff *skb); 61 struct sk_buff *skb);
62static void irlan_client_ctrl_connect_confirm(void *instance, void *sap, 62static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
63 struct qos_info *qos, 63 struct qos_info *qos,
64 __u32 max_sdu_size, 64 __u32 max_sdu_size,
65 __u8 max_header_size, 65 __u8 max_header_size,
66 struct sk_buff *); 66 struct sk_buff *);
67static void irlan_check_response_param(struct irlan_cb *self, char *param, 67static void irlan_check_response_param(struct irlan_cb *self, char *param,
68 char *value, int val_len); 68 char *value, int val_len);
69static void irlan_client_open_ctrl_tsap(struct irlan_cb *self); 69static void irlan_client_open_ctrl_tsap(struct irlan_cb *self);
70 70
71static void irlan_client_kick_timer_expired(void *data) 71static void irlan_client_kick_timer_expired(void *data)
72{ 72{
73 struct irlan_cb *self = (struct irlan_cb *) data; 73 struct irlan_cb *self = (struct irlan_cb *) data;
74 74
75 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 75 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
76 76
77 IRDA_ASSERT(self != NULL, return;); 77 IRDA_ASSERT(self != NULL, return;);
78 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 78 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
79 79
80 /* 80 /*
81 * If we are in peer mode, the client may not have got the discovery 81 * If we are in peer mode, the client may not have got the discovery
82 * indication it needs to make progress. If the client is still in 82 * indication it needs to make progress. If the client is still in
83 * IDLE state, we must kick it to, but only if the provider is not IDLE 83 * IDLE state, we must kick it to, but only if the provider is not IDLE
84 */ 84 */
85 if ((self->provider.access_type == ACCESS_PEER) && 85 if ((self->provider.access_type == ACCESS_PEER) &&
86 (self->client.state == IRLAN_IDLE) && 86 (self->client.state == IRLAN_IDLE) &&
87 (self->provider.state != IRLAN_IDLE)) { 87 (self->provider.state != IRLAN_IDLE)) {
88 irlan_client_wakeup(self, self->saddr, self->daddr); 88 irlan_client_wakeup(self, self->saddr, self->daddr);
@@ -92,8 +92,8 @@ static void irlan_client_kick_timer_expired(void *data)
92static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) 92static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
93{ 93{
94 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 94 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
95 95
96 irda_start_timer(&self->client.kick_timer, timeout, (void *) self, 96 irda_start_timer(&self->client.kick_timer, timeout, (void *) self,
97 irlan_client_kick_timer_expired); 97 irlan_client_kick_timer_expired);
98} 98}
99 99
@@ -110,11 +110,11 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
110 IRDA_ASSERT(self != NULL, return;); 110 IRDA_ASSERT(self != NULL, return;);
111 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 111 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
112 112
113 /* 113 /*
114 * Check if we are already awake, or if we are a provider in direct 114 * Check if we are already awake, or if we are a provider in direct
115 * mode (in that case we must leave the client idle 115 * mode (in that case we must leave the client idle
116 */ 116 */
117 if ((self->client.state != IRLAN_IDLE) || 117 if ((self->client.state != IRLAN_IDLE) ||
118 (self->provider.access_type == ACCESS_DIRECT)) 118 (self->provider.access_type == ACCESS_DIRECT))
119 { 119 {
120 IRDA_DEBUG(0, "%s(), already awake!\n", __FUNCTION__ ); 120 IRDA_DEBUG(0, "%s(), already awake!\n", __FUNCTION__ );
@@ -135,7 +135,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
135 irlan_open_data_tsap(self); 135 irlan_open_data_tsap(self);
136 136
137 irlan_do_client_event(self, IRLAN_DISCOVERY_INDICATION, NULL); 137 irlan_do_client_event(self, IRLAN_DISCOVERY_INDICATION, NULL);
138 138
139 /* Start kick timer */ 139 /* Start kick timer */
140 irlan_client_start_kick_timer(self, 2*HZ); 140 irlan_client_start_kick_timer(self, 2*HZ);
141} 141}
@@ -148,11 +148,11 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
148 */ 148 */
149void irlan_client_discovery_indication(discinfo_t *discovery, 149void irlan_client_discovery_indication(discinfo_t *discovery,
150 DISCOVERY_MODE mode, 150 DISCOVERY_MODE mode,
151 void *priv) 151 void *priv)
152{ 152{
153 struct irlan_cb *self; 153 struct irlan_cb *self;
154 __u32 saddr, daddr; 154 __u32 saddr, daddr;
155 155
156 IRDA_DEBUG(1, "%s()\n", __FUNCTION__ ); 156 IRDA_DEBUG(1, "%s()\n", __FUNCTION__ );
157 157
158 IRDA_ASSERT(discovery != NULL, return;); 158 IRDA_ASSERT(discovery != NULL, return;);
@@ -177,35 +177,35 @@ void irlan_client_discovery_indication(discinfo_t *discovery,
177 177
178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ , 178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ ,
179 daddr); 179 daddr);
180 180
181 irlan_client_wakeup(self, saddr, daddr); 181 irlan_client_wakeup(self, saddr, daddr);
182 } 182 }
183IRDA_ASSERT_LABEL(out:) 183IRDA_ASSERT_LABEL(out:)
184 rcu_read_unlock(); 184 rcu_read_unlock();
185} 185}
186 186
187/* 187/*
188 * Function irlan_client_data_indication (handle, skb) 188 * Function irlan_client_data_indication (handle, skb)
189 * 189 *
190 * This function gets the data that is received on the control channel 190 * This function gets the data that is received on the control channel
191 * 191 *
192 */ 192 */
193static int irlan_client_ctrl_data_indication(void *instance, void *sap, 193static int irlan_client_ctrl_data_indication(void *instance, void *sap,
194 struct sk_buff *skb) 194 struct sk_buff *skb)
195{ 195{
196 struct irlan_cb *self; 196 struct irlan_cb *self;
197 197
198 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 198 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
199 199
200 self = (struct irlan_cb *) instance; 200 self = (struct irlan_cb *) instance;
201 201
202 IRDA_ASSERT(self != NULL, return -1;); 202 IRDA_ASSERT(self != NULL, return -1;);
203 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 203 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
204 IRDA_ASSERT(skb != NULL, return -1;); 204 IRDA_ASSERT(skb != NULL, return -1;);
205
206 irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
207 205
208 /* Ready for a new command */ 206 irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
207
208 /* Ready for a new command */
209 IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __FUNCTION__ ); 209 IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __FUNCTION__ );
210 self->client.tx_busy = FALSE; 210 self->client.tx_busy = FALSE;
211 211
@@ -215,27 +215,27 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
215 return 0; 215 return 0;
216} 216}
217 217
218static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap, 218static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
219 LM_REASON reason, 219 LM_REASON reason,
220 struct sk_buff *userdata) 220 struct sk_buff *userdata)
221{ 221{
222 struct irlan_cb *self; 222 struct irlan_cb *self;
223 struct tsap_cb *tsap; 223 struct tsap_cb *tsap;
224 struct sk_buff *skb; 224 struct sk_buff *skb;
225 225
226 IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason); 226 IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason);
227 227
228 self = (struct irlan_cb *) instance; 228 self = (struct irlan_cb *) instance;
229 tsap = (struct tsap_cb *) sap; 229 tsap = (struct tsap_cb *) sap;
230 230
231 IRDA_ASSERT(self != NULL, return;); 231 IRDA_ASSERT(self != NULL, return;);
232 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 232 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
233 IRDA_ASSERT(tsap != NULL, return;); 233 IRDA_ASSERT(tsap != NULL, return;);
234 IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;); 234 IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
235 235
236 IRDA_ASSERT(tsap == self->client.tsap_ctrl, return;); 236 IRDA_ASSERT(tsap == self->client.tsap_ctrl, return;);
237 237
238 /* Remove frames queued on the control channel */ 238 /* Remove frames queued on the control channel */
239 while ((skb = skb_dequeue(&self->client.txq)) != NULL) { 239 while ((skb = skb_dequeue(&self->client.txq)) != NULL) {
240 dev_kfree_skb(skb); 240 dev_kfree_skb(skb);
241 } 241 }
@@ -272,7 +272,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
272 notify.disconnect_indication = irlan_client_ctrl_disconnect_indication; 272 notify.disconnect_indication = irlan_client_ctrl_disconnect_indication;
273 notify.instance = self; 273 notify.instance = self;
274 strlcpy(notify.name, "IrLAN ctrl (c)", sizeof(notify.name)); 274 strlcpy(notify.name, "IrLAN ctrl (c)", sizeof(notify.name));
275 275
276 tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); 276 tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
277 if (!tsap) { 277 if (!tsap) {
278 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ ); 278 IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ );
@@ -287,11 +287,11 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
287 * Connection to peer IrLAN laye confirmed 287 * Connection to peer IrLAN laye confirmed
288 * 288 *
289 */ 289 */
290static void irlan_client_ctrl_connect_confirm(void *instance, void *sap, 290static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
291 struct qos_info *qos, 291 struct qos_info *qos,
292 __u32 max_sdu_size, 292 __u32 max_sdu_size,
293 __u8 max_header_size, 293 __u8 max_header_size,
294 struct sk_buff *skb) 294 struct sk_buff *skb)
295{ 295{
296 struct irlan_cb *self; 296 struct irlan_cb *self;
297 297
@@ -316,7 +316,7 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
316 * Print return code of request to peer IrLAN layer. 316 * Print return code of request to peer IrLAN layer.
317 * 317 *
318 */ 318 */
319static void print_ret_code(__u8 code) 319static void print_ret_code(__u8 code)
320{ 320{
321 switch(code) { 321 switch(code) {
322 case 0: 322 case 0:
@@ -358,7 +358,7 @@ static void print_ret_code(__u8 code)
358/* 358/*
359 * Function irlan_client_parse_response (self, skb) 359 * Function irlan_client_parse_response (self, skb)
360 * 360 *
361 * Extract all parameters from received buffer, then feed them to 361 * Extract all parameters from received buffer, then feed them to
362 * check_params for parsing 362 * check_params for parsing
363 */ 363 */
364void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) 364void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
@@ -369,30 +369,30 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
369 int ret; 369 int ret;
370 __u16 val_len; 370 __u16 val_len;
371 int i; 371 int i;
372 char *name; 372 char *name;
373 char *value; 373 char *value;
374
375 IRDA_ASSERT(skb != NULL, return;);
374 376
375 IRDA_ASSERT(skb != NULL, return;);
376
377 IRDA_DEBUG(4, "%s() skb->len=%d\n", __FUNCTION__ , (int) skb->len); 377 IRDA_DEBUG(4, "%s() skb->len=%d\n", __FUNCTION__ , (int) skb->len);
378 378
379 IRDA_ASSERT(self != NULL, return;); 379 IRDA_ASSERT(self != NULL, return;);
380 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 380 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
381 381
382 if (!skb) { 382 if (!skb) {
383 IRDA_ERROR("%s(), Got NULL skb!\n", __FUNCTION__); 383 IRDA_ERROR("%s(), Got NULL skb!\n", __FUNCTION__);
384 return; 384 return;
385 } 385 }
386 frame = skb->data; 386 frame = skb->data;
387 387
388 /* 388 /*
389 * Check return code and print it if not success 389 * Check return code and print it if not success
390 */ 390 */
391 if (frame[0]) { 391 if (frame[0]) {
392 print_ret_code(frame[0]); 392 print_ret_code(frame[0]);
393 return; 393 return;
394 } 394 }
395 395
396 name = kmalloc(255, GFP_ATOMIC); 396 name = kmalloc(255, GFP_ATOMIC);
397 if (!name) 397 if (!name)
398 return; 398 return;
@@ -406,11 +406,11 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
406 count = frame[1]; 406 count = frame[1];
407 407
408 IRDA_DEBUG(4, "%s(), got %d parameters\n", __FUNCTION__ , count); 408 IRDA_DEBUG(4, "%s(), got %d parameters\n", __FUNCTION__ , count);
409 409
410 ptr = frame+2; 410 ptr = frame+2;
411 411
412 /* For all parameters */ 412 /* For all parameters */
413 for (i=0; i<count;i++) { 413 for (i=0; i<count;i++) {
414 ret = irlan_extract_param(ptr, name, value, &val_len); 414 ret = irlan_extract_param(ptr, name, value, &val_len);
415 if (ret < 0) { 415 if (ret < 0) {
416 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ ); 416 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ );
@@ -418,7 +418,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
418 } 418 }
419 ptr += ret; 419 ptr += ret;
420 irlan_check_response_param(self, name, value, val_len); 420 irlan_check_response_param(self, name, value, val_len);
421 } 421 }
422 /* Cleanup */ 422 /* Cleanup */
423 kfree(name); 423 kfree(name);
424 kfree(value); 424 kfree(value);
@@ -430,8 +430,8 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
430 * Check which parameter is received and update local variables 430 * Check which parameter is received and update local variables
431 * 431 *
432 */ 432 */
433static void irlan_check_response_param(struct irlan_cb *self, char *param, 433static void irlan_check_response_param(struct irlan_cb *self, char *param,
434 char *value, int val_len) 434 char *value, int val_len)
435{ 435{
436 __u16 tmp_cpu; /* Temporary value in host order */ 436 __u16 tmp_cpu; /* Temporary value in host order */
437 __u8 *bytes; 437 __u8 *bytes;
@@ -465,7 +465,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
465 self->client.filter_type |= IRLAN_BROADCAST; 465 self->client.filter_type |= IRLAN_BROADCAST;
466 else if (strcmp(value, "IPX_SOCKET") == 0) 466 else if (strcmp(value, "IPX_SOCKET") == 0)
467 self->client.filter_type |= IRLAN_IPX_SOCKET; 467 self->client.filter_type |= IRLAN_IPX_SOCKET;
468 468
469 } 469 }
470 if (strcmp(param, "ACCESS_TYPE") == 0) { 470 if (strcmp(param, "ACCESS_TYPE") == 0) {
471 if (strcmp(value, "DIRECT") == 0) 471 if (strcmp(value, "DIRECT") == 0)
@@ -480,7 +480,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
480 } 480 }
481 /* IRLAN version */ 481 /* IRLAN version */
482 if (strcmp(param, "IRLAN_VER") == 0) { 482 if (strcmp(param, "IRLAN_VER") == 0) {
483 IRDA_DEBUG(4, "IrLAN version %d.%d\n", (__u8) value[0], 483 IRDA_DEBUG(4, "IrLAN version %d.%d\n", (__u8) value[0],
484 (__u8) value[1]); 484 (__u8) value[1]);
485 485
486 self->version[0] = value[0]; 486 self->version[0] = value[0];
@@ -497,17 +497,17 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
497 memcpy(&tmp_cpu, value, 2); /* Align value */ 497 memcpy(&tmp_cpu, value, 2); /* Align value */
498 le16_to_cpus(&tmp_cpu); /* Convert to host order */ 498 le16_to_cpus(&tmp_cpu); /* Convert to host order */
499 self->client.recv_arb_val = tmp_cpu; 499 self->client.recv_arb_val = tmp_cpu;
500 IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __FUNCTION__ , 500 IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __FUNCTION__ ,
501 self->client.recv_arb_val); 501 self->client.recv_arb_val);
502 } 502 }
503 if (strcmp(param, "MAX_FRAME") == 0) { 503 if (strcmp(param, "MAX_FRAME") == 0) {
504 memcpy(&tmp_cpu, value, 2); /* Align value */ 504 memcpy(&tmp_cpu, value, 2); /* Align value */
505 le16_to_cpus(&tmp_cpu); /* Convert to host order */ 505 le16_to_cpus(&tmp_cpu); /* Convert to host order */
506 self->client.max_frame = tmp_cpu; 506 self->client.max_frame = tmp_cpu;
507 IRDA_DEBUG(4, "%s(), max frame=%d\n", __FUNCTION__ , 507 IRDA_DEBUG(4, "%s(), max frame=%d\n", __FUNCTION__ ,
508 self->client.max_frame); 508 self->client.max_frame);
509 } 509 }
510 510
511 /* RECONNECT_KEY, in case the link goes down! */ 511 /* RECONNECT_KEY, in case the link goes down! */
512 if (strcmp(param, "RECONNECT_KEY") == 0) { 512 if (strcmp(param, "RECONNECT_KEY") == 0) {
513 IRDA_DEBUG(4, "Got reconnect key: "); 513 IRDA_DEBUG(4, "Got reconnect key: ");
@@ -521,9 +521,9 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
521 if (strcmp(param, "FILTER_ENTRY") == 0) { 521 if (strcmp(param, "FILTER_ENTRY") == 0) {
522 bytes = value; 522 bytes = value;
523 IRDA_DEBUG(4, "Ethernet address = %02x:%02x:%02x:%02x:%02x:%02x\n", 523 IRDA_DEBUG(4, "Ethernet address = %02x:%02x:%02x:%02x:%02x:%02x\n",
524 bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], 524 bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
525 bytes[5]); 525 bytes[5]);
526 for (i = 0; i < 6; i++) 526 for (i = 0; i < 6; i++)
527 self->dev->dev_addr[i] = bytes[i]; 527 self->dev->dev_addr[i] = bytes[i];
528 } 528 }
529} 529}
@@ -534,11 +534,11 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
534 * Got results from remote LM-IAS 534 * Got results from remote LM-IAS
535 * 535 *
536 */ 536 */
537void irlan_client_get_value_confirm(int result, __u16 obj_id, 537void irlan_client_get_value_confirm(int result, __u16 obj_id,
538 struct ias_value *value, void *priv) 538 struct ias_value *value, void *priv)
539{ 539{
540 struct irlan_cb *self; 540 struct irlan_cb *self;
541 541
542 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 542 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
543 543
544 IRDA_ASSERT(priv != NULL, return;); 544 IRDA_ASSERT(priv != NULL, return;);
@@ -553,7 +553,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
553 /* Check if request succeeded */ 553 /* Check if request succeeded */
554 if (result != IAS_SUCCESS) { 554 if (result != IAS_SUCCESS) {
555 IRDA_DEBUG(2, "%s(), got NULL value!\n", __FUNCTION__ ); 555 IRDA_DEBUG(2, "%s(), got NULL value!\n", __FUNCTION__ );
556 irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, 556 irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL,
557 NULL); 557 NULL);
558 return; 558 return;
559 } 559 }
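The ARB_VAL/MAX_FRAME hunks above keep the memcpy-then-le16_to_cpus idiom for pulling a possibly unaligned little-endian 16-bit value out of the parameter buffer. Stripped down to a helper (read_le16_param is a made-up name; the two calls inside are exactly the ones used above):

	/* needs <linux/types.h>, <linux/string.h> and <asm/byteorder.h> */
	static __u16 read_le16_param(const char *value)
	{
		__u16 tmp_cpu;

		memcpy(&tmp_cpu, value, 2);	/* copy out: value may be unaligned */
		le16_to_cpus(&tmp_cpu);		/* little-endian wire order -> host order */
		return tmp_cpu;
	}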
diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c
index ce943b69e996..843ab6fbb394 100644
--- a/net/irda/irlan/irlan_client_event.c
+++ b/net/irda/irlan/irlan_client_event.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_client_event.c 3 * Filename: irlan_client_event.c
4 * Version: 0.9 4 * Version: 0.9
5 * Description: IrLAN client state machine 5 * Description: IrLAN client state machine
@@ -8,17 +8,17 @@
8 * Created at: Sun Aug 31 20:14:37 1997 8 * Created at: Sun Aug 31 20:14:37 1997
9 * Modified at: Sun Dec 26 21:52:24 1999 9 * Modified at: Sun Dec 26 21:52:24 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, 12 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
13 * All Rights Reserved. 13 * All Rights Reserved.
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as 16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
24 ********************************************************************/ 24 ********************************************************************/
@@ -36,31 +36,31 @@
36#include <net/irda/irlan_client.h> 36#include <net/irda/irlan_client.h>
37#include <net/irda/irlan_event.h> 37#include <net/irda/irlan_event.h>
38 38
39static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event, 39static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event,
40 struct sk_buff *skb); 40 struct sk_buff *skb);
41static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, 41static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
42 struct sk_buff *skb); 42 struct sk_buff *skb);
43static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event, 43static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event,
44 struct sk_buff *skb); 44 struct sk_buff *skb);
45static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event, 45static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event,
46 struct sk_buff *skb); 46 struct sk_buff *skb);
47static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, 47static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
48 struct sk_buff *skb); 48 struct sk_buff *skb);
49static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event, 49static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event,
50 struct sk_buff *skb); 50 struct sk_buff *skb);
51static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event, 51static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event,
52 struct sk_buff *skb); 52 struct sk_buff *skb);
53static int irlan_client_state_arb (struct irlan_cb *self, IRLAN_EVENT event, 53static int irlan_client_state_arb (struct irlan_cb *self, IRLAN_EVENT event,
54 struct sk_buff *skb); 54 struct sk_buff *skb);
55static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event, 55static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event,
56 struct sk_buff *skb); 56 struct sk_buff *skb);
57static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, 57static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
58 struct sk_buff *skb); 58 struct sk_buff *skb);
59static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event, 59static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event,
60 struct sk_buff *skb); 60 struct sk_buff *skb);
61 61
62static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) = 62static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) =
63{ 63{
64 irlan_client_state_idle, 64 irlan_client_state_idle,
65 irlan_client_state_query, 65 irlan_client_state_query,
66 irlan_client_state_conn, 66 irlan_client_state_conn,
@@ -74,8 +74,8 @@ static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) =
74 irlan_client_state_sync 74 irlan_client_state_sync
75}; 75};
76 76
77void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, 77void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
78 struct sk_buff *skb) 78 struct sk_buff *skb)
79{ 79{
80 IRDA_ASSERT(self != NULL, return;); 80 IRDA_ASSERT(self != NULL, return;);
81 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 81 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -89,14 +89,14 @@ void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
89 * IDLE, We are waiting for an indication that there is a provider 89 * IDLE, We are waiting for an indication that there is a provider
90 * available. 90 * available.
91 */ 91 */
92static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, 92static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
93 struct sk_buff *skb) 93 struct sk_buff *skb)
94{ 94{
95 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 95 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
96 96
97 IRDA_ASSERT(self != NULL, return -1;); 97 IRDA_ASSERT(self != NULL, return -1;);
98 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 98 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
99 99
100 switch (event) { 100 switch (event) {
101 case IRLAN_DISCOVERY_INDICATION: 101 case IRLAN_DISCOVERY_INDICATION:
102 if (self->client.iriap) { 102 if (self->client.iriap) {
@@ -104,7 +104,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
104 __FUNCTION__); 104 __FUNCTION__);
105 return -EBUSY; 105 return -EBUSY;
106 } 106 }
107 107
108 self->client.iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, 108 self->client.iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
109 irlan_client_get_value_confirm); 109 irlan_client_get_value_confirm);
110 /* Get some values from peer IAS */ 110 /* Get some values from peer IAS */
@@ -120,7 +120,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
120 IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__ , event); 120 IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__ , event);
121 break; 121 break;
122 } 122 }
123 if (skb) 123 if (skb)
124 dev_kfree_skb(skb); 124 dev_kfree_skb(skb);
125 125
126 return 0; 126 return 0;
@@ -133,23 +133,23 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
133 * to provider, just waiting for the confirm. 133 * to provider, just waiting for the confirm.
134 * 134 *
135 */ 135 */
136static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, 136static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
137 struct sk_buff *skb) 137 struct sk_buff *skb)
138{ 138{
139 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 139 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
140 140
141 IRDA_ASSERT(self != NULL, return -1;); 141 IRDA_ASSERT(self != NULL, return -1;);
142 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 142 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
143 143
144 switch(event) { 144 switch(event) {
145 case IRLAN_IAS_PROVIDER_AVAIL: 145 case IRLAN_IAS_PROVIDER_AVAIL:
146 IRDA_ASSERT(self->dtsap_sel_ctrl != 0, return -1;); 146 IRDA_ASSERT(self->dtsap_sel_ctrl != 0, return -1;);
147 147
148 self->client.open_retries = 0; 148 self->client.open_retries = 0;
149 149
150 irttp_connect_request(self->client.tsap_ctrl, 150 irttp_connect_request(self->client.tsap_ctrl,
151 self->dtsap_sel_ctrl, 151 self->dtsap_sel_ctrl,
152 self->saddr, self->daddr, NULL, 152 self->saddr, self->daddr, NULL,
153 IRLAN_MTU, NULL); 153 IRLAN_MTU, NULL);
154 irlan_next_client_state(self, IRLAN_CONN); 154 irlan_next_client_state(self, IRLAN_CONN);
155 break; 155 break;
@@ -158,7 +158,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
158 irlan_next_client_state(self, IRLAN_IDLE); 158 irlan_next_client_state(self, IRLAN_IDLE);
159 159
160 /* Give the client a kick! */ 160 /* Give the client a kick! */
161 if ((self->provider.access_type == ACCESS_PEER) && 161 if ((self->provider.access_type == ACCESS_PEER) &&
162 (self->provider.state != IRLAN_IDLE)) 162 (self->provider.state != IRLAN_IDLE))
163 irlan_client_wakeup(self, self->saddr, self->daddr); 163 irlan_client_wakeup(self, self->saddr, self->daddr);
164 break; 164 break;
@@ -175,7 +175,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
175 } 175 }
176 if (skb) 176 if (skb)
177 dev_kfree_skb(skb); 177 dev_kfree_skb(skb);
178 178
179 return 0; 179 return 0;
180} 180}
181 181
@@ -186,13 +186,13 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
186 * commands yet. 186 * commands yet.
187 * 187 *
188 */ 188 */
189static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, 189static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
190 struct sk_buff *skb) 190 struct sk_buff *skb)
191{ 191{
192 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 192 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
193 193
194 IRDA_ASSERT(self != NULL, return -1;); 194 IRDA_ASSERT(self != NULL, return -1;);
195 195
196 switch (event) { 196 switch (event) {
197 case IRLAN_CONNECT_COMPLETE: 197 case IRLAN_CONNECT_COMPLETE:
198 /* Send getinfo cmd */ 198 /* Send getinfo cmd */
@@ -212,7 +212,7 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
212 } 212 }
213 if (skb) 213 if (skb)
214 dev_kfree_skb(skb); 214 dev_kfree_skb(skb);
215 215
216 return 0; 216 return 0;
217} 217}
218 218
@@ -221,24 +221,24 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
221 * 221 *
222 * INFO, We have issued a GetInfo command and is awaiting a reply. 222 * INFO, We have issued a GetInfo command and is awaiting a reply.
223 */ 223 */
224static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, 224static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
225 struct sk_buff *skb) 225 struct sk_buff *skb)
226{ 226{
227 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 227 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
228 228
229 IRDA_ASSERT(self != NULL, return -1;); 229 IRDA_ASSERT(self != NULL, return -1;);
230 230
231 switch (event) { 231 switch (event) {
232 case IRLAN_DATA_INDICATION: 232 case IRLAN_DATA_INDICATION:
233 IRDA_ASSERT(skb != NULL, return -1;); 233 IRDA_ASSERT(skb != NULL, return -1;);
234 234
235 irlan_client_parse_response(self, skb); 235 irlan_client_parse_response(self, skb);
236 236
237 irlan_next_client_state(self, IRLAN_MEDIA); 237 irlan_next_client_state(self, IRLAN_MEDIA);
238 238
239 irlan_get_media_char(self); 239 irlan_get_media_char(self);
240 break; 240 break;
241 241
242 case IRLAN_LMP_DISCONNECT: 242 case IRLAN_LMP_DISCONNECT:
243 case IRLAN_LAP_DISCONNECT: 243 case IRLAN_LAP_DISCONNECT:
244 irlan_next_client_state(self, IRLAN_IDLE); 244 irlan_next_client_state(self, IRLAN_IDLE);
@@ -252,7 +252,7 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
252 } 252 }
253 if (skb) 253 if (skb)
254 dev_kfree_skb(skb); 254 dev_kfree_skb(skb);
255 255
256 return 0; 256 return 0;
257} 257}
258 258
@@ -263,11 +263,11 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
263 * reply. 263 * reply.
264 * 264 *
265 */ 265 */
266static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, 266static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
267 struct sk_buff *skb) 267 struct sk_buff *skb)
268{ 268{
269 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 269 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
270 270
271 IRDA_ASSERT(self != NULL, return -1;); 271 IRDA_ASSERT(self != NULL, return -1;);
272 272
273 switch(event) { 273 switch(event) {
@@ -289,7 +289,7 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
289 } 289 }
290 if (skb) 290 if (skb)
291 dev_kfree_skb(skb); 291 dev_kfree_skb(skb);
292 292
293 return 0; 293 return 0;
294} 294}
295 295
@@ -300,47 +300,47 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
300 * reply 300 * reply
301 * 301 *
302 */ 302 */
303static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, 303static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
304 struct sk_buff *skb) 304 struct sk_buff *skb)
305{ 305{
306 struct qos_info qos; 306 struct qos_info qos;
307 307
308 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 308 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
309 309
310 IRDA_ASSERT(self != NULL, return -1;); 310 IRDA_ASSERT(self != NULL, return -1;);
311 311
312 switch(event) { 312 switch(event) {
313 case IRLAN_DATA_INDICATION: 313 case IRLAN_DATA_INDICATION:
314 irlan_client_parse_response(self, skb); 314 irlan_client_parse_response(self, skb);
315 315
316 /* 316 /*
317 * Check if we have got the remote TSAP for data 317 * Check if we have got the remote TSAP for data
318 * communications 318 * communications
319 */ 319 */
320 IRDA_ASSERT(self->dtsap_sel_data != 0, return -1;); 320 IRDA_ASSERT(self->dtsap_sel_data != 0, return -1;);
321 321
322 /* Check which access type we are dealing with */ 322 /* Check which access type we are dealing with */
323 switch (self->client.access_type) { 323 switch (self->client.access_type) {
324 case ACCESS_PEER: 324 case ACCESS_PEER:
325 if (self->provider.state == IRLAN_OPEN) { 325 if (self->provider.state == IRLAN_OPEN) {
326 326
327 irlan_next_client_state(self, IRLAN_ARB); 327 irlan_next_client_state(self, IRLAN_ARB);
328 irlan_do_client_event(self, IRLAN_CHECK_CON_ARB, 328 irlan_do_client_event(self, IRLAN_CHECK_CON_ARB,
329 NULL); 329 NULL);
330 } else { 330 } else {
331 331
332 irlan_next_client_state(self, IRLAN_WAIT); 332 irlan_next_client_state(self, IRLAN_WAIT);
333 } 333 }
334 break; 334 break;
335 case ACCESS_DIRECT: 335 case ACCESS_DIRECT:
336 case ACCESS_HOSTED: 336 case ACCESS_HOSTED:
337 qos.link_disc_time.bits = 0x01; /* 3 secs */ 337 qos.link_disc_time.bits = 0x01; /* 3 secs */
338 338
339 irttp_connect_request(self->tsap_data, 339 irttp_connect_request(self->tsap_data,
340 self->dtsap_sel_data, 340 self->dtsap_sel_data,
341 self->saddr, self->daddr, &qos, 341 self->saddr, self->daddr, &qos,
342 IRLAN_MTU, NULL); 342 IRLAN_MTU, NULL);
343 343
344 irlan_next_client_state(self, IRLAN_DATA); 344 irlan_next_client_state(self, IRLAN_DATA);
345 break; 345 break;
346 default: 346 default:
@@ -359,7 +359,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
359 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); 359 IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event);
360 break; 360 break;
361 } 361 }
362 362
363 if (skb) 363 if (skb)
364 dev_kfree_skb(skb); 364 dev_kfree_skb(skb);
365 365
@@ -373,13 +373,13 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
373 * provider OPEN state. 373 * provider OPEN state.
374 * 374 *
375 */ 375 */
376static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, 376static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
377 struct sk_buff *skb) 377 struct sk_buff *skb)
378{ 378{
379 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 379 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
380 380
381 IRDA_ASSERT(self != NULL, return -1;); 381 IRDA_ASSERT(self != NULL, return -1;);
382 382
383 switch(event) { 383 switch(event) {
384 case IRLAN_PROVIDER_SIGNAL: 384 case IRLAN_PROVIDER_SIGNAL:
385 irlan_next_client_state(self, IRLAN_ARB); 385 irlan_next_client_state(self, IRLAN_ARB);
@@ -398,36 +398,36 @@ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
398 } 398 }
399 if (skb) 399 if (skb)
400 dev_kfree_skb(skb); 400 dev_kfree_skb(skb);
401 401
402 return 0; 402 return 0;
403} 403}
404 404
405static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, 405static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
406 struct sk_buff *skb) 406 struct sk_buff *skb)
407{ 407{
408 struct qos_info qos; 408 struct qos_info qos;
409 409
410 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 410 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
411 411
412 IRDA_ASSERT(self != NULL, return -1;); 412 IRDA_ASSERT(self != NULL, return -1;);
413 413
414 switch(event) { 414 switch(event) {
415 case IRLAN_CHECK_CON_ARB: 415 case IRLAN_CHECK_CON_ARB:
416 if (self->client.recv_arb_val == self->provider.send_arb_val) { 416 if (self->client.recv_arb_val == self->provider.send_arb_val) {
417 irlan_next_client_state(self, IRLAN_CLOSE); 417 irlan_next_client_state(self, IRLAN_CLOSE);
418 irlan_close_data_channel(self); 418 irlan_close_data_channel(self);
419 } else if (self->client.recv_arb_val < 419 } else if (self->client.recv_arb_val <
420 self->provider.send_arb_val) 420 self->provider.send_arb_val)
421 { 421 {
422 qos.link_disc_time.bits = 0x01; /* 3 secs */ 422 qos.link_disc_time.bits = 0x01; /* 3 secs */
423 423
424 irlan_next_client_state(self, IRLAN_DATA); 424 irlan_next_client_state(self, IRLAN_DATA);
425 irttp_connect_request(self->tsap_data, 425 irttp_connect_request(self->tsap_data,
426 self->dtsap_sel_data, 426 self->dtsap_sel_data,
427 self->saddr, self->daddr, &qos, 427 self->saddr, self->daddr, &qos,
428 IRLAN_MTU, NULL); 428 IRLAN_MTU, NULL);
429 } else if (self->client.recv_arb_val > 429 } else if (self->client.recv_arb_val >
430 self->provider.send_arb_val) 430 self->provider.send_arb_val)
431 { 431 {
432 IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __FUNCTION__ ); 432 IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __FUNCTION__ );
433 } 433 }
@@ -448,7 +448,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
448 } 448 }
449 if (skb) 449 if (skb)
450 dev_kfree_skb(skb); 450 dev_kfree_skb(skb);
451 451
452 return 0; 452 return 0;
453} 453}
454 454
@@ -459,8 +459,8 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
459 * the local and remote machines. 459 * the local and remote machines.
460 * 460 *
461 */ 461 */
462static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, 462static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
463 struct sk_buff *skb) 463 struct sk_buff *skb)
464{ 464{
465 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 465 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
466 466
@@ -470,7 +470,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
470 switch(event) { 470 switch(event) {
471 case IRLAN_DATA_INDICATION: 471 case IRLAN_DATA_INDICATION:
472 irlan_client_parse_response(self, skb); 472 irlan_client_parse_response(self, skb);
473 break; 473 break;
474 case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */ 474 case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
475 case IRLAN_LAP_DISCONNECT: 475 case IRLAN_LAP_DISCONNECT:
476 irlan_next_client_state(self, IRLAN_IDLE); 476 irlan_next_client_state(self, IRLAN_IDLE);
@@ -481,18 +481,18 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
481 } 481 }
482 if (skb) 482 if (skb)
483 dev_kfree_skb(skb); 483 dev_kfree_skb(skb);
484 484
485 return 0; 485 return 0;
486} 486}
487 487
488/* 488/*
489 * Function irlan_client_state_close (self, event, skb, info) 489 * Function irlan_client_state_close (self, event, skb, info)
490 * 490 *
491 * 491 *
492 * 492 *
493 */ 493 */
494static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, 494static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
495 struct sk_buff *skb) 495 struct sk_buff *skb)
496{ 496{
497 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 497 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
498 498
@@ -505,17 +505,17 @@ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
505/* 505/*
506 * Function irlan_client_state_sync (self, event, skb, info) 506 * Function irlan_client_state_sync (self, event, skb, info)
507 * 507 *
508 * 508 *
509 * 509 *
510 */ 510 */
511static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, 511static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event,
512 struct sk_buff *skb) 512 struct sk_buff *skb)
513{ 513{
514 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 514 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
515 515
516 if (skb) 516 if (skb)
517 dev_kfree_skb(skb); 517 dev_kfree_skb(skb);
518 518
519 return 0; 519 return 0;
520} 520}
521 521
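The declarations above set up irlan_client_event.c as a table-driven state machine: one handler per client state, collected in the state[] array and invoked through irlan_do_client_event(). The dispatch itself is not visible in these hunks; it amounts to indexing the table by the current state, roughly:

	/* Sketch only -- the real irlan_do_client_event() also performs the
	 * IRDA_ASSERT() sanity checks shown in the hunk above. */
	void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
				   struct sk_buff *skb)
	{
		(*state[self->client.state])(self, event, skb);
	}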
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 310776dd6109..9c3dc57ff746 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_common.c 3 * Filename: irlan_common.c
4 * Version: 0.9 4 * Version: 0.9
5 * Description: IrDA LAN Access Protocol Implementation 5 * Description: IrDA LAN Access Protocol Implementation
@@ -8,17 +8,17 @@
8 * Created at: Sun Aug 31 20:14:37 1997 8 * Created at: Sun Aug 31 20:14:37 1997
9 * Modified at: Sun Dec 26 21:53:10 1999 9 * Modified at: Sun Dec 26 21:53:10 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>, 12 * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
13 * All Rights Reserved. 13 * All Rights Reserved.
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as 16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of 17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version. 18 * the License, or (at your option) any later version.
19 * 19 *
20 * Neither Dag Brattli nor University of Tromsø admit liability nor 20 * Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is 21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge. 22 * provided "AS-IS" and at no charge.
23 * 23 *
24 ********************************************************************/ 24 ********************************************************************/
@@ -49,12 +49,12 @@
49 49
50#include <net/irda/irlan_common.h> 50#include <net/irda/irlan_common.h>
51#include <net/irda/irlan_client.h> 51#include <net/irda/irlan_client.h>
52#include <net/irda/irlan_provider.h> 52#include <net/irda/irlan_provider.h>
53#include <net/irda/irlan_eth.h> 53#include <net/irda/irlan_eth.h>
54#include <net/irda/irlan_filter.h> 54#include <net/irda/irlan_filter.h>
55 55
56 56
57/* 57/*
58 * Send gratuitous ARP when connected to a new AP or not. May be a clever 58 * Send gratuitous ARP when connected to a new AP or not. May be a clever
59 * thing to do, but for some reason the machine crashes if you use DHCP. So 59 * thing to do, but for some reason the machine crashes if you use DHCP. So
60 * lets not use it by default. 60 * lets not use it by default.
@@ -106,8 +106,8 @@ extern struct proc_dir_entry *proc_irda;
106 106
107static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr); 107static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr);
108static void __irlan_close(struct irlan_cb *self); 108static void __irlan_close(struct irlan_cb *self);
109static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, 109static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
110 __u8 value_byte, __u16 value_short, 110 __u8 value_byte, __u16 value_short,
111 __u8 *value_array, __u16 value_len); 111 __u8 *value_array, __u16 value_len);
112static void irlan_open_unicast_addr(struct irlan_cb *self); 112static void irlan_open_unicast_addr(struct irlan_cb *self);
113static void irlan_get_unicast_addr(struct irlan_cb *self); 113static void irlan_get_unicast_addr(struct irlan_cb *self);
@@ -177,7 +177,7 @@ err_ckey:
177 return -ENOMEM; 177 return -ENOMEM;
178} 178}
179 179
180static void __exit irlan_cleanup(void) 180static void __exit irlan_cleanup(void)
181{ 181{
182 struct irlan_cb *self, *next; 182 struct irlan_cb *self, *next;
183 183
@@ -201,7 +201,7 @@ static void __exit irlan_cleanup(void)
201/* 201/*
202 * Function irlan_open (void) 202 * Function irlan_open (void)
203 * 203 *
204 * Open new instance of a client/provider, we should only register the 204 * Open new instance of a client/provider, we should only register the
205 * network device if this instance is ment for a particular client/provider 205 * network device if this instance is ment for a particular client/provider
206 */ 206 */
207static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr) 207static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
@@ -229,9 +229,9 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
229 /* Provider access can only be PEER, DIRECT, or HOSTED */ 229 /* Provider access can only be PEER, DIRECT, or HOSTED */
230 self->provider.access_type = access; 230 self->provider.access_type = access;
231 if (access == ACCESS_DIRECT) { 231 if (access == ACCESS_DIRECT) {
232 /* 232 /*
233 * Since we are emulating an IrLAN sever we will have to 233 * Since we are emulating an IrLAN sever we will have to
234 * give ourself an ethernet address! 234 * give ourself an ethernet address!
235 */ 235 */
236 dev->dev_addr[0] = 0x40; 236 dev->dev_addr[0] = 0x40;
237 dev->dev_addr[1] = 0x00; 237 dev->dev_addr[1] = 0x00;
@@ -245,15 +245,15 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
245 self->disconnect_reason = LM_USER_REQUEST; 245 self->disconnect_reason = LM_USER_REQUEST;
246 init_timer(&self->watchdog_timer); 246 init_timer(&self->watchdog_timer);
247 init_timer(&self->client.kick_timer); 247 init_timer(&self->client.kick_timer);
248 init_waitqueue_head(&self->open_wait); 248 init_waitqueue_head(&self->open_wait);
249 249
250 skb_queue_head_init(&self->client.txq); 250 skb_queue_head_init(&self->client.txq);
251 251
252 irlan_next_client_state(self, IRLAN_IDLE); 252 irlan_next_client_state(self, IRLAN_IDLE);
253 irlan_next_provider_state(self, IRLAN_IDLE); 253 irlan_next_provider_state(self, IRLAN_IDLE);
254 254
255 if (register_netdev(dev)) { 255 if (register_netdev(dev)) {
256 IRDA_DEBUG(2, "%s(), register_netdev() failed!\n", 256 IRDA_DEBUG(2, "%s(), register_netdev() failed!\n",
257 __FUNCTION__ ); 257 __FUNCTION__ );
258 self = NULL; 258 self = NULL;
259 free_netdev(dev); 259 free_netdev(dev);
@@ -268,14 +268,14 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
268/* 268/*
269 * Function __irlan_close (self) 269 * Function __irlan_close (self)
270 * 270 *
271 * This function closes and deallocates the IrLAN client instances. Be 271 * This function closes and deallocates the IrLAN client instances. Be
272 * aware that other functions which calls client_close() must 272 * aware that other functions which calls client_close() must
273 * remove self from irlans list first. 273 * remove self from irlans list first.
274 */ 274 */
275static void __irlan_close(struct irlan_cb *self) 275static void __irlan_close(struct irlan_cb *self)
276{ 276{
277 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 277 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
278 278
279 ASSERT_RTNL(); 279 ASSERT_RTNL();
280 IRDA_ASSERT(self != NULL, return;); 280 IRDA_ASSERT(self != NULL, return;);
281 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 281 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -285,8 +285,8 @@ static void __irlan_close(struct irlan_cb *self)
285 285
286 /* Close all open connections and remove TSAPs */ 286 /* Close all open connections and remove TSAPs */
287 irlan_close_tsaps(self); 287 irlan_close_tsaps(self);
288 288
289 if (self->client.iriap) 289 if (self->client.iriap)
290 iriap_close(self->client.iriap); 290 iriap_close(self->client.iriap);
291 291
292 /* Remove frames queued on the control channel */ 292 /* Remove frames queued on the control channel */
@@ -316,17 +316,17 @@ struct irlan_cb *irlan_get_any(void)
316static void irlan_connect_indication(void *instance, void *sap, 316static void irlan_connect_indication(void *instance, void *sap,
317 struct qos_info *qos, 317 struct qos_info *qos,
318 __u32 max_sdu_size, 318 __u32 max_sdu_size,
319 __u8 max_header_size, 319 __u8 max_header_size,
320 struct sk_buff *skb) 320 struct sk_buff *skb)
321{ 321{
322 struct irlan_cb *self; 322 struct irlan_cb *self;
323 struct tsap_cb *tsap; 323 struct tsap_cb *tsap;
324 324
325 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 325 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
326 326
327 self = (struct irlan_cb *) instance; 327 self = (struct irlan_cb *) instance;
328 tsap = (struct tsap_cb *) sap; 328 tsap = (struct tsap_cb *) sap;
329 329
330 IRDA_ASSERT(self != NULL, return;); 330 IRDA_ASSERT(self != NULL, return;);
331 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 331 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
332 IRDA_ASSERT(tsap == self->tsap_data,return;); 332 IRDA_ASSERT(tsap == self->tsap_data,return;);
@@ -349,9 +349,9 @@ static void irlan_connect_indication(void *instance, void *sap,
349 irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL); 349 irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL);
350 350
351 if (self->provider.access_type == ACCESS_PEER) { 351 if (self->provider.access_type == ACCESS_PEER) {
352 /* 352 /*
353 * Data channel is open, so we are now allowed to 353 * Data channel is open, so we are now allowed to
354 * configure the remote filter 354 * configure the remote filter
355 */ 355 */
356 irlan_get_unicast_addr(self); 356 irlan_get_unicast_addr(self);
357 irlan_open_unicast_addr(self); 357 irlan_open_unicast_addr(self);
@@ -361,10 +361,10 @@ static void irlan_connect_indication(void *instance, void *sap,
361} 361}
362 362
363static void irlan_connect_confirm(void *instance, void *sap, 363static void irlan_connect_confirm(void *instance, void *sap,
364 struct qos_info *qos, 364 struct qos_info *qos,
365 __u32 max_sdu_size, 365 __u32 max_sdu_size,
366 __u8 max_header_size, 366 __u8 max_header_size,
367 struct sk_buff *skb) 367 struct sk_buff *skb)
368{ 368{
369 struct irlan_cb *self; 369 struct irlan_cb *self;
370 370
@@ -381,16 +381,16 @@ static void irlan_connect_confirm(void *instance, void *sap,
381 IRDA_DEBUG(0, "%s: We are now connected!\n", __FUNCTION__); 381 IRDA_DEBUG(0, "%s: We are now connected!\n", __FUNCTION__);
382 del_timer(&self->watchdog_timer); 382 del_timer(&self->watchdog_timer);
383 383
384 /* 384 /*
385 * Data channel is open, so we are now allowed to configure the remote 385 * Data channel is open, so we are now allowed to configure the remote
386 * filter 386 * filter
387 */ 387 */
388 irlan_get_unicast_addr(self); 388 irlan_get_unicast_addr(self);
389 irlan_open_unicast_addr(self); 389 irlan_open_unicast_addr(self);
390 390
391 /* Open broadcast and multicast filter by default */ 391 /* Open broadcast and multicast filter by default */
392 irlan_set_broadcast_filter(self, TRUE); 392 irlan_set_broadcast_filter(self, TRUE);
393 irlan_set_multicast_filter(self, TRUE); 393 irlan_set_multicast_filter(self, TRUE);
394 394
395 /* Ready to transfer Ethernet frames */ 395 /* Ready to transfer Ethernet frames */
396 netif_start_queue(self->dev); 396 netif_start_queue(self->dev);
@@ -408,29 +408,29 @@ static void irlan_connect_confirm(void *instance, void *sap,
408 * the specified connection (handle) 408 * the specified connection (handle)
409 */ 409 */
410static void irlan_disconnect_indication(void *instance, 410static void irlan_disconnect_indication(void *instance,
411 void *sap, LM_REASON reason, 411 void *sap, LM_REASON reason,
412 struct sk_buff *userdata) 412 struct sk_buff *userdata)
413{ 413{
414 struct irlan_cb *self; 414 struct irlan_cb *self;
415 struct tsap_cb *tsap; 415 struct tsap_cb *tsap;
416 416
417 IRDA_DEBUG(0, "%s(), reason=%d\n", __FUNCTION__ , reason); 417 IRDA_DEBUG(0, "%s(), reason=%d\n", __FUNCTION__ , reason);
418 418
419 self = (struct irlan_cb *) instance; 419 self = (struct irlan_cb *) instance;
420 tsap = (struct tsap_cb *) sap; 420 tsap = (struct tsap_cb *) sap;
421 421
422 IRDA_ASSERT(self != NULL, return;); 422 IRDA_ASSERT(self != NULL, return;);
423 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 423 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
424 IRDA_ASSERT(tsap != NULL, return;); 424 IRDA_ASSERT(tsap != NULL, return;);
425 IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;); 425 IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
426 426
427 IRDA_ASSERT(tsap == self->tsap_data, return;); 427 IRDA_ASSERT(tsap == self->tsap_data, return;);
428 428
429 IRDA_DEBUG(2, "IrLAN, data channel disconnected by peer!\n"); 429 IRDA_DEBUG(2, "IrLAN, data channel disconnected by peer!\n");
430 430
431 /* Save reason so we know if we should try to reconnect or not */ 431 /* Save reason so we know if we should try to reconnect or not */
432 self->disconnect_reason = reason; 432 self->disconnect_reason = reason;
433 433
434 switch (reason) { 434 switch (reason) {
435 case LM_USER_REQUEST: /* User request */ 435 case LM_USER_REQUEST: /* User request */
436 IRDA_DEBUG(2, "%s(), User requested\n", __FUNCTION__ ); 436 IRDA_DEBUG(2, "%s(), User requested\n", __FUNCTION__ );
@@ -451,7 +451,7 @@ static void irlan_disconnect_indication(void *instance,
451 IRDA_ERROR("%s(), Unknown disconnect reason\n", __FUNCTION__); 451 IRDA_ERROR("%s(), Unknown disconnect reason\n", __FUNCTION__);
452 break; 452 break;
453 } 453 }
454 454
455 /* If you want to pass the skb to *both* state machines, you will 455 /* If you want to pass the skb to *both* state machines, you will
456 * need to skb_clone() it, so that you don't free it twice. 456 * need to skb_clone() it, so that you don't free it twice.
457	 * As the state machines don't need it, get rid of it here... 457	 * As the state machines don't need it, get rid of it here...
@@ -461,7 +461,7 @@ static void irlan_disconnect_indication(void *instance,
461 461
462 irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL); 462 irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
463 irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL); 463 irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
464 464
465 wake_up_interruptible(&self->open_wait); 465 wake_up_interruptible(&self->open_wait);
466} 466}
467 467
@@ -480,12 +480,12 @@ void irlan_open_data_tsap(struct irlan_cb *self)
480 return; 480 return;
481 481
482 irda_notify_init(&notify); 482 irda_notify_init(&notify);
483 483
484 notify.data_indication = irlan_eth_receive; 484 notify.data_indication = irlan_eth_receive;
485 notify.udata_indication = irlan_eth_receive; 485 notify.udata_indication = irlan_eth_receive;
486 notify.connect_indication = irlan_connect_indication; 486 notify.connect_indication = irlan_connect_indication;
487 notify.connect_confirm = irlan_connect_confirm; 487 notify.connect_confirm = irlan_connect_confirm;
488 notify.flow_indication = irlan_eth_flow_indication; 488 notify.flow_indication = irlan_eth_flow_indication;
489 notify.disconnect_indication = irlan_disconnect_indication; 489 notify.disconnect_indication = irlan_disconnect_indication;
490 notify.instance = self; 490 notify.instance = self;
491 strlcpy(notify.name, "IrLAN data", sizeof(notify.name)); 491 strlcpy(notify.name, "IrLAN data", sizeof(notify.name));
@@ -497,7 +497,7 @@ void irlan_open_data_tsap(struct irlan_cb *self)
497 } 497 }
498 self->tsap_data = tsap; 498 self->tsap_data = tsap;
499 499
500 /* 500 /*
501 * This is the data TSAP selector which we will pass to the client 501 * This is the data TSAP selector which we will pass to the client
502	 * when the client asks for it. 502	 * when the client asks for it.
503 */ 503 */
@@ -518,13 +518,13 @@ void irlan_close_tsaps(struct irlan_cb *self)
518 self->tsap_data = NULL; 518 self->tsap_data = NULL;
519 } 519 }
520 if (self->client.tsap_ctrl) { 520 if (self->client.tsap_ctrl) {
521 irttp_disconnect_request(self->client.tsap_ctrl, NULL, 521 irttp_disconnect_request(self->client.tsap_ctrl, NULL,
522 P_NORMAL); 522 P_NORMAL);
523 irttp_close_tsap(self->client.tsap_ctrl); 523 irttp_close_tsap(self->client.tsap_ctrl);
524 self->client.tsap_ctrl = NULL; 524 self->client.tsap_ctrl = NULL;
525 } 525 }
526 if (self->provider.tsap_ctrl) { 526 if (self->provider.tsap_ctrl) {
527 irttp_disconnect_request(self->provider.tsap_ctrl, NULL, 527 irttp_disconnect_request(self->provider.tsap_ctrl, NULL,
528 P_NORMAL); 528 P_NORMAL);
529 irttp_close_tsap(self->provider.tsap_ctrl); 529 irttp_close_tsap(self->provider.tsap_ctrl);
530 self->provider.tsap_ctrl = NULL; 530 self->provider.tsap_ctrl = NULL;
@@ -545,8 +545,8 @@ void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel)
545 545
546 IRDA_ASSERT(self != NULL, return;); 546 IRDA_ASSERT(self != NULL, return;);
547 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 547 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
548 548
549 /* 549 /*
550 * Check if object has already been registered by a previous provider. 550 * Check if object has already been registered by a previous provider.
551 * If that is the case, we just change the value of the attribute 551 * If that is the case, we just change the value of the attribute
552 */ 552 */
@@ -560,9 +560,9 @@ void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel)
560 irias_object_change_attribute("IrLAN", "IrDA:TinyTP:LsapSel", 560 irias_object_change_attribute("IrLAN", "IrDA:TinyTP:LsapSel",
561 new_value); 561 new_value);
562 } 562 }
563 563
564 /* Register PnP object only if not registered before */ 564 /* Register PnP object only if not registered before */
565 if (!irias_find_object("PnP")) { 565 if (!irias_find_object("PnP")) {
566 obj = irias_new_object("PnP", IAS_PNP_ID); 566 obj = irias_new_object("PnP", IAS_PNP_ID);
567#if 0 567#if 0
568 irias_add_string_attrib(obj, "Name", sysctl_devname, 568 irias_add_string_attrib(obj, "Name", sysctl_devname,
@@ -606,10 +606,10 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
606 self->client.tx_busy = FALSE; 606 self->client.tx_busy = FALSE;
607 return 0; 607 return 0;
608 } 608 }
609 609
610 /* Check that it's really possible to send commands */ 610 /* Check that it's really possible to send commands */
611 if ((self->client.tsap_ctrl == NULL) || 611 if ((self->client.tsap_ctrl == NULL) ||
612 (self->client.state == IRLAN_IDLE)) 612 (self->client.state == IRLAN_IDLE))
613 { 613 {
614 self->client.tx_busy = FALSE; 614 self->client.tx_busy = FALSE;
615 dev_kfree_skb(skb); 615 dev_kfree_skb(skb);
@@ -649,7 +649,7 @@ void irlan_get_provider_info(struct irlan_cb *self)
649 __u8 *frame; 649 __u8 *frame;
650 650
651 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 651 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
652 652
653 IRDA_ASSERT(self != NULL, return;); 653 IRDA_ASSERT(self != NULL, return;);
654 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 654 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
655 655
@@ -661,12 +661,12 @@ void irlan_get_provider_info(struct irlan_cb *self)
661 /* Reserve space for TTP, LMP, and LAP header */ 661 /* Reserve space for TTP, LMP, and LAP header */
662 skb_reserve(skb, self->client.max_header_size); 662 skb_reserve(skb, self->client.max_header_size);
663 skb_put(skb, 2); 663 skb_put(skb, 2);
664 664
665 frame = skb->data; 665 frame = skb->data;
666 666
667 frame[0] = CMD_GET_PROVIDER_INFO; 667 frame[0] = CMD_GET_PROVIDER_INFO;
668 frame[1] = 0x00; /* Zero parameters */ 668 frame[1] = 0x00; /* Zero parameters */
669 669
670 irlan_ctrl_data_request(self, skb); 670 irlan_ctrl_data_request(self, skb);
671} 671}
672 672
@@ -676,16 +676,16 @@ void irlan_get_provider_info(struct irlan_cb *self)
676 * Send an Open Data Command to provider 676 * Send an Open Data Command to provider
677 * 677 *
678 */ 678 */
679void irlan_open_data_channel(struct irlan_cb *self) 679void irlan_open_data_channel(struct irlan_cb *self)
680{ 680{
681 struct sk_buff *skb; 681 struct sk_buff *skb;
682 __u8 *frame; 682 __u8 *frame;
683 683
684 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 684 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
685 685
686 IRDA_ASSERT(self != NULL, return;); 686 IRDA_ASSERT(self != NULL, return;);
687 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 687 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
688 688
689 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER + 689 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
690 IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3") + 690 IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3") +
691 IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "DIRECT"), 691 IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "DIRECT"),
@@ -695,11 +695,11 @@ void irlan_open_data_channel(struct irlan_cb *self)
695 695
696 skb_reserve(skb, self->client.max_header_size); 696 skb_reserve(skb, self->client.max_header_size);
697 skb_put(skb, 2); 697 skb_put(skb, 2);
698 698
699 frame = skb->data; 699 frame = skb->data;
700 700
701 /* Build frame */ 701 /* Build frame */
702 frame[0] = CMD_OPEN_DATA_CHANNEL; 702 frame[0] = CMD_OPEN_DATA_CHANNEL;
703 frame[1] = 0x02; /* Two parameters */ 703 frame[1] = 0x02; /* Two parameters */
704 704
705 irlan_insert_string_param(skb, "MEDIA", "802.3"); 705 irlan_insert_string_param(skb, "MEDIA", "802.3");
@@ -711,11 +711,11 @@ void irlan_open_data_channel(struct irlan_cb *self)
711 irlan_ctrl_data_request(self, skb); 711 irlan_ctrl_data_request(self, skb);
712} 712}
713 713
714void irlan_close_data_channel(struct irlan_cb *self) 714void irlan_close_data_channel(struct irlan_cb *self)
715{ 715{
716 struct sk_buff *skb; 716 struct sk_buff *skb;
717 __u8 *frame; 717 __u8 *frame;
718 718
719 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 719 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
720 720
721 IRDA_ASSERT(self != NULL, return;); 721 IRDA_ASSERT(self != NULL, return;);
@@ -733,11 +733,11 @@ void irlan_close_data_channel(struct irlan_cb *self)
733 733
734 skb_reserve(skb, self->client.max_header_size); 734 skb_reserve(skb, self->client.max_header_size);
735 skb_put(skb, 2); 735 skb_put(skb, 2);
736 736
737 frame = skb->data; 737 frame = skb->data;
738 738
739 /* Build frame */ 739 /* Build frame */
740 frame[0] = CMD_CLOSE_DATA_CHAN; 740 frame[0] = CMD_CLOSE_DATA_CHAN;
741 frame[1] = 0x01; /* One parameter */ 741 frame[1] = 0x01; /* One parameter */
742 742
743 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data); 743 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
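Seen together with __irlan_insert_param() further down in this file, every control frame is just a command byte, a parameter count, and a run of length-prefixed name/value pairs. A rough byte-level view of the CMD_CLOSE_DATA_CHAN frame built above, assuming a purely illustrative data TSAP selector of 0x05 (the real value is whatever self->dtsap_sel_data holds):

/*
 * [0]       CMD_CLOSE_DATA_CHAN       command opcode
 * [1]       0x01                      number of parameters
 * [2]       0x09                      name length ("DATA_CHAN" is 9 bytes)
 * [3..11]   'D' 'A' 'T' 'A' '_' 'C' 'H' 'A' 'N'
 * [12..13]  0x01 0x00                 value length, 2 bytes, LSB first
 * [14]      0x05                      the one-byte parameter value (illustrative)
 */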
@@ -748,7 +748,7 @@ void irlan_close_data_channel(struct irlan_cb *self)
748/* 748/*
749 * Function irlan_open_unicast_addr (self) 749 * Function irlan_open_unicast_addr (self)
750 * 750 *
751 * Make IrLAN provider accept ethernet frames addressed to the unicast 751 * Make IrLAN provider accept ethernet frames addressed to the unicast
752 * address. 752 * address.
753 * 753 *
754 */ 754 */
@@ -756,12 +756,12 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
756{ 756{
757 struct sk_buff *skb; 757 struct sk_buff *skb;
758 __u8 *frame; 758 __u8 *frame;
759 759
760 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 760 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
761 761
762 IRDA_ASSERT(self != NULL, return;); 762 IRDA_ASSERT(self != NULL, return;);
763 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 763 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
764 764
765 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER + 765 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
766 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") + 766 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
767 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") + 767 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
@@ -773,15 +773,15 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
773 /* Reserve space for TTP, LMP, and LAP header */ 773 /* Reserve space for TTP, LMP, and LAP header */
774 skb_reserve(skb, self->max_header_size); 774 skb_reserve(skb, self->max_header_size);
775 skb_put(skb, 2); 775 skb_put(skb, 2);
776 776
777 frame = skb->data; 777 frame = skb->data;
778 778
779 frame[0] = CMD_FILTER_OPERATION; 779 frame[0] = CMD_FILTER_OPERATION;
780 frame[1] = 0x03; /* Three parameters */ 780 frame[1] = 0x03; /* Three parameters */
781 irlan_insert_byte_param(skb, "DATA_CHAN" , self->dtsap_sel_data); 781 irlan_insert_byte_param(skb, "DATA_CHAN" , self->dtsap_sel_data);
782 irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED"); 782 irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
783 irlan_insert_string_param(skb, "FILTER_MODE", "FILTER"); 783 irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
784 784
785 irlan_ctrl_data_request(self, skb); 785 irlan_ctrl_data_request(self, skb);
786} 786}
787 787
@@ -794,17 +794,17 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
794 * one but then _we_ have to initiate all communication with other 794 * one but then _we_ have to initiate all communication with other
795 * hosts, since ARP requests for this host will not be answered. 795 * hosts, since ARP requests for this host will not be answered.
796 */ 796 */
797void irlan_set_broadcast_filter(struct irlan_cb *self, int status) 797void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
798{ 798{
799 struct sk_buff *skb; 799 struct sk_buff *skb;
800 __u8 *frame; 800 __u8 *frame;
801 801
802 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 802 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
803 803
804 IRDA_ASSERT(self != NULL, return;); 804 IRDA_ASSERT(self != NULL, return;);
805 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 805 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
806 806
807 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER + 807 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
808 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") + 808 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
809 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") + 809 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
810 /* We may waste one byte here...*/ 810 /* We may waste one byte here...*/
@@ -816,17 +816,17 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
816 /* Reserve space for TTP, LMP, and LAP header */ 816 /* Reserve space for TTP, LMP, and LAP header */
817 skb_reserve(skb, self->client.max_header_size); 817 skb_reserve(skb, self->client.max_header_size);
818 skb_put(skb, 2); 818 skb_put(skb, 2);
819 819
820 frame = skb->data; 820 frame = skb->data;
821 821
822 frame[0] = CMD_FILTER_OPERATION; 822 frame[0] = CMD_FILTER_OPERATION;
823 frame[1] = 0x03; /* Three parameters */ 823 frame[1] = 0x03; /* Three parameters */
824 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data); 824 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
825 irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST"); 825 irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
826 if (status) 826 if (status)
827 irlan_insert_string_param(skb, "FILTER_MODE", "FILTER"); 827 irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
828 else 828 else
829 irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); 829 irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
830 830
831 irlan_ctrl_data_request(self, skb); 831 irlan_ctrl_data_request(self, skb);
832} 832}
@@ -835,14 +835,14 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
835 * Function irlan_set_multicast_filter (self, status) 835 * Function irlan_set_multicast_filter (self, status)
836 * 836 *
837 * Make IrLAN provider accept ethernet frames addressed to the multicast 837 * Make IrLAN provider accept ethernet frames addressed to the multicast
838 * address. 838 * address.
839 * 839 *
840 */ 840 */
841void irlan_set_multicast_filter(struct irlan_cb *self, int status) 841void irlan_set_multicast_filter(struct irlan_cb *self, int status)
842{ 842{
843 struct sk_buff *skb; 843 struct sk_buff *skb;
844 __u8 *frame; 844 __u8 *frame;
845 845
846 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 846 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
847 847
848 IRDA_ASSERT(self != NULL, return;); 848 IRDA_ASSERT(self != NULL, return;);
@@ -856,21 +856,21 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
856 GFP_ATOMIC); 856 GFP_ATOMIC);
857 if (!skb) 857 if (!skb)
858 return; 858 return;
859 859
860 /* Reserve space for TTP, LMP, and LAP header */ 860 /* Reserve space for TTP, LMP, and LAP header */
861 skb_reserve(skb, self->client.max_header_size); 861 skb_reserve(skb, self->client.max_header_size);
862 skb_put(skb, 2); 862 skb_put(skb, 2);
863 863
864 frame = skb->data; 864 frame = skb->data;
865 865
866 frame[0] = CMD_FILTER_OPERATION; 866 frame[0] = CMD_FILTER_OPERATION;
867 frame[1] = 0x03; /* Three parameters */ 867 frame[1] = 0x03; /* Three parameters */
868 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data); 868 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
869 irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST"); 869 irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");
870 if (status) 870 if (status)
871 irlan_insert_string_param(skb, "FILTER_MODE", "ALL"); 871 irlan_insert_string_param(skb, "FILTER_MODE", "ALL");
872 else 872 else
873 irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); 873 irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
874 874
875 irlan_ctrl_data_request(self, skb); 875 irlan_ctrl_data_request(self, skb);
876} 876}
@@ -887,12 +887,12 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
887{ 887{
888 struct sk_buff *skb; 888 struct sk_buff *skb;
889 __u8 *frame; 889 __u8 *frame;
890 890
891 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 891 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
892 892
893 IRDA_ASSERT(self != NULL, return;); 893 IRDA_ASSERT(self != NULL, return;);
894 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 894 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
895 895
896 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER + 896 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
897 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") + 897 IRLAN_BYTE_PARAMETER_LEN("DATA_CHAN") +
898 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") + 898 IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
@@ -905,34 +905,34 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
905 /* Reserve space for TTP, LMP, and LAP header */ 905 /* Reserve space for TTP, LMP, and LAP header */
906 skb_reserve(skb, self->client.max_header_size); 906 skb_reserve(skb, self->client.max_header_size);
907 skb_put(skb, 2); 907 skb_put(skb, 2);
908 908
909 frame = skb->data; 909 frame = skb->data;
910 910
911 frame[0] = CMD_FILTER_OPERATION; 911 frame[0] = CMD_FILTER_OPERATION;
912 frame[1] = 0x03; /* Three parameters */ 912 frame[1] = 0x03; /* Three parameters */
913 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data); 913 irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
914 irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED"); 914 irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
915 irlan_insert_string_param(skb, "FILTER_OPERATION", "DYNAMIC"); 915 irlan_insert_string_param(skb, "FILTER_OPERATION", "DYNAMIC");
916 916
917 irlan_ctrl_data_request(self, skb); 917 irlan_ctrl_data_request(self, skb);
918} 918}
919 919
920/* 920/*
921 * Function irlan_get_media_char (self) 921 * Function irlan_get_media_char (self)
922 * 922 *
923 * 923 *
924 * 924 *
925 */ 925 */
926void irlan_get_media_char(struct irlan_cb *self) 926void irlan_get_media_char(struct irlan_cb *self)
927{ 927{
928 struct sk_buff *skb; 928 struct sk_buff *skb;
929 __u8 *frame; 929 __u8 *frame;
930 930
931 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 931 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
932 932
933 IRDA_ASSERT(self != NULL, return;); 933 IRDA_ASSERT(self != NULL, return;);
934 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 934 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
935 935
936 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER + 936 skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
937 IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3"), 937 IRLAN_STRING_PARAMETER_LEN("MEDIA", "802.3"),
938 GFP_ATOMIC); 938 GFP_ATOMIC);
@@ -943,13 +943,13 @@ void irlan_get_media_char(struct irlan_cb *self)
943 /* Reserve space for TTP, LMP, and LAP header */ 943 /* Reserve space for TTP, LMP, and LAP header */
944 skb_reserve(skb, self->client.max_header_size); 944 skb_reserve(skb, self->client.max_header_size);
945 skb_put(skb, 2); 945 skb_put(skb, 2);
946 946
947 frame = skb->data; 947 frame = skb->data;
948 948
949 /* Build frame */ 949 /* Build frame */
950 frame[0] = CMD_GET_MEDIA_CHAR; 950 frame[0] = CMD_GET_MEDIA_CHAR;
951 frame[1] = 0x01; /* One parameter */ 951 frame[1] = 0x01; /* One parameter */
952 952
953 irlan_insert_string_param(skb, "MEDIA", "802.3"); 953 irlan_insert_string_param(skb, "MEDIA", "802.3");
954 irlan_ctrl_data_request(self, skb); 954 irlan_ctrl_data_request(self, skb);
955} 955}
@@ -980,7 +980,7 @@ int irlan_insert_string_param(struct sk_buff *skb, char *param, char *string)
980{ 980{
981 int string_len = strlen(string); 981 int string_len = strlen(string);
982 982
983 return __irlan_insert_param(skb, param, IRLAN_ARRAY, 0, 0, string, 983 return __irlan_insert_param(skb, param, IRLAN_ARRAY, 0, 0, string,
984 string_len); 984 string_len);
985} 985}
986 986
@@ -993,7 +993,7 @@ int irlan_insert_string_param(struct sk_buff *skb, char *param, char *string)
993int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *array, 993int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *array,
994 __u16 array_len) 994 __u16 array_len)
995{ 995{
996 return __irlan_insert_param(skb, name, IRLAN_ARRAY, 0, 0, array, 996 return __irlan_insert_param(skb, name, IRLAN_ARRAY, 0, 0, array,
997 array_len); 997 array_len);
998} 998}
999 999
@@ -1006,19 +1006,19 @@ int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *array,
1006 * | Name Length[1] | Param Name[1..255] | Val Length[2] | Value[0..1016]| 1006 * | Name Length[1] | Param Name[1..255] | Val Length[2] | Value[0..1016]|
1007 * ----------------------------------------------------------------------- 1007 * -----------------------------------------------------------------------
1008 */ 1008 */
1009static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, 1009static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
1010 __u8 value_byte, __u16 value_short, 1010 __u8 value_byte, __u16 value_short,
1011 __u8 *value_array, __u16 value_len) 1011 __u8 *value_array, __u16 value_len)
1012{ 1012{
1013 __u8 *frame; 1013 __u8 *frame;
1014 __u8 param_len; 1014 __u8 param_len;
1015 __le16 tmp_le; /* Temporary value in little endian format */ 1015 __le16 tmp_le; /* Temporary value in little endian format */
1016 int n=0; 1016 int n=0;
1017 1017
1018 if (skb == NULL) { 1018 if (skb == NULL) {
1019 IRDA_DEBUG(2, "%s(), Got NULL skb\n", __FUNCTION__ ); 1019 IRDA_DEBUG(2, "%s(), Got NULL skb\n", __FUNCTION__ );
1020 return 0; 1020 return 0;
1021 } 1021 }
1022 1022
1023 param_len = strlen(param); 1023 param_len = strlen(param);
1024 switch (type) { 1024 switch (type) {
@@ -1037,7 +1037,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
1037 return 0; 1037 return 0;
1038 break; 1038 break;
1039 } 1039 }
1040 1040
1041 /* Insert at end of sk-buffer */ 1041 /* Insert at end of sk-buffer */
1042 frame = skb->tail; 1042 frame = skb->tail;
1043 1043
@@ -1045,15 +1045,15 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
1045 if (skb_tailroom(skb) < (param_len+value_len+3)) { 1045 if (skb_tailroom(skb) < (param_len+value_len+3)) {
1046 IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __FUNCTION__ ); 1046 IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __FUNCTION__ );
1047 return 0; 1047 return 0;
1048 } 1048 }
1049 skb_put(skb, param_len+value_len+3); 1049 skb_put(skb, param_len+value_len+3);
1050 1050
1051 /* Insert parameter length */ 1051 /* Insert parameter length */
1052 frame[n++] = param_len; 1052 frame[n++] = param_len;
1053 1053
1054 /* Insert parameter */ 1054 /* Insert parameter */
1055 memcpy(frame+n, param, param_len); n += param_len; 1055 memcpy(frame+n, param, param_len); n += param_len;
1056 1056
1057 /* Insert value length (2 byte little endian format, LSB first) */ 1057 /* Insert value length (2 byte little endian format, LSB first) */
1058 tmp_le = cpu_to_le16(value_len); 1058 tmp_le = cpu_to_le16(value_len);
1059 memcpy(frame+n, &tmp_le, 2); n += 2; /* To avoid alignment problems */ 1059 memcpy(frame+n, &tmp_le, 2); n += 2; /* To avoid alignment problems */
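For readers who want the encoding in isolation, here is a minimal standalone sketch of the layout handled above: a 1-byte name length, the name, a 2-byte little-endian value length, then the value bytes. The helper name and userspace types are illustrative only; the kernel code above does the same work directly on the sk_buff and also covers the byte/short/array cases.

#include <stdint.h>
#include <string.h>

/* Returns bytes written, or 0 if the name or destination buffer is too small. */
static size_t irlan_encode_param_sketch(uint8_t *dst, size_t dst_len,
					const char *name,
					const uint8_t *value, uint16_t value_len)
{
	size_t name_len = strlen(name);
	size_t n = 0;

	if (name_len > 254 || dst_len < 1 + name_len + 2 + value_len)
		return 0;

	dst[n++] = (uint8_t) name_len;          /* name length (1 byte)       */
	memcpy(dst + n, name, name_len);        /* parameter name             */
	n += name_len;
	dst[n++] = value_len & 0xff;            /* value length, LSB first    */
	dst[n++] = value_len >> 8;              /* value length, MSB          */
	memcpy(dst + n, value, value_len);      /* value bytes                */
	n += value_len;

	return n;
}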
@@ -1082,36 +1082,36 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
1082 * Function irlan_extract_param (buf, name, value, len) 1082 * Function irlan_extract_param (buf, name, value, len)
1083 * 1083 *
1084 * Extracts a single parameter name/value pair from the buffer and returns 1084 * Extracts a single parameter name/value pair from the buffer and returns
1085 * the number of bytes consumed, so the caller can advance to the next pair. 1085 * the number of bytes consumed, so the caller can advance to the next pair.
1086 */ 1086 */
1087int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) 1087int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1088{ 1088{
1089 __u8 name_len; 1089 __u8 name_len;
1090 __u16 val_len; 1090 __u16 val_len;
1091 int n=0; 1091 int n=0;
1092 1092
1093 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 1093 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
1094 1094
1095 /* get length of parameter name (1 byte) */ 1095 /* get length of parameter name (1 byte) */
1096 name_len = buf[n++]; 1096 name_len = buf[n++];
1097 1097
1098 if (name_len > 254) { 1098 if (name_len > 254) {
1099 IRDA_DEBUG(2, "%s(), name_len > 254\n", __FUNCTION__ ); 1099 IRDA_DEBUG(2, "%s(), name_len > 254\n", __FUNCTION__ );
1100 return -RSP_INVALID_COMMAND_FORMAT; 1100 return -RSP_INVALID_COMMAND_FORMAT;
1101 } 1101 }
1102 1102
1103 /* get parameter name */ 1103 /* get parameter name */
1104 memcpy(name, buf+n, name_len); 1104 memcpy(name, buf+n, name_len);
1105 name[name_len] = '\0'; 1105 name[name_len] = '\0';
1106 n+=name_len; 1106 n+=name_len;
1107 1107
1108 /* 1108 /*
1109 * Get length of parameter value (2 bytes in little endian 1109 * Get length of parameter value (2 bytes in little endian
1110 * format) 1110 * format)
1111 */ 1111 */
1112 memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ 1112 memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
1113 le16_to_cpus(&val_len); n+=2; 1113 le16_to_cpus(&val_len); n+=2;
1114 1114
1115 if (val_len > 1016) { 1115 if (val_len > 1016) {
1116 		IRDA_DEBUG(2, "%s(), parameter length too long\n", __FUNCTION__ ); 1116 		IRDA_DEBUG(2, "%s(), parameter length too long\n", __FUNCTION__ );
1117 return -RSP_INVALID_COMMAND_FORMAT; 1117 return -RSP_INVALID_COMMAND_FORMAT;
@@ -1122,9 +1122,9 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1122 memcpy(value, buf+n, val_len); 1122 memcpy(value, buf+n, val_len);
1123 value[val_len] = '\0'; 1123 value[val_len] = '\0';
1124 n+=val_len; 1124 n+=val_len;
1125 1125
1126 IRDA_DEBUG(4, "Parameter: %s ", name); 1126 IRDA_DEBUG(4, "Parameter: %s ", name);
1127 IRDA_DEBUG(4, "Value: %s\n", value); 1127 IRDA_DEBUG(4, "Value: %s\n", value);
1128 1128
1129 return n; 1129 return n;
1130} 1130}
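A hypothetical caller (not taken from this file) walks a received command frame by skipping the two header bytes and then letting irlan_extract_param() consume one pair at a time, advancing by its return value; a negative return flags a malformed parameter. The value length is assumed here to come back through the last argument.

static void irlan_walk_params_sketch(__u8 *buf, int param_count)
{
	char name[256];         /* name length is checked to be <= 254 above   */
	char value[1017];       /* value length is checked to be <= 1016 above */
	__u16 val_len;
	int i, ret;

	for (i = 0; i < param_count; i++) {
		ret = irlan_extract_param(buf, name, value, &val_len);
		if (ret < 0)
			break;          /* malformed name/value pair          */
		buf += ret;             /* step over the pair just consumed   */
		/* ... act on (name, value, val_len) here ... */
	}
}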
@@ -1133,7 +1133,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1133 1133
1134/* 1134/*
1135 * Start of reading /proc entries. 1135 * Start of reading /proc entries.
1136 * Return entry at pos, 1136 * Return entry at pos,
1137 * or start_token to indicate print header line 1137 * or start_token to indicate print header line
1138 * or NULL if end of file 1138 * or NULL if end of file
1139 */ 1139 */
@@ -1147,7 +1147,7 @@ static void *irlan_seq_start(struct seq_file *seq, loff_t *pos)
1147 return SEQ_START_TOKEN; 1147 return SEQ_START_TOKEN;
1148 1148
1149 list_for_each_entry(self, &irlans, dev_list) { 1149 list_for_each_entry(self, &irlans, dev_list) {
1150 if (*pos == i) 1150 if (*pos == i)
1151 return self; 1151 return self;
1152 ++i; 1152 ++i;
1153 } 1153 }
@@ -1160,12 +1160,12 @@ static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1160 struct list_head *nxt; 1160 struct list_head *nxt;
1161 1161
1162 ++*pos; 1162 ++*pos;
1163 if (v == SEQ_START_TOKEN) 1163 if (v == SEQ_START_TOKEN)
1164 nxt = irlans.next; 1164 nxt = irlans.next;
1165 else 1165 else
1166 nxt = ((struct irlan_cb *)v)->dev_list.next; 1166 nxt = ((struct irlan_cb *)v)->dev_list.next;
1167 1167
1168 return (nxt == &irlans) ? NULL 1168 return (nxt == &irlans) ? NULL
1169 : list_entry(nxt, struct irlan_cb, dev_list); 1169 : list_entry(nxt, struct irlan_cb, dev_list);
1170} 1170}
1171 1171
@@ -1185,7 +1185,7 @@ static int irlan_seq_show(struct seq_file *seq, void *v)
1185 seq_puts(seq, "IrLAN instances:\n"); 1185 seq_puts(seq, "IrLAN instances:\n");
1186 else { 1186 else {
1187 struct irlan_cb *self = v; 1187 struct irlan_cb *self = v;
1188 1188
1189 IRDA_ASSERT(self != NULL, return -1;); 1189 IRDA_ASSERT(self != NULL, return -1;);
1190 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 1190 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
1191 1191
@@ -1201,17 +1201,17 @@ static int irlan_seq_show(struct seq_file *seq, void *v)
1201 self->daddr); 1201 self->daddr);
1202 seq_printf(seq,"version: %d.%d,\n", 1202 seq_printf(seq,"version: %d.%d,\n",
1203 self->version[1], self->version[0]); 1203 self->version[1], self->version[0]);
1204 seq_printf(seq,"access type: %s\n", 1204 seq_printf(seq,"access type: %s\n",
1205 irlan_access[self->client.access_type]); 1205 irlan_access[self->client.access_type]);
1206 seq_printf(seq,"media: %s\n", 1206 seq_printf(seq,"media: %s\n",
1207 irlan_media[self->media]); 1207 irlan_media[self->media]);
1208 1208
1209 seq_printf(seq,"local filter:\n"); 1209 seq_printf(seq,"local filter:\n");
1210 seq_printf(seq,"remote filter: "); 1210 seq_printf(seq,"remote filter: ");
1211 irlan_print_filter(seq, self->client.filter_type); 1211 irlan_print_filter(seq, self->client.filter_type);
1212 seq_printf(seq,"tx busy: %s\n", 1212 seq_printf(seq,"tx busy: %s\n",
1213 netif_queue_stopped(self->dev) ? "TRUE" : "FALSE"); 1213 netif_queue_stopped(self->dev) ? "TRUE" : "FALSE");
1214 1214
1215 seq_putc(seq,'\n'); 1215 seq_putc(seq,'\n');
1216 } 1216 }
1217 return 0; 1217 return 0;
@@ -1231,7 +1231,7 @@ static int irlan_seq_open(struct inode *inode, struct file *file)
1231#endif 1231#endif
1232 1232
1233MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); 1233MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1234MODULE_DESCRIPTION("The Linux IrDA LAN protocol"); 1234MODULE_DESCRIPTION("The Linux IrDA LAN protocol");
1235MODULE_LICENSE("GPL"); 1235MODULE_LICENSE("GPL");
1236 1236
1237module_param(eth, bool, 0); 1237module_param(eth, bool, 0);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index b0ccc455b747..672ab3f69033 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -1,8 +1,8 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_eth.c 3 * Filename: irlan_eth.c
4 * Version: 4 * Version:
5 * Description: 5 * Description:
6 * Status: Experimental. 6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no> 7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Thu Oct 15 08:37:58 1998 8 * Created at: Thu Oct 15 08:37:58 1998
@@ -11,18 +11,18 @@
11 * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov> 11 * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
12 * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk> 12 * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
13 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> 13 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
14 * 14 *
15 * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved. 15 * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
16 * 16 *
17 * This program is free software; you can redistribute it and/or 17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as 18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of 19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version. 20 * the License, or (at your option) any later version.
21 * 21 *
22 * Neither Dag Brattli nor University of Tromsø admit liability nor 22 * Neither Dag Brattli nor University of Tromsø admit liability nor
23 * provide warranty for any of this software. This material is 23 * provide warranty for any of this software. This material is
24 * provided "AS-IS" and at no charge. 24 * provided "AS-IS" and at no charge.
25 * 25 *
26 ********************************************************************/ 26 ********************************************************************/
27 27
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -55,7 +55,7 @@ static void irlan_eth_setup(struct net_device *dev)
55{ 55{
56 dev->open = irlan_eth_open; 56 dev->open = irlan_eth_open;
57 dev->stop = irlan_eth_close; 57 dev->stop = irlan_eth_close;
58 dev->hard_start_xmit = irlan_eth_xmit; 58 dev->hard_start_xmit = irlan_eth_xmit;
59 dev->get_stats = irlan_eth_get_stats; 59 dev->get_stats = irlan_eth_get_stats;
60 dev->set_multicast_list = irlan_eth_set_multicast_list; 60 dev->set_multicast_list = irlan_eth_set_multicast_list;
61 dev->destructor = free_netdev; 61 dev->destructor = free_netdev;
@@ -63,8 +63,8 @@ static void irlan_eth_setup(struct net_device *dev)
63 SET_MODULE_OWNER(dev); 63 SET_MODULE_OWNER(dev);
64 64
65 ether_setup(dev); 65 ether_setup(dev);
66 66
67 /* 67 /*
68	 * Let's do all queueing in IrTTP instead of this device driver. 68	 * Let's do all queueing in IrTTP instead of this device driver.
69 * Queueing here as well can introduce some strange latency 69 * Queueing here as well can introduce some strange latency
70 * problems, which we will avoid by setting the queue size to 0. 70 * problems, which we will avoid by setting the queue size to 0.
@@ -104,17 +104,17 @@ struct net_device *alloc_irlandev(const char *name)
104static int irlan_eth_open(struct net_device *dev) 104static int irlan_eth_open(struct net_device *dev)
105{ 105{
106 struct irlan_cb *self = netdev_priv(dev); 106 struct irlan_cb *self = netdev_priv(dev);
107 107
108 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 108 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
109 109
110 /* Ready to play! */ 110 /* Ready to play! */
111 netif_stop_queue(dev); /* Wait until data link is ready */ 111 netif_stop_queue(dev); /* Wait until data link is ready */
112 112
113 /* We are now open, so time to do some work */ 113 /* We are now open, so time to do some work */
114 self->disconnect_reason = 0; 114 self->disconnect_reason = 0;
115 irlan_client_wakeup(self, self->saddr, self->daddr); 115 irlan_client_wakeup(self, self->saddr, self->daddr);
116 116
117 /* Make sure we have a hardware address before we return, 117 /* Make sure we have a hardware address before we return,
118	 so DHCP clients get happy */ 118	 so DHCP clients get happy */
119 return wait_event_interruptible(self->open_wait, 119 return wait_event_interruptible(self->open_wait,
120 !self->tsap_data->connected); 120 !self->tsap_data->connected);
@@ -124,30 +124,30 @@ static int irlan_eth_open(struct net_device *dev)
124 * Function irlan_eth_close (dev) 124 * Function irlan_eth_close (dev)
125 * 125 *
126 * Stop the ether network device; this function will usually be called by 126 * Stop the ether network device; this function will usually be called by
127 * ifconfig down. We should now disconnect the link. We start the 127 * ifconfig down. We should now disconnect the link. We start the
128 * close timer, so that the instance will be removed if we are unable 128 * close timer, so that the instance will be removed if we are unable
129 * to discover the remote device after the disconnect. 129 * to discover the remote device after the disconnect.
130 */ 130 */
131static int irlan_eth_close(struct net_device *dev) 131static int irlan_eth_close(struct net_device *dev)
132{ 132{
133 struct irlan_cb *self = netdev_priv(dev); 133 struct irlan_cb *self = netdev_priv(dev);
134 134
135 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 135 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
136 136
137 /* Stop device */ 137 /* Stop device */
138 netif_stop_queue(dev); 138 netif_stop_queue(dev);
139 139
140 irlan_close_data_channel(self); 140 irlan_close_data_channel(self);
141 irlan_close_tsaps(self); 141 irlan_close_tsaps(self);
142 142
143 irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL); 143 irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
144 irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL); 144 irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
145 145
146 /* Remove frames queued on the control channel */ 146 /* Remove frames queued on the control channel */
147 skb_queue_purge(&self->client.txq); 147 skb_queue_purge(&self->client.txq);
148 148
149 self->client.tx_busy = 0; 149 self->client.tx_busy = 0;
150 150
151 return 0; 151 return 0;
152} 152}
153 153
@@ -164,7 +164,7 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
164 164
165 /* skb headroom large enough to contain all IrDA-headers? */ 165 /* skb headroom large enough to contain all IrDA-headers? */
166 if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) { 166 if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
167 struct sk_buff *new_skb = 167 struct sk_buff *new_skb =
168 skb_realloc_headroom(skb, self->max_header_size); 168 skb_realloc_headroom(skb, self->max_header_size);
169 169
170 /* We have to free the original skb anyway */ 170 /* We have to free the original skb anyway */
@@ -176,7 +176,7 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
176 176
177 /* Use the new skb instead */ 177 /* Use the new skb instead */
178 skb = new_skb; 178 skb = new_skb;
179 } 179 }
180 180
181 dev->trans_start = jiffies; 181 dev->trans_start = jiffies;
182 182
@@ -187,7 +187,7 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
187 ret = irttp_data_request(self->tsap_data, skb); 187 ret = irttp_data_request(self->tsap_data, skb);
188 188
189 if (ret < 0) { 189 if (ret < 0) {
190 /* 190 /*
191		 * IrTTP's tx queue is full, so we just have to 191		 * IrTTP's tx queue is full, so we just have to
192		 * drop the frame! You might think that we should 192		 * drop the frame! You might think that we should
193		 * just return -1 and not deallocate the frame, 193		 * just return -1 and not deallocate the frame,
@@ -195,15 +195,15 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
195 * we have replaced the original skb with a new 195 * we have replaced the original skb with a new
196 * one with larger headroom, and that would really 196 * one with larger headroom, and that would really
197 * confuse do_dev_queue_xmit() in dev.c! I have 197 * confuse do_dev_queue_xmit() in dev.c! I have
198 * tried :-) DB 198 * tried :-) DB
199 */ 199 */
200		/* irttp_data_request has already freed the packet */ 200		/* irttp_data_request has already freed the packet */
201 self->stats.tx_dropped++; 201 self->stats.tx_dropped++;
202 } else { 202 } else {
203 self->stats.tx_packets++; 203 self->stats.tx_packets++;
204 self->stats.tx_bytes += skb->len; 204 self->stats.tx_bytes += skb->len;
205 } 205 }
206 206
207 return 0; 207 return 0;
208} 208}
209 209
@@ -218,37 +218,37 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
218 struct irlan_cb *self = instance; 218 struct irlan_cb *self = instance;
219 219
220 if (skb == NULL) { 220 if (skb == NULL) {
221 ++self->stats.rx_dropped; 221 ++self->stats.rx_dropped;
222 return 0; 222 return 0;
223 } 223 }
224 if (skb->len < ETH_HLEN) { 224 if (skb->len < ETH_HLEN) {
225 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", 225 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
226 __FUNCTION__, skb->len); 226 __FUNCTION__, skb->len);
227 ++self->stats.rx_dropped; 227 ++self->stats.rx_dropped;
228 dev_kfree_skb(skb); 228 dev_kfree_skb(skb);
229 return 0; 229 return 0;
230 } 230 }
231 231
232 /* 232 /*
233 * Adopt this frame! Important to set all these fields since they 233 * Adopt this frame! Important to set all these fields since they
234 * might have been previously set by the low level IrDA network 234 * might have been previously set by the low level IrDA network
235 * device driver 235 * device driver
236 */ 236 */
237 skb->dev = self->dev; 237 skb->dev = self->dev;
238 skb->protocol=eth_type_trans(skb, skb->dev); /* Remove eth header */ 238 skb->protocol=eth_type_trans(skb, skb->dev); /* Remove eth header */
239 239
240 self->stats.rx_packets++; 240 self->stats.rx_packets++;
241 self->stats.rx_bytes += skb->len; 241 self->stats.rx_bytes += skb->len;
242 242
243 netif_rx(skb); /* Eat it! */ 243 netif_rx(skb); /* Eat it! */
244 244
245 return 0; 245 return 0;
246} 246}
247 247
248/* 248/*
249 * Function irlan_eth_flow (status) 249 * Function irlan_eth_flow (status)
250 * 250 *
251 * Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by 251 * Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by
252 * controlling the queue stop/start. 252 * controlling the queue stop/start.
253 * 253 *
254 * The IrDA link layer has the advantage of having flow control, and 254 * The IrDA link layer has the advantage of having flow control, and
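The handler whose declaration follows in the next hunk applies the usual pattern this comment describes: pause the netdev queue on FLOW_STOP and wake it on FLOW_START. A minimal sketch of that pattern (illustrative only; the real irlan_eth_flow_indication() also validates self and its magic, and its full body is not shown in this hunk):

static void irlan_eth_flow_sketch(struct net_device *dev, LOCAL_FLOW flow)
{
	switch (flow) {
	case FLOW_STOP:
		/* IrTTP's tx queue is filling up: stop feeding it */
		netif_stop_queue(dev);
		break;
	case FLOW_START:
	default:
		/* IrTTP has drained: let the stack transmit again */
		netif_wake_queue(dev);
		break;
	}
}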
@@ -268,11 +268,11 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
268 268
269 IRDA_ASSERT(self != NULL, return;); 269 IRDA_ASSERT(self != NULL, return;);
270 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 270 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
271 271
272 dev = self->dev; 272 dev = self->dev;
273 273
274 IRDA_ASSERT(dev != NULL, return;); 274 IRDA_ASSERT(dev != NULL, return;);
275 275
276 IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __FUNCTION__, 276 IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __FUNCTION__,
277 flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START", 277 flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
278 netif_running(dev)); 278 netif_running(dev));
@@ -301,10 +301,10 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev)
301{ 301{
302 struct in_device *in_dev; 302 struct in_device *in_dev;
303 303
304 /* 304 /*
305 * When we get a new MAC address do a gratuitous ARP. This 305 * When we get a new MAC address do a gratuitous ARP. This
306 * is useful if we have changed access points on the same 306 * is useful if we have changed access points on the same
307 * subnet. 307 * subnet.
308 */ 308 */
309#ifdef CONFIG_INET 309#ifdef CONFIG_INET
310 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n"); 310 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n");
@@ -313,10 +313,10 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev)
313 if (in_dev == NULL) 313 if (in_dev == NULL)
314 goto out; 314 goto out;
315 if (in_dev->ifa_list) 315 if (in_dev->ifa_list)
316 316
317 arp_send(ARPOP_REQUEST, ETH_P_ARP, 317 arp_send(ARPOP_REQUEST, ETH_P_ARP,
318 in_dev->ifa_list->ifa_address, 318 in_dev->ifa_list->ifa_address,
319 dev, 319 dev,
320 in_dev->ifa_list->ifa_address, 320 in_dev->ifa_list->ifa_address,
321 NULL, dev->dev_addr, NULL); 321 NULL, dev->dev_addr, NULL);
322out: 322out:
@@ -331,9 +331,9 @@ out:
331 * 331 *
332 */ 332 */
333#define HW_MAX_ADDRS 4 /* Must query to get it! */ 333#define HW_MAX_ADDRS 4 /* Must query to get it! */
334static void irlan_eth_set_multicast_list(struct net_device *dev) 334static void irlan_eth_set_multicast_list(struct net_device *dev)
335{ 335{
336 struct irlan_cb *self = netdev_priv(dev); 336 struct irlan_cb *self = netdev_priv(dev);
337 337
338 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 338 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
339 339
@@ -346,7 +346,7 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
346 if (dev->flags & IFF_PROMISC) { 346 if (dev->flags & IFF_PROMISC) {
347 /* Enable promiscuous mode */ 347 /* Enable promiscuous mode */
348 IRDA_WARNING("Promiscous mode not implemented by IrLAN!\n"); 348 IRDA_WARNING("Promiscous mode not implemented by IrLAN!\n");
349 } 349 }
350 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { 350 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
351 /* Disable promiscuous mode, use normal mode. */ 351 /* Disable promiscuous mode, use normal mode. */
352 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ ); 352 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ );
@@ -378,7 +378,7 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
378 * Get the current statistics for this device 378 * Get the current statistics for this device
379 * 379 *
380 */ 380 */
381static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev) 381static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
382{ 382{
383 struct irlan_cb *self = netdev_priv(dev); 383 struct irlan_cb *self = netdev_priv(dev);
384 384
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index 2778d8c6aa31..623e0fd16c19 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -1,25 +1,25 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_event.c 3 * Filename: irlan_event.c
4 * Version: 4 * Version:
5 * Description: 5 * Description:
6 * Status: Experimental. 6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no> 7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Tue Oct 20 09:10:16 1998 8 * Created at: Tue Oct 20 09:10:16 1998
9 * Modified at: Sat Oct 30 12:59:01 1999 9 * Modified at: Sat Oct 30 12:59:01 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. 12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
13 * 13 *
14 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as 15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
23 ********************************************************************/ 23 ********************************************************************/
24 24
25#include <net/irda/irlan_event.h> 25#include <net/irda/irlan_event.h>
@@ -38,7 +38,7 @@ char *irlan_state[] = {
38 "IRLAN_SYNC", 38 "IRLAN_SYNC",
39}; 39};
40 40
41void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) 41void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
42{ 42{
43 IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]); 43 IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]);
44 44
@@ -48,7 +48,7 @@ void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
48 self->client.state = state; 48 self->client.state = state;
49} 49}
50 50
51void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state) 51void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state)
52{ 52{
53 IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]); 53 IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]);
54 54
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c
index ca7d358dab52..e6346b88f934 100644
--- a/net/irda/irlan/irlan_filter.c
+++ b/net/irda/irlan/irlan_filter.c
@@ -1,25 +1,25 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_filter.c 3 * Filename: irlan_filter.c
4 * Version: 4 * Version:
5 * Description: 5 * Description:
6 * Status: Experimental. 6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no> 7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Fri Jan 29 11:16:38 1999 8 * Created at: Fri Jan 29 11:16:38 1999
9 * Modified at: Sat Oct 30 12:58:45 1999 9 * Modified at: Sat Oct 30 12:58:45 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. 12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
13 * 13 *
14 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as 15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
23 ********************************************************************/ 23 ********************************************************************/
24 24
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
@@ -40,7 +40,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
40 IRDA_ASSERT(self != NULL, return;); 40 IRDA_ASSERT(self != NULL, return;);
41 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 41 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
42 42
43 if ((self->provider.filter_type == IRLAN_DIRECTED) && 43 if ((self->provider.filter_type == IRLAN_DIRECTED) &&
44 (self->provider.filter_operation == DYNAMIC)) 44 (self->provider.filter_operation == DYNAMIC))
45 { 45 {
46 IRDA_DEBUG(0, "Giving peer a dynamic Ethernet address\n"); 46 IRDA_DEBUG(0, "Giving peer a dynamic Ethernet address\n");
@@ -48,12 +48,12 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
48 self->provider.mac_address[1] = 0x00; 48 self->provider.mac_address[1] = 0x00;
49 self->provider.mac_address[2] = 0x00; 49 self->provider.mac_address[2] = 0x00;
50 self->provider.mac_address[3] = 0x00; 50 self->provider.mac_address[3] = 0x00;
51 51
52 /* Use arbitration value to generate MAC address */ 52 /* Use arbitration value to generate MAC address */
53 if (self->provider.access_type == ACCESS_PEER) { 53 if (self->provider.access_type == ACCESS_PEER) {
54 self->provider.mac_address[4] = 54 self->provider.mac_address[4] =
55 self->provider.send_arb_val & 0xff; 55 self->provider.send_arb_val & 0xff;
56 self->provider.mac_address[5] = 56 self->provider.mac_address[5] =
57 (self->provider.send_arb_val >> 8) & 0xff; 57 (self->provider.send_arb_val >> 8) & 0xff;
58 } else { 58 } else {
59 /* Just generate something for now */ 59 /* Just generate something for now */
@@ -65,12 +65,12 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
65 skb->data[1] = 0x03; 65 skb->data[1] = 0x03;
66 irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); 66 irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
67 irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001); 67 irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001);
68 irlan_insert_array_param(skb, "FILTER_ENTRY", 68 irlan_insert_array_param(skb, "FILTER_ENTRY",
69 self->provider.mac_address, 6); 69 self->provider.mac_address, 6);
70 return; 70 return;
71 } 71 }
72 72
73 if ((self->provider.filter_type == IRLAN_DIRECTED) && 73 if ((self->provider.filter_type == IRLAN_DIRECTED) &&
74 (self->provider.filter_mode == FILTER)) 74 (self->provider.filter_mode == FILTER))
75 { 75 {
76 IRDA_DEBUG(0, "Directed filter on\n"); 76 IRDA_DEBUG(0, "Directed filter on\n");
@@ -78,7 +78,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
78 skb->data[1] = 0x00; 78 skb->data[1] = 0x00;
79 return; 79 return;
80 } 80 }
81 if ((self->provider.filter_type == IRLAN_DIRECTED) && 81 if ((self->provider.filter_type == IRLAN_DIRECTED) &&
82 (self->provider.filter_mode == NONE)) 82 (self->provider.filter_mode == NONE))
83 { 83 {
84 IRDA_DEBUG(0, "Directed filter off\n"); 84 IRDA_DEBUG(0, "Directed filter off\n");
@@ -87,7 +87,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
87 return; 87 return;
88 } 88 }
89 89
90 if ((self->provider.filter_type == IRLAN_BROADCAST) && 90 if ((self->provider.filter_type == IRLAN_BROADCAST) &&
91 (self->provider.filter_mode == FILTER)) 91 (self->provider.filter_mode == FILTER))
92 { 92 {
93 IRDA_DEBUG(0, "Broadcast filter on\n"); 93 IRDA_DEBUG(0, "Broadcast filter on\n");
@@ -95,7 +95,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
95 skb->data[1] = 0x00; 95 skb->data[1] = 0x00;
96 return; 96 return;
97 } 97 }
98 if ((self->provider.filter_type == IRLAN_BROADCAST) && 98 if ((self->provider.filter_type == IRLAN_BROADCAST) &&
99 (self->provider.filter_mode == NONE)) 99 (self->provider.filter_mode == NONE))
100 { 100 {
101 IRDA_DEBUG(0, "Broadcast filter off\n"); 101 IRDA_DEBUG(0, "Broadcast filter off\n");
@@ -103,7 +103,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
103 skb->data[1] = 0x00; 103 skb->data[1] = 0x00;
104 return; 104 return;
105 } 105 }
106 if ((self->provider.filter_type == IRLAN_MULTICAST) && 106 if ((self->provider.filter_type == IRLAN_MULTICAST) &&
107 (self->provider.filter_mode == FILTER)) 107 (self->provider.filter_mode == FILTER))
108 { 108 {
109 IRDA_DEBUG(0, "Multicast filter on\n"); 109 IRDA_DEBUG(0, "Multicast filter on\n");
@@ -111,7 +111,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
111 skb->data[1] = 0x00; 111 skb->data[1] = 0x00;
112 return; 112 return;
113 } 113 }
114 if ((self->provider.filter_type == IRLAN_MULTICAST) && 114 if ((self->provider.filter_type == IRLAN_MULTICAST) &&
115 (self->provider.filter_mode == NONE)) 115 (self->provider.filter_mode == NONE))
116 { 116 {
117 IRDA_DEBUG(0, "Multicast filter off\n"); 117 IRDA_DEBUG(0, "Multicast filter off\n");
@@ -119,7 +119,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
119 skb->data[1] = 0x00; 119 skb->data[1] = 0x00;
120 return; 120 return;
121 } 121 }
122 if ((self->provider.filter_type == IRLAN_MULTICAST) && 122 if ((self->provider.filter_type == IRLAN_MULTICAST) &&
123 (self->provider.filter_operation == GET)) 123 (self->provider.filter_operation == GET))
124 { 124 {
125 IRDA_DEBUG(0, "Multicast filter get\n"); 125 IRDA_DEBUG(0, "Multicast filter get\n");
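The irlan_filter.c hunk above derives a peer MAC address from the 16-bit arbitration value: the low byte lands in mac_address[4] and the high byte in mac_address[5]. Below is a minimal user-space C sketch of that mapping; the 0x40 prefix byte is an assumption (the first byte is set above the visible hunk), and make_peer_mac is a hypothetical helper name, not kernel code.

#include <stdint.h>

/* Hypothetical helper mirroring the mapping in the hunk above: the 16-bit
 * arbitration value fills the last two MAC bytes, low byte first.
 * The 0x40 first byte is an assumption, not visible in the hunk. */
static void make_peer_mac(uint8_t mac[6], uint16_t send_arb_val)
{
        mac[0] = 0x40;
        mac[1] = 0x00;
        mac[2] = 0x00;
        mac[3] = 0x00;
        mac[4] = send_arb_val & 0xff;
        mac[5] = (send_arb_val >> 8) & 0xff;
}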
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 58efde919667..aac66434e473 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_provider.c 3 * Filename: irlan_provider.c
4 * Version: 0.9 4 * Version: 0.9
5 * Description: IrDA LAN Access Protocol Implementation 5 * Description: IrDA LAN Access Protocol Implementation
@@ -11,17 +11,17 @@
11 * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov> 11 * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
12 * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk> 12 * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
13 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> 13 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
14 * 14 *
15 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, 15 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
16 * All Rights Reserved. 16 * All Rights Reserved.
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License as 19 * modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of 20 * published by the Free Software Foundation; either version 2 of
21 * the License, or (at your option) any later version. 21 * the License, or (at your option) any later version.
22 * 22 *
23 * Neither Dag Brattli nor University of Tromsø admit liability nor 23 * Neither Dag Brattli nor University of Tromsø admit liability nor
24 * provide warranty for any of this software. This material is 24 * provide warranty for any of this software. This material is
25 * provided "AS-IS" and at no charge. 25 * provided "AS-IS" and at no charge.
26 * 26 *
27 ********************************************************************/ 27 ********************************************************************/
@@ -52,8 +52,8 @@
52#include <net/irda/irlan_filter.h> 52#include <net/irda/irlan_filter.h>
53#include <net/irda/irlan_client.h> 53#include <net/irda/irlan_client.h>
54 54
55static void irlan_provider_connect_indication(void *instance, void *sap, 55static void irlan_provider_connect_indication(void *instance, void *sap,
56 struct qos_info *qos, 56 struct qos_info *qos,
57 __u32 max_sdu_size, 57 __u32 max_sdu_size,
58 __u8 max_header_size, 58 __u8 max_header_size,
59 struct sk_buff *skb); 59 struct sk_buff *skb);
@@ -64,14 +64,14 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
64 * This function gets the data that is received on the control channel 64 * This function gets the data that is received on the control channel
65 * 65 *
66 */ 66 */
67static int irlan_provider_data_indication(void *instance, void *sap, 67static int irlan_provider_data_indication(void *instance, void *sap,
68 struct sk_buff *skb) 68 struct sk_buff *skb)
69{ 69{
70 struct irlan_cb *self; 70 struct irlan_cb *self;
71 __u8 code; 71 __u8 code;
72 72
73 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 73 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
74 74
75 self = (struct irlan_cb *) instance; 75 self = (struct irlan_cb *) instance;
76 76
77 IRDA_ASSERT(self != NULL, return -1;); 77 IRDA_ASSERT(self != NULL, return -1;);
@@ -83,16 +83,16 @@ static int irlan_provider_data_indication(void *instance, void *sap,
83 switch(code) { 83 switch(code) {
84 case CMD_GET_PROVIDER_INFO: 84 case CMD_GET_PROVIDER_INFO:
85 IRDA_DEBUG(4, "Got GET_PROVIDER_INFO command!\n"); 85 IRDA_DEBUG(4, "Got GET_PROVIDER_INFO command!\n");
86 irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb); 86 irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb);
87 break; 87 break;
88 88
89 case CMD_GET_MEDIA_CHAR: 89 case CMD_GET_MEDIA_CHAR:
90 IRDA_DEBUG(4, "Got GET_MEDIA_CHAR command!\n"); 90 IRDA_DEBUG(4, "Got GET_MEDIA_CHAR command!\n");
91 irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb); 91 irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb);
92 break; 92 break;
93 case CMD_OPEN_DATA_CHANNEL: 93 case CMD_OPEN_DATA_CHANNEL:
94 IRDA_DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n"); 94 IRDA_DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n");
95 irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb); 95 irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb);
96 break; 96 break;
97 case CMD_FILTER_OPERATION: 97 case CMD_FILTER_OPERATION:
98 IRDA_DEBUG(4, "Got FILTER_OPERATION command!\n"); 98 IRDA_DEBUG(4, "Got FILTER_OPERATION command!\n");
@@ -119,9 +119,9 @@ static int irlan_provider_data_indication(void *instance, void *sap,
119 * Got connection from peer IrLAN client 119 * Got connection from peer IrLAN client
120 * 120 *
121 */ 121 */
122static void irlan_provider_connect_indication(void *instance, void *sap, 122static void irlan_provider_connect_indication(void *instance, void *sap,
123 struct qos_info *qos, 123 struct qos_info *qos,
124 __u32 max_sdu_size, 124 __u32 max_sdu_size,
125 __u8 max_header_size, 125 __u8 max_header_size,
126 struct sk_buff *skb) 126 struct sk_buff *skb)
127{ 127{
@@ -130,13 +130,13 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
130 __u32 saddr, daddr; 130 __u32 saddr, daddr;
131 131
132 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 132 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
133 133
134 self = (struct irlan_cb *) instance; 134 self = (struct irlan_cb *) instance;
135 tsap = (struct tsap_cb *) sap; 135 tsap = (struct tsap_cb *) sap;
136 136
137 IRDA_ASSERT(self != NULL, return;); 137 IRDA_ASSERT(self != NULL, return;);
138 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 138 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
139 139
140 IRDA_ASSERT(tsap == self->provider.tsap_ctrl,return;); 140 IRDA_ASSERT(tsap == self->provider.tsap_ctrl,return;);
141 IRDA_ASSERT(self->provider.state == IRLAN_IDLE, return;); 141 IRDA_ASSERT(self->provider.state == IRLAN_IDLE, return;);
142 142
@@ -147,13 +147,13 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
147 147
148 irlan_do_provider_event(self, IRLAN_CONNECT_INDICATION, NULL); 148 irlan_do_provider_event(self, IRLAN_CONNECT_INDICATION, NULL);
149 149
150 /* 150 /*
151 * If we are in peer mode, the client may not have got the discovery 151 * If we are in peer mode, the client may not have got the discovery
152 * indication it needs to make progress. If the client is still in 152 * indication it needs to make progress. If the client is still in
153 * IDLE state, we must kick it. 153 * IDLE state, we must kick it.
154 */ 154 */
155 if ((self->provider.access_type == ACCESS_PEER) && 155 if ((self->provider.access_type == ACCESS_PEER) &&
156 (self->client.state == IRLAN_IDLE)) 156 (self->client.state == IRLAN_IDLE))
157 { 157 {
158 irlan_client_wakeup(self, self->saddr, self->daddr); 158 irlan_client_wakeup(self, self->saddr, self->daddr);
159 } 159 }
@@ -175,38 +175,38 @@ void irlan_provider_connect_response(struct irlan_cb *self,
175 irttp_connect_response(tsap, IRLAN_MTU, NULL); 175 irttp_connect_response(tsap, IRLAN_MTU, NULL);
176} 176}
177 177
178static void irlan_provider_disconnect_indication(void *instance, void *sap, 178static void irlan_provider_disconnect_indication(void *instance, void *sap,
179 LM_REASON reason, 179 LM_REASON reason,
180 struct sk_buff *userdata) 180 struct sk_buff *userdata)
181{ 181{
182 struct irlan_cb *self; 182 struct irlan_cb *self;
183 struct tsap_cb *tsap; 183 struct tsap_cb *tsap;
184 184
185 IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason); 185 IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason);
186 186
187 self = (struct irlan_cb *) instance; 187 self = (struct irlan_cb *) instance;
188 tsap = (struct tsap_cb *) sap; 188 tsap = (struct tsap_cb *) sap;
189 189
190 IRDA_ASSERT(self != NULL, return;); 190 IRDA_ASSERT(self != NULL, return;);
191 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 191 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
192 IRDA_ASSERT(tsap != NULL, return;); 192 IRDA_ASSERT(tsap != NULL, return;);
193 IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;); 193 IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
194 194
195 IRDA_ASSERT(tsap == self->provider.tsap_ctrl, return;); 195 IRDA_ASSERT(tsap == self->provider.tsap_ctrl, return;);
196 196
197 irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL); 197 irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
198} 198}
199 199
200/* 200/*
201 * Function irlan_parse_open_data_cmd (self, skb) 201 * Function irlan_parse_open_data_cmd (self, skb)
202 * 202 *
203 * 203 *
204 * 204 *
205 */ 205 */
206int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb) 206int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb)
207{ 207{
208 int ret; 208 int ret;
209 209
210 ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb); 210 ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb);
211 211
212 /* Open data channel */ 212 /* Open data channel */
@@ -218,12 +218,12 @@ int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb)
218/* 218/*
219 * Function parse_command (skb) 219 * Function parse_command (skb)
220 * 220 *
221 * Extract all parameters from received buffer, then feed them to 221 * Extract all parameters from received buffer, then feed them to
222 * check_params for parsing 222 * check_params for parsing
223 * 223 *
224 */ 224 */
225int irlan_provider_parse_command(struct irlan_cb *self, int cmd, 225int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
226 struct sk_buff *skb) 226 struct sk_buff *skb)
227{ 227{
228 __u8 *frame; 228 __u8 *frame;
229 __u8 *ptr; 229 __u8 *ptr;
@@ -231,16 +231,16 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
231 __u16 val_len; 231 __u16 val_len;
232 int i; 232 int i;
233 char *name; 233 char *name;
234 char *value; 234 char *value;
235 int ret = RSP_SUCCESS; 235 int ret = RSP_SUCCESS;
236 236
237 IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); 237 IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;);
238 238
239 IRDA_DEBUG(4, "%s(), skb->len=%d\n", __FUNCTION__ , (int)skb->len); 239 IRDA_DEBUG(4, "%s(), skb->len=%d\n", __FUNCTION__ , (int)skb->len);
240 240
241 IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;); 241 IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;);
242 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;); 242 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);
243 243
244 if (!skb) 244 if (!skb)
245 return -RSP_PROTOCOL_ERROR; 245 return -RSP_PROTOCOL_ERROR;
246 246
@@ -259,11 +259,11 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
259 count = frame[1]; 259 count = frame[1];
260 260
261 IRDA_DEBUG(4, "Got %d parameters\n", count); 261 IRDA_DEBUG(4, "Got %d parameters\n", count);
262 262
263 ptr = frame+2; 263 ptr = frame+2;
264 264
265 /* For all parameters */ 265 /* For all parameters */
266 for (i=0; i<count;i++) { 266 for (i=0; i<count;i++) {
267 ret = irlan_extract_param(ptr, name, value, &val_len); 267 ret = irlan_extract_param(ptr, name, value, &val_len);
268 if (ret < 0) { 268 if (ret < 0) {
269 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ ); 269 IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ );
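The parameter loop above walks the records that follow the two-byte header (result code, then parameter count), handing each one to irlan_extract_param(). A hedged sketch of that walk follows, assuming the extractor returns the number of bytes it consumed (negative on error) so the cursor can advance; extract_param() is a hypothetical stand-in and the buffer sizes are arbitrary.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for irlan_extract_param(): copies one name/value
 * record and returns the bytes consumed, or a negative value on error. */
int extract_param(const uint8_t *p, char *name, char *value, uint16_t *val_len);

static int walk_params(const uint8_t *frame, size_t len)
{
        uint8_t count = frame[1];        /* parameter count follows the result byte */
        const uint8_t *ptr = frame + 2;  /* first name/value record */
        char name[32], value[256];       /* arbitrary sketch sizes */
        uint16_t val_len;

        for (int i = 0; i < count; i++) {
                int used = extract_param(ptr, name, value, &val_len);
                if (used < 0 || ptr + used > frame + len)
                        return -1;       /* malformed record: stop parsing */
                ptr += used;             /* advance to the next record */
        }
        return 0;
}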
@@ -286,7 +286,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
286 * Send reply to query to peer IrLAN layer 286 * Send reply to query to peer IrLAN layer
287 * 287 *
288 */ 288 */
289void irlan_provider_send_reply(struct irlan_cb *self, int command, 289void irlan_provider_send_reply(struct irlan_cb *self, int command,
290 int ret_code) 290 int ret_code)
291{ 291{
292 struct sk_buff *skb; 292 struct sk_buff *skb;
@@ -310,7 +310,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
310 /* Reserve space for TTP, LMP, and LAP header */ 310 /* Reserve space for TTP, LMP, and LAP header */
311 skb_reserve(skb, self->provider.max_header_size); 311 skb_reserve(skb, self->provider.max_header_size);
312 skb_put(skb, 2); 312 skb_put(skb, 2);
313 313
314 switch (command) { 314 switch (command) {
315 case CMD_GET_PROVIDER_INFO: 315 case CMD_GET_PROVIDER_INFO:
316 skb->data[0] = 0x00; /* Success */ 316 skb->data[0] = 0x00; /* Success */
@@ -356,7 +356,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
356 skb->data[0] = 0x00; /* Success */ 356 skb->data[0] = 0x00; /* Success */
357 if (self->provider.send_arb_val) { 357 if (self->provider.send_arb_val) {
358 skb->data[1] = 0x03; /* 3 parameters */ 358 skb->data[1] = 0x03; /* 3 parameters */
359 irlan_insert_short_param(skb, "CON_ARB", 359 irlan_insert_short_param(skb, "CON_ARB",
360 self->provider.send_arb_val); 360 self->provider.send_arb_val);
361 } else 361 } else
362 skb->data[1] = 0x02; /* 2 parameters */ 362 skb->data[1] = 0x02; /* 2 parameters */
@@ -378,13 +378,13 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
378 * Function irlan_provider_register(void) 378 * Function irlan_provider_register(void)
379 * 379 *
380 * Register provider support so we can accept incoming connections. 380 * Register provider support so we can accept incoming connections.
381 * 381 *
382 */ 382 */
383int irlan_provider_open_ctrl_tsap(struct irlan_cb *self) 383int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
384{ 384{
385 struct tsap_cb *tsap; 385 struct tsap_cb *tsap;
386 notify_t notify; 386 notify_t notify;
387 387
388 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 388 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
389 389
390 IRDA_ASSERT(self != NULL, return -1;); 390 IRDA_ASSERT(self != NULL, return -1;);
@@ -393,7 +393,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
393 /* Check if already open */ 393 /* Check if already open */
394 if (self->provider.tsap_ctrl) 394 if (self->provider.tsap_ctrl)
395 return -1; 395 return -1;
396 396
397 /* 397 /*
398 * First register well known control TSAP 398 * First register well known control TSAP
399 */ 399 */
diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c
index 5a086f9827ed..ef401bd6ea00 100644
--- a/net/irda/irlan/irlan_provider_event.c
+++ b/net/irda/irlan/irlan_provider_event.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlan_provider_event.c 3 * Filename: irlan_provider_event.c
4 * Version: 0.9 4 * Version: 0.9
5 * Description: IrLAN provider state machine 5 * Description: IrLAN provider state machine
@@ -8,16 +8,16 @@
8 * Created at: Sun Aug 31 20:14:37 1997 8 * Created at: Sun Aug 31 20:14:37 1997
9 * Modified at: Sat Oct 30 12:52:41 1999 9 * Modified at: Sat Oct 30 12:52:41 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved. 12 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
13 * 13 *
14 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as 15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of 16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version. 17 * the License, or (at your option) any later version.
18 * 18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is 20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge. 21 * provided "AS-IS" and at no charge.
22 * 22 *
23 ********************************************************************/ 23 ********************************************************************/
@@ -30,18 +30,18 @@
30#include <net/irda/irlan_provider.h> 30#include <net/irda/irlan_provider.h>
31#include <net/irda/irlan_event.h> 31#include <net/irda/irlan_event.h>
32 32
33static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, 33static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
34 struct sk_buff *skb); 34 struct sk_buff *skb);
35static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, 35static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
36 struct sk_buff *skb); 36 struct sk_buff *skb);
37static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, 37static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
38 struct sk_buff *skb); 38 struct sk_buff *skb);
39static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, 39static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
40 struct sk_buff *skb); 40 struct sk_buff *skb);
41 41
42static int (*state[])(struct irlan_cb *self, IRLAN_EVENT event, 42static int (*state[])(struct irlan_cb *self, IRLAN_EVENT event,
43 struct sk_buff *skb) = 43 struct sk_buff *skb) =
44{ 44{
45 irlan_provider_state_idle, 45 irlan_provider_state_idle,
46 NULL, /* Query */ 46 NULL, /* Query */
47 NULL, /* Info */ 47 NULL, /* Info */
@@ -55,8 +55,8 @@ static int (*state[])(struct irlan_cb *self, IRLAN_EVENT event,
55 NULL, /* Sync */ 55 NULL, /* Sync */
56}; 56};
57 57
58void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event, 58void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
59 struct sk_buff *skb) 59 struct sk_buff *skb)
60{ 60{
61 IRDA_ASSERT(*state[ self->provider.state] != NULL, return;); 61 IRDA_ASSERT(*state[ self->provider.state] != NULL, return;);
62 62
@@ -73,9 +73,9 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
73 struct sk_buff *skb) 73 struct sk_buff *skb)
74{ 74{
75 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 75 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
76 76
77 IRDA_ASSERT(self != NULL, return -1;); 77 IRDA_ASSERT(self != NULL, return -1;);
78 78
79 switch(event) { 79 switch(event) {
80 case IRLAN_CONNECT_INDICATION: 80 case IRLAN_CONNECT_INDICATION:
81 irlan_provider_connect_response( self, self->provider.tsap_ctrl); 81 irlan_provider_connect_response( self, self->provider.tsap_ctrl);
@@ -96,13 +96,13 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
96 * 96 *
97 * INFO, We have issued a GetInfo command and are awaiting a reply. 97 * INFO, We have issued a GetInfo command and are awaiting a reply.
98 */ 98 */
99static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, 99static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
100 struct sk_buff *skb) 100 struct sk_buff *skb)
101{ 101{
102 int ret; 102 int ret;
103 103
104 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 104 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
105 105
106 IRDA_ASSERT(self != NULL, return -1;); 106 IRDA_ASSERT(self != NULL, return -1;);
107 107
108 switch(event) { 108 switch(event) {
@@ -110,7 +110,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
110 /* Be sure to use 802.3 in case of peer mode */ 110 /* Be sure to use 802.3 in case of peer mode */
111 if (self->provider.access_type == ACCESS_PEER) { 111 if (self->provider.access_type == ACCESS_PEER) {
112 self->media = MEDIA_802_3; 112 self->media = MEDIA_802_3;
113 113
114 /* Check if client has started yet */ 114 /* Check if client has started yet */
115 if (self->client.state == IRLAN_IDLE) { 115 if (self->client.state == IRLAN_IDLE) {
116 /* This should get the client going */ 116 /* This should get the client going */
@@ -118,15 +118,15 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
118 } 118 }
119 } 119 }
120 120
121 irlan_provider_send_reply(self, CMD_GET_PROVIDER_INFO, 121 irlan_provider_send_reply(self, CMD_GET_PROVIDER_INFO,
122 RSP_SUCCESS); 122 RSP_SUCCESS);
123 /* Keep state */ 123 /* Keep state */
124 break; 124 break;
125 case IRLAN_GET_MEDIA_CMD: 125 case IRLAN_GET_MEDIA_CMD:
126 irlan_provider_send_reply(self, CMD_GET_MEDIA_CHAR, 126 irlan_provider_send_reply(self, CMD_GET_MEDIA_CHAR,
127 RSP_SUCCESS); 127 RSP_SUCCESS);
128 /* Keep state */ 128 /* Keep state */
129 break; 129 break;
130 case IRLAN_OPEN_DATA_CMD: 130 case IRLAN_OPEN_DATA_CMD:
131 ret = irlan_parse_open_data_cmd(self, skb); 131 ret = irlan_parse_open_data_cmd(self, skb);
132 if (self->provider.access_type == ACCESS_PEER) { 132 if (self->provider.access_type == ACCESS_PEER) {
@@ -152,7 +152,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
152 } 152 }
153 if (skb) 153 if (skb)
154 dev_kfree_skb(skb); 154 dev_kfree_skb(skb);
155 155
156 return 0; 156 return 0;
157} 157}
158 158
@@ -163,7 +163,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
163 * reply 163 * reply
164 * 164 *
165 */ 165 */
166static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, 166static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
167 struct sk_buff *skb) 167 struct sk_buff *skb)
168{ 168{
169 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 169 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
@@ -173,11 +173,11 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
173 switch(event) { 173 switch(event) {
174 case IRLAN_FILTER_CONFIG_CMD: 174 case IRLAN_FILTER_CONFIG_CMD:
175 irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb); 175 irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb);
176 irlan_provider_send_reply(self, CMD_FILTER_OPERATION, 176 irlan_provider_send_reply(self, CMD_FILTER_OPERATION,
177 RSP_SUCCESS); 177 RSP_SUCCESS);
178 /* Keep state */ 178 /* Keep state */
179 break; 179 break;
180 case IRLAN_DATA_CONNECT_INDICATION: 180 case IRLAN_DATA_CONNECT_INDICATION:
181 irlan_next_provider_state(self, IRLAN_DATA); 181 irlan_next_provider_state(self, IRLAN_DATA);
182 irlan_provider_connect_response(self, self->tsap_data); 182 irlan_provider_connect_response(self, self->tsap_data);
183 break; 183 break;
@@ -202,8 +202,8 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
202 * the local and remote machines. 202 * the local and remote machines.
203 * 203 *
204 */ 204 */
205static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, 205static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
206 struct sk_buff *skb) 206 struct sk_buff *skb)
207{ 207{
208 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 208 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
209 209
@@ -213,7 +213,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
213 switch(event) { 213 switch(event) {
214 case IRLAN_FILTER_CONFIG_CMD: 214 case IRLAN_FILTER_CONFIG_CMD:
215 irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb); 215 irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb);
216 irlan_provider_send_reply(self, CMD_FILTER_OPERATION, 216 irlan_provider_send_reply(self, CMD_FILTER_OPERATION,
217 RSP_SUCCESS); 217 RSP_SUCCESS);
218 break; 218 break;
219 case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */ 219 case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
@@ -226,7 +226,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
226 } 226 }
227 if (skb) 227 if (skb)
228 dev_kfree_skb(skb); 228 dev_kfree_skb(skb);
229 229
230 return 0; 230 return 0;
231} 231}
232 232
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index e7852a07495e..fd73e4af715a 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -87,7 +87,7 @@ int __init irlap_init(void)
87 /* Allocate master array */ 87 /* Allocate master array */
88 irlap = hashbin_new(HB_LOCK); 88 irlap = hashbin_new(HB_LOCK);
89 if (irlap == NULL) { 89 if (irlap == NULL) {
90 IRDA_ERROR("%s: can't allocate irlap hashbin!\n", 90 IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
91 __FUNCTION__); 91 __FUNCTION__);
92 return -ENOMEM; 92 return -ENOMEM;
93 } 93 }
@@ -701,8 +701,8 @@ void irlap_update_nr_received(struct irlap_cb *self, int nr)
701 int count = 0; 701 int count = 0;
702 702
703 /* 703 /*
704 * Remove all the ack-ed frames from the window queue. 704 * Remove all the ack-ed frames from the window queue.
705 */ 705 */
706 706
707 /* 707 /*
708 * Optimize for the common case. It is most likely that the receiver 708 * Optimize for the common case. It is most likely that the receiver
@@ -1109,13 +1109,13 @@ static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
1109 spin_lock_irq(&irlap->hb_spinlock); 1109 spin_lock_irq(&irlap->hb_spinlock);
1110 iter->id = 0; 1110 iter->id = 0;
1111 1111
1112 for (self = (struct irlap_cb *) hashbin_get_first(irlap); 1112 for (self = (struct irlap_cb *) hashbin_get_first(irlap);
1113 self; self = (struct irlap_cb *) hashbin_get_next(irlap)) { 1113 self; self = (struct irlap_cb *) hashbin_get_next(irlap)) {
1114 if (iter->id == *pos) 1114 if (iter->id == *pos)
1115 break; 1115 break;
1116 ++iter->id; 1116 ++iter->id;
1117 } 1117 }
1118 1118
1119 return self; 1119 return self;
1120} 1120}
1121 1121
@@ -1137,7 +1137,7 @@ static int irlap_seq_show(struct seq_file *seq, void *v)
1137{ 1137{
1138 const struct irlap_iter_state *iter = seq->private; 1138 const struct irlap_iter_state *iter = seq->private;
1139 const struct irlap_cb *self = v; 1139 const struct irlap_cb *self = v;
1140 1140
1141 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;); 1141 IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);
1142 1142
1143 seq_printf(seq, "irlap%d ", iter->id); 1143 seq_printf(seq, "irlap%d ", iter->id);
@@ -1222,7 +1222,7 @@ static int irlap_seq_open(struct inode *inode, struct file *file)
1222 struct seq_file *seq; 1222 struct seq_file *seq;
1223 int rc = -ENOMEM; 1223 int rc = -ENOMEM;
1224 struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1224 struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1225 1225
1226 if (!s) 1226 if (!s)
1227 goto out; 1227 goto out;
1228 1228
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 99faff68c399..7b6433fe1dc2 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -827,7 +827,7 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
827 irlap_disconnect_indication(self, LAP_DISC_INDICATION); 827 irlap_disconnect_indication(self, LAP_DISC_INDICATION);
828 break; 828 break;
829 default: 829 default:
830 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, 830 IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__,
831 event, irlap_event[event]); 831 event, irlap_event[event]);
832 832
833 ret = -1; 833 ret = -1;
@@ -864,7 +864,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
864 * between 15 msecs and 45 msecs. 864 * between 15 msecs and 45 msecs.
865 */ 865 */
866 irlap_start_backoff_timer(self, msecs_to_jiffies(20 + 866 irlap_start_backoff_timer(self, msecs_to_jiffies(20 +
867 (jiffies % 30))); 867 (jiffies % 30)));
868 } else { 868 } else {
869 /* Always switch state before calling upper layers */ 869 /* Always switch state before calling upper layers */
870 irlap_next_state(self, LAP_NDM); 870 irlap_next_state(self, LAP_NDM);
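The hunk above restarts the connection attempt after a pseudo-random backoff derived from jiffies; as written, 20 + (jiffies % 30) yields a delay of roughly 20 to 49 ms. A small sketch of that computation outside the kernel, with ms_to_ticks() as a hypothetical stand-in for msecs_to_jiffies():

/* Hypothetical stand-in for msecs_to_jiffies(). */
unsigned long ms_to_ticks(unsigned long ms);

static unsigned long backoff_ticks(unsigned long jiffies_now)
{
        return ms_to_ticks(20 + (jiffies_now % 30)); /* 20..49 ms, per the hunk above */
}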
@@ -1377,7 +1377,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1377 /* Resend rejected frames */ 1377 /* Resend rejected frames */
1378 irlap_resend_rejected_frames(self, CMD_FRAME); 1378 irlap_resend_rejected_frames(self, CMD_FRAME);
1379 1379
1380 /* Give peer some time to retransmit! 1380 /* Give peer some time to retransmit!
1381 * But account for our own Tx. */ 1381 * But account for our own Tx. */
1382 irlap_start_final_timer(self, 2 * self->final_timeout); 1382 irlap_start_final_timer(self, 2 * self->final_timeout);
1383 1383
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index dba349c832d0..0b04603e9c47 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -414,7 +414,7 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
414 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); 414 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__);
415 return; 415 return;
416 } 416 }
417 417
418 xid = (struct xid_frame *) skb->data; 418 xid = (struct xid_frame *) skb->data;
419 419
420 info->daddr = le32_to_cpu(xid->saddr); 420 info->daddr = le32_to_cpu(xid->saddr);
@@ -485,7 +485,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
485 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); 485 IRDA_ERROR("%s: frame too short!\n", __FUNCTION__);
486 return; 486 return;
487 } 487 }
488 488
489 xid = (struct xid_frame *) skb->data; 489 xid = (struct xid_frame *) skb->data;
490 490
491 info->daddr = le32_to_cpu(xid->saddr); 491 info->daddr = le32_to_cpu(xid->saddr);
@@ -524,7 +524,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
524 */ 524 */
525 if (info->s == 0xff) { 525 if (info->s == 0xff) {
526 /* Check if things are sane at this point... */ 526 /* Check if things are sane at this point... */
527 if((discovery_info == NULL) || 527 if((discovery_info == NULL) ||
528 !pskb_may_pull(skb, 3)) { 528 !pskb_may_pull(skb, 3)) {
529 IRDA_ERROR("%s: discovery frame too short!\n", 529 IRDA_ERROR("%s: discovery frame too short!\n",
530 __FUNCTION__); 530 __FUNCTION__);
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 7e5d12ab3b90..b134c3cf2bdb 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -116,7 +116,7 @@ int __init irlmp_init(void)
116 * Remove IrLMP layer 116 * Remove IrLMP layer
117 * 117 *
118 */ 118 */
119void __exit irlmp_cleanup(void) 119void __exit irlmp_cleanup(void)
120{ 120{
121 /* Check for main structure */ 121 /* Check for main structure */
122 IRDA_ASSERT(irlmp != NULL, return;); 122 IRDA_ASSERT(irlmp != NULL, return;);
@@ -892,7 +892,7 @@ void irlmp_discovery_request(int nslots)
892 892
893 /* 893 /*
894 * Start a single discovery operation if discovery is not already 894 * Start a single discovery operation if discovery is not already
895 * running 895 * running
896 */ 896 */
897 if (!sysctl_discovery) { 897 if (!sysctl_discovery) {
898 /* Check if user wants to override the default */ 898 /* Check if user wants to override the default */
@@ -1528,12 +1528,12 @@ int irlmp_unregister_service(void *handle)
1528 1528
1529 /* Refresh current hint bits */ 1529 /* Refresh current hint bits */
1530 spin_lock_irqsave(&irlmp->services->hb_spinlock, flags); 1530 spin_lock_irqsave(&irlmp->services->hb_spinlock, flags);
1531 service = (irlmp_service_t *) hashbin_get_first(irlmp->services); 1531 service = (irlmp_service_t *) hashbin_get_first(irlmp->services);
1532 while (service) { 1532 while (service) {
1533 irlmp->hints.word |= service->hints.word; 1533 irlmp->hints.word |= service->hints.word;
1534 1534
1535 service = (irlmp_service_t *)hashbin_get_next(irlmp->services); 1535 service = (irlmp_service_t *)hashbin_get_next(irlmp->services);
1536 } 1536 }
1537 spin_unlock_irqrestore(&irlmp->services->hb_spinlock, flags); 1537 spin_unlock_irqrestore(&irlmp->services->hb_spinlock, flags);
1538 return 0; 1538 return 0;
1539} 1539}
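irlmp_unregister_service() above rebuilds the global hint word by OR-ing together the hint words of every service still registered. A sketch of that refresh with the hashbin walk replaced by a plain array:

#include <stdint.h>
#include <stddef.h>

/* Rebuild the combined hint word from the remaining services. */
static uint16_t refresh_hints(const uint16_t *service_hints, size_t n)
{
        uint16_t word = 0;

        for (size_t i = 0; i < n; i++)
                word |= service_hints[i];
        return word;
}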
@@ -1861,7 +1861,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
1861 1861
1862 spin_lock_irq(&iter->hashbin->hb_spinlock); 1862 spin_lock_irq(&iter->hashbin->hb_spinlock);
1863 for (element = hashbin_get_first(iter->hashbin); 1863 for (element = hashbin_get_first(iter->hashbin);
1864 element != NULL; 1864 element != NULL;
1865 element = hashbin_get_next(iter->hashbin)) { 1865 element = hashbin_get_next(iter->hashbin)) {
1866 if (!off || *off-- == 0) { 1866 if (!off || *off-- == 0) {
1867 /* NB: hashbin left locked */ 1867 /* NB: hashbin left locked */
@@ -1918,7 +1918,7 @@ static void *irlmp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1918 if (v == NULL) { /* no more in this hash bin */ 1918 if (v == NULL) { /* no more in this hash bin */
1919 spin_unlock_irq(&iter->hashbin->hb_spinlock); 1919 spin_unlock_irq(&iter->hashbin->hb_spinlock);
1920 1920
1921 if (iter->hashbin == irlmp->unconnected_lsaps) 1921 if (iter->hashbin == irlmp->unconnected_lsaps)
1922 v = LINK_START_TOKEN; 1922 v = LINK_START_TOKEN;
1923 1923
1924 iter->hashbin = NULL; 1924 iter->hashbin = NULL;
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index 4c90dd1b4503..65ffa981510a 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -615,7 +615,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
615 default: 615 default:
616 /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we 616 /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we
617 * are *not* yet bound to the IrLAP link. Jean II */ 617 * are *not* yet bound to the IrLAP link. Jean II */
618 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", 618 IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
619 __FUNCTION__, irlmp_event[event], self->slsap_sel); 619 __FUNCTION__, irlmp_event[event], self->slsap_sel);
620 break; 620 break;
621 } 621 }
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 39761a1d18f5..559302d3fe66 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: irlmp_frame.c 3 * Filename: irlmp_frame.c
4 * Version: 0.9 4 * Version: 0.9
5 * Description: IrLMP frame implementation 5 * Description: IrLMP frame implementation
@@ -8,18 +8,18 @@
8 * Created at: Tue Aug 19 02:09:59 1997 8 * Created at: Tue Aug 19 02:09:59 1997
9 * Modified at: Mon Dec 13 13:41:12 1999 9 * Modified at: Mon Dec 13 13:41:12 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no> 12 * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
13 * All Rights Reserved. 13 * All Rights Reserved.
14 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> 14 * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
15 * 15 *
16 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as 17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * Neither Dag Brattli nor University of Tromsø admit liability nor 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is 22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge. 23 * provided "AS-IS" and at no charge.
24 * 24 *
25 ********************************************************************/ 25 ********************************************************************/
@@ -34,7 +34,7 @@
34#include <net/irda/irlmp_frame.h> 34#include <net/irda/irlmp_frame.h>
35#include <net/irda/discovery.h> 35#include <net/irda/discovery.h>
36 36
37static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap, 37static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap,
38 __u8 slsap, int status, hashbin_t *); 38 __u8 slsap, int status, hashbin_t *);
39 39
40inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, 40inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
@@ -56,18 +56,18 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
56 * Send Link Control Frame to IrLAP 56 * Send Link Control Frame to IrLAP
57 */ 57 */
58void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, 58void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
59 __u8 opcode, struct sk_buff *skb) 59 __u8 opcode, struct sk_buff *skb)
60{ 60{
61 __u8 *frame; 61 __u8 *frame;
62 62
63 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 63 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
64 64
65 IRDA_ASSERT(self != NULL, return;); 65 IRDA_ASSERT(self != NULL, return;);
66 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 66 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
67 IRDA_ASSERT(skb != NULL, return;); 67 IRDA_ASSERT(skb != NULL, return;);
68 68
69 frame = skb->data; 69 frame = skb->data;
70 70
71 frame[0] = dlsap | CONTROL_BIT; 71 frame[0] = dlsap | CONTROL_BIT;
72 frame[1] = slsap; 72 frame[1] = slsap;
73 73
@@ -87,14 +87,14 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
87 * Used by IrLAP to pass received data frames to IrLMP layer 87 * Used by IrLAP to pass received data frames to IrLMP layer
88 * 88 *
89 */ 89 */
90void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, 90void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
91 int unreliable) 91 int unreliable)
92{ 92{
93 struct lsap_cb *lsap; 93 struct lsap_cb *lsap;
94 __u8 slsap_sel; /* Source (this) LSAP address */ 94 __u8 slsap_sel; /* Source (this) LSAP address */
95 __u8 dlsap_sel; /* Destination LSAP address */ 95 __u8 dlsap_sel; /* Destination LSAP address */
96 __u8 *fp; 96 __u8 *fp;
97 97
98 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 98 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
99 99
100 IRDA_ASSERT(self != NULL, return;); 100 IRDA_ASSERT(self != NULL, return;);
@@ -104,11 +104,11 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
104 fp = skb->data; 104 fp = skb->data;
105 105
106 /* 106 /*
107 * The next statements may be confusing, but we do this so that 107 * The next statements may be confusing, but we do this so that
108 * destination LSAP of received frame is source LSAP in our view 108 * destination LSAP of received frame is source LSAP in our view
109 */ 109 */
110 slsap_sel = fp[0] & LSAP_MASK; 110 slsap_sel = fp[0] & LSAP_MASK;
111 dlsap_sel = fp[1]; 111 dlsap_sel = fp[1];
112 112
113 /* 113 /*
114 * Check if this is an incoming connection, since we must deal with 114 * Check if this is an incoming connection, since we must deal with
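The hunk above pulls the LSAP selectors out of the first two frame bytes and swaps the naming, so the peer's destination selector becomes our source selector. A sketch of that header parse follows; the mask and control-bit values are assumptions, not taken from the kernel headers.

#include <stdint.h>

#define LSAP_MASK_GUESS    0x7f   /* assumed selector mask */
#define CONTROL_BIT_GUESS  0x80   /* assumed control-frame flag */

static void parse_lmp_header(const uint8_t *fp, uint8_t *slsap_sel,
                             uint8_t *dlsap_sel, int *is_control)
{
        *slsap_sel  = fp[0] & LSAP_MASK_GUESS;   /* peer's DLSAP = our SLSAP */
        *dlsap_sel  = fp[1];                     /* peer's SLSAP = our DLSAP */
        *is_control = !!(fp[0] & CONTROL_BIT_GUESS);
}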
@@ -118,11 +118,11 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
118 IRDA_DEBUG(3, "%s(), incoming connection, " 118 IRDA_DEBUG(3, "%s(), incoming connection, "
119 "source LSAP=%d, dest LSAP=%d\n", 119 "source LSAP=%d, dest LSAP=%d\n",
120 __FUNCTION__, slsap_sel, dlsap_sel); 120 __FUNCTION__, slsap_sel, dlsap_sel);
121 121
122 /* Try to find LSAP among the unconnected LSAPs */ 122 /* Try to find LSAP among the unconnected LSAPs */
123 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD, 123 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
124 irlmp->unconnected_lsaps); 124 irlmp->unconnected_lsaps);
125 125
126 /* Maybe LSAP was already connected, so try one more time */ 126 /* Maybe LSAP was already connected, so try one more time */
127 if (!lsap) { 127 if (!lsap) {
128 IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __FUNCTION__); 128 IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __FUNCTION__);
@@ -130,9 +130,9 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
130 self->lsaps); 130 self->lsaps);
131 } 131 }
132 } else 132 } else
133 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, 133 lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
134 self->lsaps); 134 self->lsaps);
135 135
136 if (lsap == NULL) { 136 if (lsap == NULL) {
137 IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n"); 137 IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n");
138 IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n", 138 IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
@@ -146,8 +146,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
146 return; 146 return;
147 } 147 }
148 148
149 /* 149 /*
150 * Check if we received a control frame? 150 * Check if we received a control frame?
151 */ 151 */
152 if (fp[0] & CONTROL_BIT) { 152 if (fp[0] & CONTROL_BIT) {
153 switch (fp[2]) { 153 switch (fp[2]) {
@@ -161,7 +161,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
161 case DISCONNECT: 161 case DISCONNECT:
162 IRDA_DEBUG(4, "%s(), Disconnect indication!\n", 162 IRDA_DEBUG(4, "%s(), Disconnect indication!\n",
163 __FUNCTION__); 163 __FUNCTION__);
164 irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION, 164 irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
165 skb); 165 skb);
166 break; 166 break;
167 case ACCESSMODE_CMD: 167 case ACCESSMODE_CMD:
@@ -181,7 +181,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
181 irlmp_udata_indication(lsap, skb); 181 irlmp_udata_indication(lsap, skb);
182 else 182 else
183 irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb); 183 irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb);
184 } else { 184 } else {
185 /* Optimize and bypass the state machine if possible */ 185 /* Optimize and bypass the state machine if possible */
186 if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY) 186 if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
187 irlmp_data_indication(lsap, skb); 187 irlmp_data_indication(lsap, skb);
@@ -193,7 +193,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
193/* 193/*
194 * Function irlmp_link_unitdata_indication (self, skb) 194 * Function irlmp_link_unitdata_indication (self, skb)
195 * 195 *
196 * 196 *
197 * 197 *
198 */ 198 */
199#ifdef CONFIG_IRDA_ULTRA 199#ifdef CONFIG_IRDA_ULTRA
@@ -205,7 +205,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
205 __u8 pid; /* Protocol identifier */ 205 __u8 pid; /* Protocol identifier */
206 __u8 *fp; 206 __u8 *fp;
207 unsigned long flags; 207 unsigned long flags;
208 208
209 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 209 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
210 210
211 IRDA_ASSERT(self != NULL, return;); 211 IRDA_ASSERT(self != NULL, return;);
@@ -215,13 +215,13 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
215 fp = skb->data; 215 fp = skb->data;
216 216
217 /* 217 /*
218 * The next statements may be confusing, but we do this so that 218 * The next statements may be confusing, but we do this so that
219 * destination LSAP of received frame is source LSAP in our view 219 * destination LSAP of received frame is source LSAP in our view
220 */ 220 */
221 slsap_sel = fp[0] & LSAP_MASK; 221 slsap_sel = fp[0] & LSAP_MASK;
222 dlsap_sel = fp[1]; 222 dlsap_sel = fp[1];
223 pid = fp[2]; 223 pid = fp[2];
224 224
225 if (pid & 0x80) { 225 if (pid & 0x80) {
226 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", 226 IRDA_DEBUG(0, "%s(), extension in PID not supp!\n",
227 __FUNCTION__); 227 __FUNCTION__);
@@ -233,7 +233,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
233 IRDA_DEBUG(0, "%s(), dropping frame!\n", __FUNCTION__); 233 IRDA_DEBUG(0, "%s(), dropping frame!\n", __FUNCTION__);
234 return; 234 return;
235 } 235 }
236 236
237 /* Search the connectionless LSAP */ 237 /* Search the connectionless LSAP */
238 spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags); 238 spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
239 lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps); 239 lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
@@ -241,10 +241,10 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
241 /* 241 /*
242 * Check if source LSAP and dest LSAP selectors and PID match. 242 * Check if source LSAP and dest LSAP selectors and PID match.
243 */ 243 */
244 if ((lsap->slsap_sel == slsap_sel) && 244 if ((lsap->slsap_sel == slsap_sel) &&
245 (lsap->dlsap_sel == dlsap_sel) && 245 (lsap->dlsap_sel == dlsap_sel) &&
246 (lsap->pid == pid)) 246 (lsap->pid == pid))
247 { 247 {
248 break; 248 break;
249 } 249 }
250 lsap = (struct lsap_cb *) hashbin_get_next(irlmp->unconnected_lsaps); 250 lsap = (struct lsap_cb *) hashbin_get_next(irlmp->unconnected_lsaps);
@@ -262,12 +262,12 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
262/* 262/*
263 * Function irlmp_link_disconnect_indication (reason, userdata) 263 * Function irlmp_link_disconnect_indication (reason, userdata)
264 * 264 *
265 * IrLAP has disconnected 265 * IrLAP has disconnected
266 * 266 *
267 */ 267 */
268void irlmp_link_disconnect_indication(struct lap_cb *lap, 268void irlmp_link_disconnect_indication(struct lap_cb *lap,
269 struct irlap_cb *irlap, 269 struct irlap_cb *irlap,
270 LAP_REASON reason, 270 LAP_REASON reason,
271 struct sk_buff *skb) 271 struct sk_buff *skb)
272{ 272{
273 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 273 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
@@ -278,8 +278,8 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap,
278 lap->reason = reason; 278 lap->reason = reason;
279 lap->daddr = DEV_ADDR_ANY; 279 lap->daddr = DEV_ADDR_ANY;
280 280
281 /* FIXME: must do something with the skb if any */ 281 /* FIXME: must do something with the skb if any */
282 282
283 /* 283 /*
284 * Inform station state machine 284 * Inform station state machine
285 */ 285 */
@@ -292,9 +292,9 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap,
292 * Incoming LAP connection! 292 * Incoming LAP connection!
293 * 293 *
294 */ 294 */
295void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr, 295void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
296 __u32 daddr, struct qos_info *qos, 296 __u32 daddr, struct qos_info *qos,
297 struct sk_buff *skb) 297 struct sk_buff *skb)
298{ 298{
299 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 299 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
300 300
@@ -314,7 +314,7 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
314 * LAP connection confirmed! 314 * LAP connection confirmed!
315 * 315 *
316 */ 316 */
317void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos, 317void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
318 struct sk_buff *skb) 318 struct sk_buff *skb)
319{ 319{
320 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 320 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
@@ -359,7 +359,7 @@ void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
359 * 359 *
360 * Jean II 360 * Jean II
361 */ 361 */
362void irlmp_link_discovery_indication(struct lap_cb *self, 362void irlmp_link_discovery_indication(struct lap_cb *self,
363 discovery_t *discovery) 363 discovery_t *discovery)
364{ 364{
365 IRDA_ASSERT(self != NULL, return;); 365 IRDA_ASSERT(self != NULL, return;);
@@ -367,7 +367,7 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
367 367
368 /* Add to main log, cleanup */ 368 /* Add to main log, cleanup */
369 irlmp_add_discovery(irlmp->cachelog, discovery); 369 irlmp_add_discovery(irlmp->cachelog, discovery);
370 370
371 /* Just handle it the same way as a discovery confirm, 371 /* Just handle it the same way as a discovery confirm,
372 * bypass the LM_LAP state machine (see below) */ 372 * bypass the LM_LAP state machine (see below) */
373 irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_PASSIVE); 373 irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_PASSIVE);
@@ -387,7 +387,7 @@ void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
387 387
388 IRDA_ASSERT(self != NULL, return;); 388 IRDA_ASSERT(self != NULL, return;);
389 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); 389 IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
390 390
391 /* Add to main log, cleanup */ 391 /* Add to main log, cleanup */
392 irlmp_add_discovery_log(irlmp->cachelog, log); 392 irlmp_add_discovery_log(irlmp->cachelog, log);
393 393
@@ -420,7 +420,7 @@ static inline void irlmp_update_cache(struct lap_cb *lap,
420 * Find handle associated with destination and source LSAP 420 * Find handle associated with destination and source LSAP
421 * 421 *
422 * Any IrDA connection (LSAP/TSAP) is uniquely identified by 422 * Any IrDA connection (LSAP/TSAP) is uniquely identified by
423 * 3 parameters, the local lsap, the remote lsap and the remote address. 423 * 3 parameters, the local lsap, the remote lsap and the remote address.
424 * We may initiate multiple connections to the same remote service 424 * We may initiate multiple connections to the same remote service
425 * (they will have different local lsap), a remote device may initiate 425 * (they will have different local lsap), a remote device may initiate
426 * multiple connections to the same local service (they will have 426 * multiple connections to the same local service (they will have
@@ -433,20 +433,20 @@ static inline void irlmp_update_cache(struct lap_cb *lap,
433 */ 433 */
434static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel, 434static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
435 __u8 slsap_sel, int status, 435 __u8 slsap_sel, int status,
436 hashbin_t *queue) 436 hashbin_t *queue)
437{ 437{
438 struct lsap_cb *lsap; 438 struct lsap_cb *lsap;
439 unsigned long flags; 439 unsigned long flags;
440 440
441 /* 441 /*
442 * Optimize for the common case. We assume that the last frame 442 * Optimize for the common case. We assume that the last frame
443 * received is in the same connection as the last one, so check in 443 * received is in the same connection as the last one, so check in
444 * cache first to avoid the linear search 444 * cache first to avoid the linear search
445 */ 445 */
446#ifdef CONFIG_IRDA_CACHE_LAST_LSAP 446#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
447 if ((self->cache.valid) && 447 if ((self->cache.valid) &&
448 (self->cache.slsap_sel == slsap_sel) && 448 (self->cache.slsap_sel == slsap_sel) &&
449 (self->cache.dlsap_sel == dlsap_sel)) 449 (self->cache.dlsap_sel == dlsap_sel))
450 { 450 {
451 return (self->cache.lsap); 451 return (self->cache.lsap);
452 } 452 }
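The #ifdef block above consults a one-entry cache keyed on the last (slsap, dlsap) pair before falling back to the linear hashbin walk. A minimal sketch of that cache lookup, with hypothetical field names:

#include <stdint.h>

/* One-entry cache of the last resolved LSAP, as consulted above. */
struct lsap_cache {
        int     valid;
        uint8_t slsap_sel;
        uint8_t dlsap_sel;
        void   *lsap;
};

static void *cache_lookup(struct lsap_cache *c, uint8_t slsap, uint8_t dlsap)
{
        if (c->valid && c->slsap_sel == slsap && c->dlsap_sel == dlsap)
                return c->lsap;         /* hit: skip the linear search */
        return NULL;                    /* miss: caller walks the hashbin */
}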
@@ -456,14 +456,14 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
456 456
457 lsap = (struct lsap_cb *) hashbin_get_first(queue); 457 lsap = (struct lsap_cb *) hashbin_get_first(queue);
458 while (lsap != NULL) { 458 while (lsap != NULL) {
459 /* 459 /*
460 * If this is an incoming connection, then the destination 460 * If this is an incoming connection, then the destination
461 * LSAP selector may have been specified as LM_ANY so that 461 * LSAP selector may have been specified as LM_ANY so that
462 * any client can connect. In that case we only need to check 462 * any client can connect. In that case we only need to check
463 * if the source LSAP (in our view!) matches! 463 * if the source LSAP (in our view!) matches!
464 */ 464 */
465 if ((status == CONNECT_CMD) && 465 if ((status == CONNECT_CMD) &&
466 (lsap->slsap_sel == slsap_sel) && 466 (lsap->slsap_sel == slsap_sel) &&
467 (lsap->dlsap_sel == LSAP_ANY)) { 467 (lsap->dlsap_sel == LSAP_ANY)) {
468 /* This is where the dest lsap sel is set on incoming 468 /* This is where the dest lsap sel is set on incoming
469 * lsaps */ 469 * lsaps */
@@ -473,8 +473,8 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
473 /* 473 /*
474 * Check if source LSAP and dest LSAP selectors match. 474 * Check if source LSAP and dest LSAP selectors match.
475 */ 475 */
476 if ((lsap->slsap_sel == slsap_sel) && 476 if ((lsap->slsap_sel == slsap_sel) &&
477 (lsap->dlsap_sel == dlsap_sel)) 477 (lsap->dlsap_sel == dlsap_sel))
478 break; 478 break;
479 479
480 lsap = (struct lsap_cb *) hashbin_get_next(queue); 480 lsap = (struct lsap_cb *) hashbin_get_next(queue);
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 2869b16e417d..826e6c4ca5d5 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename:      irmod.c
4 * Version:       0.9
5 * Description:   IrDA stack main entry points
@@ -8,19 +8,19 @@
8 * Created at:    Mon Dec 15 13:55:39 1997
9 * Modified at:   Wed Jan  5 15:12:41 2000
10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
11 *
12 *     Copyright (c) 1997, 1999-2000 Dag Brattli, All Rights Reserved.
13 *     Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com>
14 *
15 *     This program is free software; you can redistribute it and/or
16 *     modify it under the terms of the GNU General Public License as
17 *     published by the Free Software Foundation; either version 2 of
18 *     the License, or (at your option) any later version.
19 *
20 *     Neither Dag Brattli nor University of Tromsø admit liability nor
21 *     provide warranty for any of this software. This material is
22 *     provided "AS-IS" and at no charge.
23 *
24 ********************************************************************/
25
26/*
@@ -52,7 +52,7 @@ extern void irda_sysctl_unregister(void);
52extern int  irsock_init(void);
53extern void irsock_cleanup(void);
54/* irlap_frame.c */
55extern int  irlap_driver_rcv(struct sk_buff *, struct net_device *,
56			     struct packet_type *, struct net_device *);
57
58/*
@@ -104,16 +104,16 @@ static int __init irda_init(void)
104	IRDA_DEBUG(0, "%s()\n", __FUNCTION__);
105
106	/* Lower layer of the stack */
107	irlmp_init();
108	irlap_init();
109
110	/* Higher layers of the stack */
111	iriap_init();
112	irttp_init();
113	irsock_init();
114
115	/* Add IrDA packet type (Start receiving packets) */
116	dev_add_pack(&irda_packet_type);
117
118	/* External APIs */
119#ifdef CONFIG_PROC_FS
@@ -124,7 +124,7 @@ static int __init irda_init(void)
124#endif
125
126	/* Driver/dongle support */
127	irda_device_init();
128
129	return 0;
130}
@@ -140,14 +140,14 @@ static void __exit irda_cleanup(void)
140	/* Remove External APIs */
141#ifdef CONFIG_SYSCTL
142	irda_sysctl_unregister();
143#endif
144#ifdef CONFIG_PROC_FS
145	irda_proc_unregister();
146#endif
147
148	/* Remove IrDA packet type (stop receiving packets) */
149	dev_remove_pack(&irda_packet_type);
150
151	/* Remove higher layers */
152	irsock_cleanup();
153	irttp_cleanup();
@@ -177,8 +177,8 @@ static void __exit irda_cleanup(void)
177 */
178subsys_initcall(irda_init);
179module_exit(irda_cleanup);
180
181MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> & Jean Tourrilhes <jt@hpl.hp.com>");
182MODULE_DESCRIPTION("The Linux IrDA Protocol Stack");
183MODULE_LICENSE("GPL");
184MODULE_ALIAS_NETPROTO(PF_IRDA);
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 80887528e77e..873ae189e37a 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -44,7 +44,7 @@
44 * the generic Linux PPP driver. Because IrNET depend on recent
45 * changes of the PPP driver interface, IrNET will work only with very
46 * recent kernel (2.3.99-pre6 and up).
47 *
48 * The present implementation offer the following features :
49 *	o simple user interface using pppd
50 *	o efficient implementation (interface directly to PPP and IrTTP)
@@ -327,7 +327,7 @@
327
328#define DEBUG_ASSERT    0	/* Verify all assertions */
329
330/*
331 * These are the macros we are using to actually print the debug
332 * statements. Don't look at it, it's ugly...
333 *
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index f65c7a83bc5c..c378e668af0c 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -118,7 +118,7 @@ irnet_open_tsap(irnet_socket * self)
118
119	/* Open an IrTTP instance */
120	self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
121				     &notify);
122	DABORT(self->tsap == NULL, -ENOMEM,
123	       IRDA_SR_ERROR, "Unable to allocate TSAP !\n");
124
@@ -188,7 +188,7 @@ irnet_ias_to_tsap(irnet_socket * self,
188	      if(value->t.integer != -1)
189		/* Get the remote TSAP selector */
190		dtsap_sel = value->t.integer;
191	      else
192		self->errno = -EADDRNOTAVAIL;
193	      break;
194	    default:
@@ -280,8 +280,8 @@ irnet_connect_tsap(irnet_socket * self)
280    }
281
282  /* Connect to remote device */
283  err = irttp_connect_request(self->tsap, self->dtsap_sel,
284			      self->rsaddr, self->daddr, NULL,
285			      self->max_sdu_size_rx, NULL);
286  if(err != 0)
287    {
@@ -438,7 +438,7 @@ irnet_dname_to_daddr(irnet_socket * self)
438  if(discoveries == NULL)
439    DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n");
440
441  /*
442   * Now, check all discovered devices (if any), and connect
443   * client only about the services that the client is
444   * interested in...
@@ -627,7 +627,7 @@ irda_irnet_destroy(irnet_socket * self)
627
628  /* Unregister with LM-IAS */
629  if(self->iriap)
630    {
631      iriap_close(self->iriap);
632      self->iriap = NULL;
633    }
@@ -945,7 +945,7 @@ irnet_setup_server(void)
945
946  /* Register with LM-IAS (so that people can connect to us) */
947  irnet_server.ias_obj = irias_new_object(IRNET_SERVICE_NAME, jiffies);
948  irias_add_integer_attrib(irnet_server.ias_obj, IRNET_IAS_VALUE,
949			   irnet_server.s.stsap_sel, IAS_KERNEL_ATTR);
950  irias_insert_object(irnet_server.ias_obj);
951
@@ -1076,7 +1076,7 @@ irnet_data_indication(void * instance,
1076 */
1077static void
1078irnet_disconnect_indication(void * instance,
1079			    void * sap,
1080			    LM_REASON reason,
1081			    struct sk_buff *skb)
1082{
@@ -1166,10 +1166,10 @@ irnet_disconnect_indication(void * instance,
1166 */
1167static void
1168irnet_connect_confirm(void * instance,
1169		      void * sap,
1170		      struct qos_info *qos,
1171		      __u32 max_sdu_size,
1172		      __u8 max_header_size,
1173		      struct sk_buff *skb)
1174{
1175  irnet_socket * self = (irnet_socket *) instance;
@@ -1235,7 +1235,7 @@ irnet_connect_confirm(void * instance,
1235static void
1236irnet_flow_indication(void * instance,
1237		      void * sap,
1238		      LOCAL_FLOW flow)
1239{
1240  irnet_socket * self = (irnet_socket *) instance;
1241  LOCAL_FLOW oldflow = self->tx_flow;
@@ -1308,13 +1308,13 @@ irnet_status_indication(void * instance,
1308 * Some other node is attempting to connect to the IrNET service, and has
1309 * sent a connection request on our server socket.
1310 * We just redirect the connection to the relevant IrNET socket.
1311 *
1312 * Note : we also make sure that between 2 irnet nodes, there can
1313 * exist only one irnet connection.
1314 */
1315static void
1316irnet_connect_indication(void * instance,
1317			 void * sap,
1318			 struct qos_info *qos,
1319			 __u32 max_sdu_size,
1320			 __u8 max_header_size,
@@ -1463,7 +1463,7 @@ irnet_connect_indication(void * instance,
1463 */
1464static void
1465irnet_getvalue_confirm(int result,
1466		       __u16 obj_id,
1467		       struct ias_value *value,
1468		       void * priv)
1469{
@@ -1526,7 +1526,7 @@ irnet_getvalue_confirm(int result,
1526 */
1527static void
1528irnet_discovervalue_confirm(int result,
1529			    __u16 obj_id,
1530			    struct ias_value *value,
1531			    void * priv)
1532{
@@ -1645,7 +1645,7 @@ irnet_discovery_indication(discinfo_t * discovery,
1645			   void * priv)
1646{
1647  irnet_socket * self = &irnet_server.s;
1648
1649  DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
1650  DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR,
1651	  "Invalid instance (0x%p) !!!\n", priv);
@@ -1676,7 +1676,7 @@ irnet_expiry_indication(discinfo_t * expiry,
1676			void * priv)
1677{
1678  irnet_socket * self = &irnet_server.s;
1679
1680  DENTER(IRDA_OCB_TRACE, "(self=0x%p)\n", self);
1681  DASSERT(priv == &irnet_server, , IRDA_OCB_ERROR,
1682	  "Invalid instance (0x%p) !!!\n", priv);
@@ -1718,7 +1718,7 @@ irnet_proc_read(char * buf,
1718  int i = 0;
1719
1720  len = 0;
1721
1722  /* Get the IrNET server information... */
1723  len += sprintf(buf+len, "IrNET server - ");
1724  len += sprintf(buf+len, "IrDA state: %s, ",
@@ -1811,7 +1811,7 @@ irda_irnet_init(void)
1811  memset(&irnet_server, 0, sizeof(struct irnet_root));
1812
1813  /* Setup start of irnet instance list */
1814  irnet_server.list = hashbin_new(HB_NOLOCK);
1815  DABORT(irnet_server.list == NULL, -ENOMEM,
1816	 MODULE_ERROR, "Can't allocate hashbin!\n");
1817  /* Init spinlock for instance list */
diff --git a/net/irda/irnet/irnet_irda.h b/net/irda/irnet/irnet_irda.h
index f2fecd32d8f6..0ba92d0d5204 100644
--- a/net/irda/irnet/irnet_irda.h
+++ b/net/irda/irnet/irnet_irda.h
@@ -146,7 +146,7 @@ static void
146			void *);
147static void
148	irnet_discovervalue_confirm(int,
149				    __u16,
150				    struct ias_value *,
151				    void *);
152#ifdef DISCOVERY_EVENTS
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index a1e502ff9070..2f9f8dce5a69 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -93,7 +93,7 @@ irnet_ctrl_write(irnet_socket * ap,
93
94      /* Check if we recognised one of the known command
95       * We can't use "switch" with strings, so hack with "continue" */
96
97      /* First command : name -> Requested IrDA nickname */
98      if(!strncmp(start, "name", 4))
99	{
@@ -744,7 +744,7 @@ dev_irnet_ioctl(struct inode * inode,
744      break;
745
746      /* Set DTR/RTS */
747    case TIOCMBIS:
748    case TIOCMBIC:
749      /* Set exclusive/non-exclusive mode */
750    case TIOCEXCL:
@@ -941,7 +941,7 @@ ppp_irnet_send(struct ppp_channel * chan,
941  ret = irttp_data_request(self->tsap, skb);
942  if(ret < 0)
943    {
944      /*
945       * > IrTTPs tx queue is full, so we just have to
946       * > drop the frame! You might think that we should
947       * > just return -1 and don't deallocate the frame,
@@ -949,7 +949,7 @@ ppp_irnet_send(struct ppp_channel * chan,
949       * > we have replaced the original skb with a new
950       * > one with larger headroom, and that would really
951       * > confuse do_dev_queue_xmit() in dev.c! I have
952       * > tried :-) DB
953       * Correction : we verify the flow control above (self->tx_flow),
954       * so we come here only if IrTTP doesn't like the packet (empty,
955       * too large, IrTTP not connected). In those rare cases, it's ok
@@ -1136,6 +1136,6 @@ irnet_cleanup(void)
1136module_init(irnet_init);
1137module_exit(irnet_cleanup);
1138MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
1139MODULE_DESCRIPTION("IrNET : Synchronous PPP over IrDA");
1140MODULE_LICENSE("GPL");
1141MODULE_ALIAS_CHARDEV(10, 187);
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index 88b9c43f6370..d6f9aba5b9dc 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename:      irproc.c
4 * Version:       1.0
5 * Description:   Various entries in the /proc file system
@@ -10,17 +10,17 @@
10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
11 *
12 *     Copyright (c) 1998-1999, Dag Brattli <dagb@cs.uit.no>
13 *     Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>,
14 *      All Rights Reserved.
15 *
16 *     This program is free software; you can redistribute it and/or
17 *     modify it under the terms of the GNU General Public License as
18 *     published by the Free Software Foundation; either version 2 of
19 *     the License, or (at your option) any later version.
20 *
21 *     I, Thomas Davis, provide no warranty for any of this software.
22 *     This material is provided "AS-IS" and at no charge.
23 *
24 ********************************************************************/
25
26#include <linux/miscdevice.h>
@@ -46,7 +46,7 @@ struct irda_entry {
46
47struct proc_dir_entry *proc_irda;
48EXPORT_SYMBOL(proc_irda);
49
50static struct irda_entry irda_dirs[] = {
51	{"discovery",	&discovery_seq_fops},
52	{"irttp",	&irttp_seq_fops},
@@ -61,7 +61,7 @@ static struct irda_entry irda_dirs[] = {
61 *    Register irda entry in /proc file system
62 *
63 */
64void __init irda_proc_register(void)
65{
66	int i;
67	struct proc_dir_entry *d;
@@ -73,7 +73,7 @@ void __init irda_proc_register(void)
73
74	for (i=0; i<ARRAY_SIZE(irda_dirs); i++) {
75		d = create_proc_entry(irda_dirs[i].name, 0, proc_irda);
76		if (d)
77			d->proc_fops = irda_dirs[i].fops;
78	}
79}
@@ -84,17 +84,17 @@ void __init irda_proc_register(void)
84 *    Unregister irda entry in /proc file system
85 *
86 */
87void __exit irda_proc_unregister(void)
88{
89	int i;
90
91	if (proc_irda) {
92		for (i=0; i<ARRAY_SIZE(irda_dirs); i++)
93			remove_proc_entry(irda_dirs[i].name, proc_irda);
94
95		remove_proc_entry("irda", proc_net);
96		proc_irda = NULL;
97	}
98}
99
100
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 1d26cd33ea13..92662330dbcf 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename:      irqueue.c
4 * Version:       0.3
5 * Description:   General queue implementation
@@ -10,28 +10,28 @@
10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
11 * Modified at:   Thu Jan  4 14:29:10 CET 2001
12 * Modified by:   Marc Zyngier <mzyngier@freesurf.fr>
13 *
14 *     Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
15 *     Copyright (C) 1998, Dag Brattli,
16 *     All Rights Reserved.
17 *
18 *     This code is taken from the Vortex Operating System written by Aage
19 *     Kvalnes. Aage has agreed that this code can use the GPL licence,
20 *     although he does not use that licence in his own code.
21 *
22 *     This copyright does however _not_ include the ELF hash() function
23 *     which I currently don't know which licence or copyright it
24 *     has. Please inform me if you know.
25 *
26 *     This program is free software; you can redistribute it and/or
27 *     modify it under the terms of the GNU General Public License as
28 *     published by the Free Software Foundation; either version 2 of
29 *     the License, or (at your option) any later version.
30 *
31 *     Neither Dag Brattli nor University of Tromsø admit liability nor
32 *     provide warranty for any of this software. This material is
33 *     provided "AS-IS" and at no charge.
34 *
35 ********************************************************************/
36
37/*
@@ -213,7 +213,7 @@ static __u32 hash( const char* name)
213{
214	__u32 h = 0;
215	__u32 g;
216
217	while(*name) {
218		h = (h<<4) + *name++;
219		if ((g = (h & 0xf0000000)))
@@ -231,7 +231,7 @@ static __u32 hash( const char* name)
231 */
232static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
233{
234
235	IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
236
237	/*
@@ -242,7 +242,7 @@ static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
242		 * Queue is empty.  Insert one element into the queue.
243		 */
244		element->q_next = element->q_prev = *queue = element;
245
246	} else {
247		/*
248		 * Queue is not empty.  Insert element into front of queue.
@@ -267,20 +267,20 @@ static irda_queue_t *dequeue_first(irda_queue_t **queue)
267	irda_queue_t *ret;
268
269	IRDA_DEBUG( 4, "dequeue_first()\n");
270
271	/*
272	 * Set return value
273	 */
274	ret =  *queue;
275
276	if ( *queue == NULL ) {
277		/*
278		 * Queue was empty.
279		 */
280	} else if ( (*queue)->q_next == *queue ) {
281		/*
282		 *  Queue only contained a single element. It will now be
283		 *  empty.
284		 */
285		*queue = NULL;
286	} else {
@@ -291,7 +291,7 @@ static irda_queue_t *dequeue_first(irda_queue_t **queue)
291		(*queue)->q_next->q_prev = (*queue)->q_prev;
292		*queue = (*queue)->q_next;
293	}
294
295	/*
296	 * Return the removed entry (or NULL of queue was empty).
297	 */
@@ -306,25 +306,25 @@ static irda_queue_t *dequeue_first(irda_queue_t **queue)
306static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
307{
308	irda_queue_t *ret;
309
310	IRDA_DEBUG( 4, "dequeue_general()\n");
311
312	/*
313	 * Set return value
314	 */
315	ret =  *queue;
316
317	if ( *queue == NULL ) {
318		/*
319		 * Queue was empty.
320		 */
321	} else if ( (*queue)->q_next == *queue ) {
322		/*
323		 *  Queue only contained a single element. It will now be
324		 *  empty.
325		 */
326		*queue = NULL;
327
328	} else {
329		/*
330		 *  Remove specific element.
@@ -334,7 +334,7 @@ static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element
334		if ( (*queue) == element)
335			(*queue) = element->q_next;
336	}
337
338	/*
339	 * Return the removed entry (or NULL of queue was empty).
340	 */
@@ -352,7 +352,7 @@ static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element
352hashbin_t *hashbin_new(int type)
353{
354	hashbin_t* hashbin;
355
356	/*
357	 * Allocate new hashbin
358	 */
@@ -380,8 +380,8 @@ EXPORT_SYMBOL(hashbin_new);
380/*
381 *  Function hashbin_delete (hashbin, free_func)
382 *
383 *    Destroy hashbin, the free_func can be a user supplied special routine
384 *    for deallocating this structure if it's complex. If not the user can
385 *    just supply kfree, which should take care of the job.
386 */
387int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
@@ -392,7 +392,7 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
392
393	IRDA_ASSERT(hashbin != NULL, return -1;);
394	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
395
396	/* Synchronize */
397	if ( hashbin->hb_type & HB_LOCK ) {
398		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
@@ -407,11 +407,11 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
407		while (queue ) {
408			if (free_func)
409				(*free_func)(queue);
410			queue = dequeue_first(
411				(irda_queue_t**) &hashbin->hb_queue[i]);
412		}
413	}
414
415	/* Cleanup local data */
416	hashbin->hb_current = NULL;
417	hashbin->magic = ~HB_MAGIC;
@@ -438,7 +438,7 @@ EXPORT_SYMBOL(hashbin_delete);
438 *    Insert an entry into the hashbin
439 *
440 */
441void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
442		    const char* name)
443{
444	unsigned long flags = 0;
@@ -460,14 +460,14 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
460	if ( hashbin->hb_type & HB_LOCK ) {
461		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
462	} /* Default is no-lock  */
463
464	/*
465	 * Store name and key
466	 */
467	entry->q_hash = hashv;
468	if ( name )
469		strlcpy( entry->q_name, name, sizeof(entry->q_name));
470
471	/*
472	 * Insert new entry first
473	 */
@@ -482,7 +482,7 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
482}
483EXPORT_SYMBOL(hashbin_insert);
484
485/*
486 *  Function hashbin_remove_first (hashbin)
487 *
488 *    Remove first entry of the hashbin
@@ -537,7 +537,7 @@ void *hashbin_remove_first( hashbin_t *hashbin)
537}
538
539
540/*
541 *  Function hashbin_remove (hashbin, hashv, name)
542 *
543 *    Remove entry with the given name
@@ -561,7 +561,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
561
562	IRDA_ASSERT( hashbin != NULL, return NULL;);
563	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
564
565	/*
566	 * Locate hashbin
567	 */
@@ -601,7 +601,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
601			entry = entry->q_next;
602		} while ( entry != hashbin->hb_queue[ bin ] );
603	}
604
605	/*
606	 * If entry was found, dequeue it
607	 */
@@ -622,18 +622,18 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
622	if ( hashbin->hb_type & HB_LOCK ) {
623		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
624	} /* Default is no-lock  */
625
626
627	/* Return */
628	if ( found )
629		return entry;
630	else
631		return NULL;
632
633}
634EXPORT_SYMBOL(hashbin_remove);
635
636/*
637 *  Function hashbin_remove_this (hashbin, entry)
638 *
639 *    Remove entry with the given name
@@ -655,7 +655,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
655	IRDA_ASSERT( hashbin != NULL, return NULL;);
656	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
657	IRDA_ASSERT( entry != NULL, return NULL;);
658
659	/* Synchronize */
660	if ( hashbin->hb_type & HB_LOCK ) {
661		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
@@ -722,7 +722,7 @@ void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
722	if ( name )
723		hashv = hash( name );
724	bin = GET_HASHBIN( hashv );
725
726	/*
727	 * Search for entry
728	 */
@@ -829,7 +829,7 @@ void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
829 *    called before any calls to hashbin_get_next()!
830 *
831 */
832irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
833{
834	irda_queue_t *entry;
835	int i;
@@ -860,7 +860,7 @@ EXPORT_SYMBOL(hashbin_get_first);
860 *    Get next item in hashbin. A series of hashbin_get_next() calls must
861 *    be started by a call to hashbin_get_first(). The function returns
862 *    NULL when all items have been traversed
863 *
864 *    The context of the search is stored within the hashbin, so you must
865 *    protect yourself from concurrent enumerations. - Jean II
866 */
@@ -876,13 +876,13 @@ irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
876	if ( hashbin->hb_current == NULL) {
877		IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
878		return NULL;
879	}
880	entry = hashbin->hb_current->q_next;
881	bin = GET_HASHBIN( entry->q_hash);
882
883	/*
884	 *  Make sure that we are not back at the beginning of the queue
885	 *  again
886	 */
887	if ( entry != hashbin->hb_queue[ bin ]) {
888		hashbin->hb_current = entry;
@@ -895,7 +895,7 @@ irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
895	 */
896	if ( bin >= HASHBIN_SIZE)
897		return NULL;
898
899	/*
900	 *  Move to next queue in hashbin
901	 */
@@ -904,7 +904,7 @@ irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
904		entry = hashbin->hb_queue[ i];
905		if ( entry) {
906			hashbin->hb_current = entry;
907
908			return entry;
909		}
910	}
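The hunks above touch most of the hashbin API. As a reading aid, here is a minimal sketch, not part of this patch, of the usual create/insert/find/remove/delete sequence. The struct example_entry type and the kzalloc/kfree calls are illustrative assumptions; the hashbin_*() calls follow the signatures visible in the diff, and entries embed an irda_queue_t as their first member the same way lsap_cb and tsap_cb do:

	/* Sketch only: a made-up entry type for a hashbin. */
	struct example_entry {
		irda_queue_t q;		/* must be the first member */
		int value;
	};

	hashbin_t *hb = hashbin_new(HB_LOCK);	/* HB_NOLOCK also possible */
	struct example_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (hb && e) {
		e->value = 42;
		/* key by name (hashv 0); a numeric key with name == NULL
		 * is the other common pattern */
		hashbin_insert(hb, (irda_queue_t *) e, 0, "example");

		e = hashbin_find(hb, 0, "example");	/* look it up */
		e = hashbin_remove(hb, 0, "example");	/* unlink and return it */
		kfree(e);
	}
	hashbin_delete(hb, (FREE_FUNC) kfree);	/* frees any remaining entries */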
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 86805c3d8324..bb53ba0be585 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename:      irsysctl.c
4 * Version:       1.0
5 * Description:   Sysctl interface for IrDA
@@ -8,19 +8,19 @@
8 * Created at:    Sun May 24 22:12:06 1998
9 * Modified at:   Fri Jun  4 02:50:15 1999
10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
11 *
12 *     Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved.
13 *     Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com>
14 *
15 *     This program is free software; you can redistribute it and/or
16 *     modify it under the terms of the GNU General Public License as
17 *     published by the Free Software Foundation; either version 2 of
18 *     the License, or (at your option) any later version.
19 *
20 *     Neither Dag Brattli nor University of Tromsø admit liability nor
21 *     provide warranty for any of this software. This material is
22 *     provided "AS-IS" and at no charge.
23 *
24 ********************************************************************/
25
26#include <linux/mm.h>
@@ -111,7 +111,7 @@ static ctl_table irda_table[] = {
111		.strategy	= &sysctl_string
112	},
113#ifdef CONFIG_IRDA_DEBUG
114	{
115		.ctl_name	= DEBUG,
116		.procname	= "debug",
117		.data		= &irda_debug,
@@ -121,7 +121,7 @@ static ctl_table irda_table[] = {
121	},
122#endif
123#ifdef CONFIG_IRDA_FAST_RR
124	{
125		.ctl_name	= FAST_POLL,
126		.procname	= "fast_poll_increase",
127		.data		= &sysctl_fast_poll_increase,
@@ -287,7 +287,7 @@ int __init irda_sysctl_register(void)
287 *    Unregister our sysctl interface
288 *
289 */
290void __exit irda_sysctl_unregister(void)
291{
292	unregister_sysctl_table(irda_table_header);
293}
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 03504f3e4990..68836358fdf2 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1,5 +1,5 @@
1/*********************************************************************
2 *
3 * Filename:      irttp.c
4 * Version:       1.2
5 * Description:   Tiny Transport Protocol (TTP) implementation
@@ -8,18 +8,18 @@
8 * Created at:    Sun Aug 31 20:14:31 1997
9 * Modified at:   Wed Jan  5 11:31:27 2000
10 * Modified by:   Dag Brattli <dagb@cs.uit.no>
11 *
12 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
13 *     All Rights Reserved.
14 *     Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
15 *
16 *     This program is free software; you can redistribute it and/or
17 *     modify it under the terms of the GNU General Public License as
18 *     published by the Free Software Foundation; either version 2 of
19 *     the License, or (at your option) any later version.
20 *
21 *     Neither Dag Brattli nor University of Tromsø admit liability nor
22 *     provide warranty for any of this software. This material is
23 *     provided "AS-IS" and at no charge.
24 *
25 ********************************************************************/
@@ -42,17 +42,17 @@ static struct irttp_cb *irttp;
42
43static void __irttp_close_tsap(struct tsap_cb *self);
44
45static int irttp_data_indication(void *instance, void *sap,
46				 struct sk_buff *skb);
47static int irttp_udata_indication(void *instance, void *sap,
48				  struct sk_buff *skb);
49static void irttp_disconnect_indication(void *instance, void *sap,
50					LM_REASON reason, struct sk_buff *);
51static void irttp_connect_indication(void *instance, void *sap,
52				     struct qos_info *qos, __u32 max_sdu_size,
53				     __u8 header_size, struct sk_buff *skb);
54static void irttp_connect_confirm(void *instance, void *sap,
55				  struct qos_info *qos, __u32 max_sdu_size,
56				  __u8 header_size, struct sk_buff *skb);
57static void irttp_run_tx_queue(struct tsap_cb *self);
58static void irttp_run_rx_queue(struct tsap_cb *self);
@@ -61,7 +61,7 @@ static void irttp_flush_queues(struct tsap_cb *self);
61static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
62static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
63static void irttp_todo_expired(unsigned long data);
64static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
65				    int get);
66
67static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
@@ -109,7 +109,7 @@ int __init irttp_init(void)
109 *    Called by module destruction/cleanup code
110 *
111 */
112void __exit irttp_cleanup(void)
113{
114	/* Check for main structure */
115	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
@@ -865,7 +865,7 @@ static int irttp_udata_indication(void *instance, void *sap,
865		err = self->notify.udata_indication(self->notify.instance,
866						    self,skb);
867		/* Same comment as in irttp_do_data_indication() */
868		if (!err)
869			return 0;
870	}
871	/* Either no handler, or handler returns an error */
@@ -940,7 +940,7 @@ static int irttp_data_indication(void *instance, void *sap,
940
941	/*
942	 * If the peer device has given us some credits and we didn't have
943	 * anyone from before, then we need to shedule the tx queue.
944	 * We need to do that because our Tx have stopped (so we may not
945	 * get any LAP flow indication) and the user may be stopped as
946	 * well. - Jean II
@@ -1798,14 +1798,14 @@ static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
1798	spin_lock_irq(&irttp->tsaps->hb_spinlock);
1799	iter->id = 0;
1800
1801	for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
1802	     self != NULL;
1803	     self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
1804		if (iter->id == *pos)
1805			break;
1806		++iter->id;
1807	}
1808
1809	return self;
1810}
1811
diff --git a/net/irda/parameters.c b/net/irda/parameters.c
index 1324942f976c..75a72d203b01 100644
--- a/net/irda/parameters.c
+++ b/net/irda/parameters.c
@@ -563,7 +563,7 @@ static int irda_param_extract(void *self, __u8 *buf, int len,
563 *    safe. Returns the number of bytes that was parsed
564 *
565 */
566int irda_param_extract_all(void *self, __u8 *buf, int len,
567			   pi_param_info_t *info)
568{
569	int ret = -1;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 95a69c013ee8..349012c926b7 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: qos.c 3 * Filename: qos.c
4 * Version: 1.0 4 * Version: 1.0
5 * Description: IrLAP QoS parameter negotiation 5 * Description: IrLAP QoS parameter negotiation
@@ -8,26 +8,26 @@
8 * Created at: Tue Sep 9 00:00:26 1997 8 * Created at: Tue Sep 9 00:00:26 1997
9 * Modified at: Sun Jan 30 14:29:16 2000 9 * Modified at: Sun Jan 30 14:29:16 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>, 12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
13 * All Rights Reserved. 13 * All Rights Reserved.
14 * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com> 14 * Copyright (c) 2000-2001 Jean Tourrilhes <jt@hpl.hp.com>
15 * 15 *
16 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as 17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * This program is distributed in the hope that it will be useful, 21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details. 24 * GNU General Public License for more details.
25 * 25 *
26 * You should have received a copy of the GNU General Public License 26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software 27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA 29 * MA 02111-1307 USA
30 * 30 *
31 ********************************************************************/ 31 ********************************************************************/
32 32
33#include <asm/byteorder.h> 33#include <asm/byteorder.h>
@@ -84,16 +84,16 @@ unsigned sysctl_max_tx_data_size = 2042;
84unsigned sysctl_max_tx_window = 7; 84unsigned sysctl_max_tx_window = 7;
85 85
86static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); 86static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
87static int irlap_param_link_disconnect(void *instance, irda_param_t *parm, 87static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
88 int get); 88 int get);
89static int irlap_param_max_turn_time(void *instance, irda_param_t *param, 89static int irlap_param_max_turn_time(void *instance, irda_param_t *param,
90 int get); 90 int get);
91static int irlap_param_data_size(void *instance, irda_param_t *param, int get); 91static int irlap_param_data_size(void *instance, irda_param_t *param, int get);
92static int irlap_param_window_size(void *instance, irda_param_t *param, 92static int irlap_param_window_size(void *instance, irda_param_t *param,
93 int get); 93 int get);
94static int irlap_param_additional_bofs(void *instance, irda_param_t *parm, 94static int irlap_param_additional_bofs(void *instance, irda_param_t *parm,
95 int get); 95 int get);
96static int irlap_param_min_turn_time(void *instance, irda_param_t *param, 96static int irlap_param_min_turn_time(void *instance, irda_param_t *param,
97 int get); 97 int get);
98 98
99#ifndef CONFIG_IRDA_DYNAMIC_WINDOW 99#ifndef CONFIG_IRDA_DYNAMIC_WINDOW
@@ -101,7 +101,7 @@ static __u32 irlap_requested_line_capacity(struct qos_info *qos);
101#endif 101#endif
102 102
103static __u32 min_turn_times[] = { 10000, 5000, 1000, 500, 100, 50, 10, 0 }; /* us */ 103static __u32 min_turn_times[] = { 10000, 5000, 1000, 500, 100, 50, 10, 0 }; /* us */
104static __u32 baud_rates[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000, 104static __u32 baud_rates[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000,
105 1152000, 4000000, 16000000 }; /* bps */ 105 1152000, 4000000, 16000000 }; /* bps */
106static __u32 data_sizes[] = { 64, 128, 256, 512, 1024, 2048 }; /* bytes */ 106static __u32 data_sizes[] = { 64, 128, 256, 512, 1024, 2048 }; /* bytes */
107static __u32 add_bofs[] = { 48, 24, 12, 5, 3, 2, 1, 0 }; /* bytes */ 107static __u32 add_bofs[] = { 48, 24, 12, 5, 3, 2, 1, 0 }; /* bytes */
@@ -165,7 +165,7 @@ static pi_param_info_t irlap_param_info = { pi_major_call_table, 2, 0x7f, 7 };
165static inline int value_index(__u32 value, __u32 *array, int size) 165static inline int value_index(__u32 value, __u32 *array, int size)
166{ 166{
167 int i; 167 int i;
168 168
169 for (i=0; i < size; i++) 169 for (i=0; i < size; i++)
170 if (array[i] == value) 170 if (array[i] == value)
171 break; 171 break;
@@ -178,7 +178,7 @@ static inline int value_index(__u32 value, __u32 *array, int size)
178 * Returns value to index in array, easy! 178 * Returns value to index in array, easy!
179 * 179 *
180 */ 180 */
181static inline __u32 index_value(int index, __u32 *array) 181static inline __u32 index_value(int index, __u32 *array)
182{ 182{
183 return array[index]; 183 return array[index];
184} 184}
@@ -189,7 +189,7 @@ static inline __u32 index_value(int index, __u32 *array)
189 * Returns index to most significant bit (MSB) in word 189 * Returns index to most significant bit (MSB) in word
190 * 190 *
191 */ 191 */
192static int msb_index (__u16 word) 192static int msb_index (__u16 word)
193{ 193{
194 __u16 msb = 0x8000; 194 __u16 msb = 0x8000;
195 int index = 15; /* Current MSB */ 195 int index = 15; /* Current MSB */
@@ -298,12 +298,12 @@ void irda_qos_compute_intersection(struct qos_info *qos, struct qos_info *new)
298 * 298 *
299 * The purpose of this function is for layers and drivers to be able to 299 * The purpose of this function is for layers and drivers to be able to
300 * set the maximum QoS possible and then "and in" their own limitations 300 * set the maximum QoS possible and then "and in" their own limitations
301 * 301 *
302 */ 302 */
303void irda_init_max_qos_capabilies(struct qos_info *qos) 303void irda_init_max_qos_capabilies(struct qos_info *qos)
304{ 304{
305 int i; 305 int i;
306 /* 306 /*
307 * These are the maximum supported values as specified on pages 307 * These are the maximum supported values as specified on pages
308 * 39-43 in IrLAP 308 * 39-43 in IrLAP
309 */ 309 */
@@ -361,25 +361,25 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
361 qos->min_turn_time.value = sysctl_min_tx_turn_time; 361 qos->min_turn_time.value = sysctl_min_tx_turn_time;
362 } 362 }
363 363
364 /* 364 /*
365 * Not allowed to use a max turn time less than 500 ms if the baudrate 365 * Not allowed to use a max turn time less than 500 ms if the baudrate
366 * is less than 115200 366 * is less than 115200
367 */ 367 */
368 if ((qos->baud_rate.value < 115200) && 368 if ((qos->baud_rate.value < 115200) &&
369 (qos->max_turn_time.value < 500)) 369 (qos->max_turn_time.value < 500))
370 { 370 {
371 IRDA_DEBUG(0, 371 IRDA_DEBUG(0,
372 "%s(), adjusting max turn time from %d to 500 ms\n", 372 "%s(), adjusting max turn time from %d to 500 ms\n",
373 __FUNCTION__, qos->max_turn_time.value); 373 __FUNCTION__, qos->max_turn_time.value);
374 qos->max_turn_time.value = 500; 374 qos->max_turn_time.value = 500;
375 } 375 }
376 376
377 /* 377 /*
378 * The data size must be adjusted according to the baud rate and max 378 * The data size must be adjusted according to the baud rate and max
379 * turn time 379 * turn time
380 */ 380 */
381 index = value_index(qos->data_size.value, data_sizes, 6); 381 index = value_index(qos->data_size.value, data_sizes, 6);
382 line_capacity = irlap_max_line_capacity(qos->baud_rate.value, 382 line_capacity = irlap_max_line_capacity(qos->baud_rate.value,
383 qos->max_turn_time.value); 383 qos->max_turn_time.value);
384 384
385#ifdef CONFIG_IRDA_DYNAMIC_WINDOW 385#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
@@ -427,32 +427,32 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
427 * We just set the QoS capabilities for the peer station 427 * We just set the QoS capabilities for the peer station
428 * 428 *
429 */ 429 */
430int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb) 430int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb)
431{ 431{
432 int ret; 432 int ret;
433 433
434 ret = irda_param_extract_all(self, skb->data, skb->len, 434 ret = irda_param_extract_all(self, skb->data, skb->len,
435 &irlap_param_info); 435 &irlap_param_info);
436 436
437 /* Convert the negotiated bits to values */ 437 /* Convert the negotiated bits to values */
438 irda_qos_bits_to_value(&self->qos_tx); 438 irda_qos_bits_to_value(&self->qos_tx);
439 irda_qos_bits_to_value(&self->qos_rx); 439 irda_qos_bits_to_value(&self->qos_rx);
440 440
441 irlap_adjust_qos_settings(&self->qos_tx); 441 irlap_adjust_qos_settings(&self->qos_tx);
442 442
443 IRDA_DEBUG(2, "Setting BAUD_RATE to %d bps.\n", 443 IRDA_DEBUG(2, "Setting BAUD_RATE to %d bps.\n",
444 self->qos_tx.baud_rate.value); 444 self->qos_tx.baud_rate.value);
445 IRDA_DEBUG(2, "Setting DATA_SIZE to %d bytes\n", 445 IRDA_DEBUG(2, "Setting DATA_SIZE to %d bytes\n",
446 self->qos_tx.data_size.value); 446 self->qos_tx.data_size.value);
447 IRDA_DEBUG(2, "Setting WINDOW_SIZE to %d\n", 447 IRDA_DEBUG(2, "Setting WINDOW_SIZE to %d\n",
448 self->qos_tx.window_size.value); 448 self->qos_tx.window_size.value);
449 IRDA_DEBUG(2, "Setting XBOFS to %d\n", 449 IRDA_DEBUG(2, "Setting XBOFS to %d\n",
450 self->qos_tx.additional_bofs.value); 450 self->qos_tx.additional_bofs.value);
451 IRDA_DEBUG(2, "Setting MAX_TURN_TIME to %d ms.\n", 451 IRDA_DEBUG(2, "Setting MAX_TURN_TIME to %d ms.\n",
452 self->qos_tx.max_turn_time.value); 452 self->qos_tx.max_turn_time.value);
453 IRDA_DEBUG(2, "Setting MIN_TURN_TIME to %d usecs.\n", 453 IRDA_DEBUG(2, "Setting MIN_TURN_TIME to %d usecs.\n",
454 self->qos_tx.min_turn_time.value); 454 self->qos_tx.min_turn_time.value);
455 IRDA_DEBUG(2, "Setting LINK_DISC to %d secs.\n", 455 IRDA_DEBUG(2, "Setting LINK_DISC to %d secs.\n",
456 self->qos_tx.link_disc_time.value); 456 self->qos_tx.link_disc_time.value);
457 return ret; 457 return ret;
458} 458}
@@ -463,55 +463,55 @@ int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb)
 463 * Insert QoS negotiation parameters into frame 463 * Insert QoS negotiation parameters into frame
464 * 464 *
465 */ 465 */
466int irlap_insert_qos_negotiation_params(struct irlap_cb *self, 466int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
467 struct sk_buff *skb) 467 struct sk_buff *skb)
468{ 468{
469 int ret; 469 int ret;
470 470
471 /* Insert data rate */ 471 /* Insert data rate */
472 ret = irda_param_insert(self, PI_BAUD_RATE, skb->tail, 472 ret = irda_param_insert(self, PI_BAUD_RATE, skb->tail,
473 skb_tailroom(skb), &irlap_param_info); 473 skb_tailroom(skb), &irlap_param_info);
474 if (ret < 0) 474 if (ret < 0)
475 return ret; 475 return ret;
476 skb_put(skb, ret); 476 skb_put(skb, ret);
477 477
478 /* Insert max turnaround time */ 478 /* Insert max turnaround time */
479 ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb->tail, 479 ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb->tail,
480 skb_tailroom(skb), &irlap_param_info); 480 skb_tailroom(skb), &irlap_param_info);
481 if (ret < 0) 481 if (ret < 0)
482 return ret; 482 return ret;
483 skb_put(skb, ret); 483 skb_put(skb, ret);
484 484
485 /* Insert data size */ 485 /* Insert data size */
486 ret = irda_param_insert(self, PI_DATA_SIZE, skb->tail, 486 ret = irda_param_insert(self, PI_DATA_SIZE, skb->tail,
487 skb_tailroom(skb), &irlap_param_info); 487 skb_tailroom(skb), &irlap_param_info);
488 if (ret < 0) 488 if (ret < 0)
489 return ret; 489 return ret;
490 skb_put(skb, ret); 490 skb_put(skb, ret);
491 491
492 /* Insert window size */ 492 /* Insert window size */
493 ret = irda_param_insert(self, PI_WINDOW_SIZE, skb->tail, 493 ret = irda_param_insert(self, PI_WINDOW_SIZE, skb->tail,
494 skb_tailroom(skb), &irlap_param_info); 494 skb_tailroom(skb), &irlap_param_info);
495 if (ret < 0) 495 if (ret < 0)
496 return ret; 496 return ret;
497 skb_put(skb, ret); 497 skb_put(skb, ret);
498 498
499 /* Insert additional BOFs */ 499 /* Insert additional BOFs */
500 ret = irda_param_insert(self, PI_ADD_BOFS, skb->tail, 500 ret = irda_param_insert(self, PI_ADD_BOFS, skb->tail,
501 skb_tailroom(skb), &irlap_param_info); 501 skb_tailroom(skb), &irlap_param_info);
502 if (ret < 0) 502 if (ret < 0)
503 return ret; 503 return ret;
504 skb_put(skb, ret); 504 skb_put(skb, ret);
505 505
506 /* Insert minimum turnaround time */ 506 /* Insert minimum turnaround time */
507 ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb->tail, 507 ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb->tail,
508 skb_tailroom(skb), &irlap_param_info); 508 skb_tailroom(skb), &irlap_param_info);
509 if (ret < 0) 509 if (ret < 0)
510 return ret; 510 return ret;
511 skb_put(skb, ret); 511 skb_put(skb, ret);
512 512
513 /* Insert link disconnect/threshold time */ 513 /* Insert link disconnect/threshold time */
514 ret = irda_param_insert(self, PI_LINK_DISC, skb->tail, 514 ret = irda_param_insert(self, PI_LINK_DISC, skb->tail,
515 skb_tailroom(skb), &irlap_param_info); 515 skb_tailroom(skb), &irlap_param_info);
516 if (ret < 0) 516 if (ret < 0)
517 return ret; 517 return ret;
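Every parameter above is appended with the same three steps — format it into the tailroom, bail out on error, then commit the bytes with skb_put(). A hypothetical wrapper (the helper name is invented here; the stack open-codes the pattern exactly as shown) would read:

	/* Hypothetical helper, mirroring the open-coded pattern above. */
	static int irlap_put_param(struct irlap_cb *self, __u8 pi,
				   struct sk_buff *skb)
	{
		int ret;

		ret = irda_param_insert(self, pi, skb->tail,
					skb_tailroom(skb), &irlap_param_info);
		if (ret < 0)
			return ret;
		skb_put(skb, ret);	/* account for the bytes just written */
		return 0;
	}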
@@ -537,12 +537,12 @@ static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get)
537 537
538 if (get) { 538 if (get) {
539 param->pv.i = self->qos_rx.baud_rate.bits; 539 param->pv.i = self->qos_rx.baud_rate.bits;
540 IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n", 540 IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n",
541 __FUNCTION__, param->pv.i); 541 __FUNCTION__, param->pv.i);
542 } else { 542 } else {
543 /* 543 /*
544 * Stations must agree on baud rate, so calculate 544 * Stations must agree on baud rate, so calculate
545 * intersection 545 * intersection
546 */ 546 */
547 IRDA_DEBUG(2, "Requested BAUD_RATE: 0x%04x\n", (__u16) param->pv.i); 547 IRDA_DEBUG(2, "Requested BAUD_RATE: 0x%04x\n", (__u16) param->pv.i);
548 final = (__u16) param->pv.i & self->qos_rx.baud_rate.bits; 548 final = (__u16) param->pv.i & self->qos_rx.baud_rate.bits;
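Baud rate is a type-0 parameter, so both stations must converge on a single value: the peer's advertised bit mask is ANDed with the local capability mask and the most significant surviving bit selects the rate. An illustration with made-up masks (not real PV encodings); msb_index() is the same helper used by irda_qos_bits_to_value() further down:

	__u16 peer   = 0x01ff;		/* what the peer says it can do      */
	__u16 local  = 0x003f;		/* what we are willing to accept     */
	__u16 common = peer & local;	/* rates supported by both ends      */
	int   best   = msb_index(common); /* index of the fastest common rate
					   * in baud_rates[]                  */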
@@ -558,24 +558,24 @@ static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get)
558/* 558/*
559 * Function irlap_param_link_disconnect (instance, param, get) 559 * Function irlap_param_link_disconnect (instance, param, get)
560 * 560 *
561 * Negotiate link disconnect/threshold time. 561 * Negotiate link disconnect/threshold time.
562 * 562 *
563 */ 563 */
564static int irlap_param_link_disconnect(void *instance, irda_param_t *param, 564static int irlap_param_link_disconnect(void *instance, irda_param_t *param,
565 int get) 565 int get)
566{ 566{
567 __u16 final; 567 __u16 final;
568 568
569 struct irlap_cb *self = (struct irlap_cb *) instance; 569 struct irlap_cb *self = (struct irlap_cb *) instance;
570 570
571 IRDA_ASSERT(self != NULL, return -1;); 571 IRDA_ASSERT(self != NULL, return -1;);
572 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 572 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
573 573
574 if (get) 574 if (get)
575 param->pv.i = self->qos_rx.link_disc_time.bits; 575 param->pv.i = self->qos_rx.link_disc_time.bits;
576 else { 576 else {
577 /* 577 /*
578 * Stations must agree on link disconnect/threshold 578 * Stations must agree on link disconnect/threshold
579 * time. 579 * time.
580 */ 580 */
581 IRDA_DEBUG(2, "LINK_DISC: %02x\n", (__u8) param->pv.i); 581 IRDA_DEBUG(2, "LINK_DISC: %02x\n", (__u8) param->pv.i);
@@ -595,14 +595,14 @@ static int irlap_param_link_disconnect(void *instance, irda_param_t *param,
595 * will be negotiated independently for each station 595 * will be negotiated independently for each station
596 * 596 *
597 */ 597 */
598static int irlap_param_max_turn_time(void *instance, irda_param_t *param, 598static int irlap_param_max_turn_time(void *instance, irda_param_t *param,
599 int get) 599 int get)
600{ 600{
601 struct irlap_cb *self = (struct irlap_cb *) instance; 601 struct irlap_cb *self = (struct irlap_cb *) instance;
602 602
603 IRDA_ASSERT(self != NULL, return -1;); 603 IRDA_ASSERT(self != NULL, return -1;);
604 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 604 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
605 605
606 if (get) 606 if (get)
607 param->pv.i = self->qos_rx.max_turn_time.bits; 607 param->pv.i = self->qos_rx.max_turn_time.bits;
608 else 608 else
@@ -621,10 +621,10 @@ static int irlap_param_max_turn_time(void *instance, irda_param_t *param,
621static int irlap_param_data_size(void *instance, irda_param_t *param, int get) 621static int irlap_param_data_size(void *instance, irda_param_t *param, int get)
622{ 622{
623 struct irlap_cb *self = (struct irlap_cb *) instance; 623 struct irlap_cb *self = (struct irlap_cb *) instance;
624 624
625 IRDA_ASSERT(self != NULL, return -1;); 625 IRDA_ASSERT(self != NULL, return -1;);
626 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 626 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
627 627
628 if (get) 628 if (get)
629 param->pv.i = self->qos_rx.data_size.bits; 629 param->pv.i = self->qos_rx.data_size.bits;
630 else 630 else
@@ -640,14 +640,14 @@ static int irlap_param_data_size(void *instance, irda_param_t *param, int get)
640 * will be negotiated independently for each station 640 * will be negotiated independently for each station
641 * 641 *
642 */ 642 */
643static int irlap_param_window_size(void *instance, irda_param_t *param, 643static int irlap_param_window_size(void *instance, irda_param_t *param,
644 int get) 644 int get)
645{ 645{
646 struct irlap_cb *self = (struct irlap_cb *) instance; 646 struct irlap_cb *self = (struct irlap_cb *) instance;
647 647
648 IRDA_ASSERT(self != NULL, return -1;); 648 IRDA_ASSERT(self != NULL, return -1;);
649 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 649 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
650 650
651 if (get) 651 if (get)
652 param->pv.i = self->qos_rx.window_size.bits; 652 param->pv.i = self->qos_rx.window_size.bits;
653 else 653 else
@@ -665,10 +665,10 @@ static int irlap_param_window_size(void *instance, irda_param_t *param,
665static int irlap_param_additional_bofs(void *instance, irda_param_t *param, int get) 665static int irlap_param_additional_bofs(void *instance, irda_param_t *param, int get)
666{ 666{
667 struct irlap_cb *self = (struct irlap_cb *) instance; 667 struct irlap_cb *self = (struct irlap_cb *) instance;
668 668
669 IRDA_ASSERT(self != NULL, return -1;); 669 IRDA_ASSERT(self != NULL, return -1;);
670 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 670 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
671 671
672 if (get) 672 if (get)
673 param->pv.i = self->qos_rx.additional_bofs.bits; 673 param->pv.i = self->qos_rx.additional_bofs.bits;
674 else 674 else
@@ -683,14 +683,14 @@ static int irlap_param_additional_bofs(void *instance, irda_param_t *param, int
683 * Negotiate the minimum turn around time. This is a type 1 parameter and 683 * Negotiate the minimum turn around time. This is a type 1 parameter and
684 * will be negotiated independently for each station 684 * will be negotiated independently for each station
685 */ 685 */
686static int irlap_param_min_turn_time(void *instance, irda_param_t *param, 686static int irlap_param_min_turn_time(void *instance, irda_param_t *param,
687 int get) 687 int get)
688{ 688{
689 struct irlap_cb *self = (struct irlap_cb *) instance; 689 struct irlap_cb *self = (struct irlap_cb *) instance;
690 690
691 IRDA_ASSERT(self != NULL, return -1;); 691 IRDA_ASSERT(self != NULL, return -1;);
692 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); 692 IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
693 693
694 if (get) 694 if (get)
695 param->pv.i = self->qos_rx.min_turn_time.bits; 695 param->pv.i = self->qos_rx.min_turn_time.bits;
696 else 696 else
@@ -721,9 +721,9 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time)
721 721
722 line_capacity = max_line_capacities[i][j]; 722 line_capacity = max_line_capacities[i][j];
723 723
724 IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n", 724 IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n",
725 __FUNCTION__, line_capacity); 725 __FUNCTION__, line_capacity);
726 726
727 return line_capacity; 727 return line_capacity;
728} 728}
729 729
@@ -749,7 +749,7 @@ void irda_qos_bits_to_value(struct qos_info *qos)
749 int index; 749 int index;
750 750
751 IRDA_ASSERT(qos != NULL, return;); 751 IRDA_ASSERT(qos != NULL, return;);
752 752
753 index = msb_index(qos->baud_rate.bits); 753 index = msb_index(qos->baud_rate.bits);
754 qos->baud_rate.value = baud_rates[index]; 754 qos->baud_rate.value = baud_rates[index];
755 755
@@ -761,13 +761,13 @@ void irda_qos_bits_to_value(struct qos_info *qos)
761 761
762 index = msb_index(qos->min_turn_time.bits); 762 index = msb_index(qos->min_turn_time.bits);
763 qos->min_turn_time.value = min_turn_times[index]; 763 qos->min_turn_time.value = min_turn_times[index];
764 764
765 index = msb_index(qos->max_turn_time.bits); 765 index = msb_index(qos->max_turn_time.bits);
766 qos->max_turn_time.value = max_turn_times[index]; 766 qos->max_turn_time.value = max_turn_times[index];
767 767
768 index = msb_index(qos->link_disc_time.bits); 768 index = msb_index(qos->link_disc_time.bits);
769 qos->link_disc_time.value = link_disc_times[index]; 769 qos->link_disc_time.value = link_disc_times[index];
770 770
771 index = msb_index(qos->additional_bofs.bits); 771 index = msb_index(qos->additional_bofs.bits);
772 qos->additional_bofs.value = add_bofs[index]; 772 qos->additional_bofs.value = add_bofs[index];
773} 773}
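Each field is decoded the same way: take the index of the most significant set bit in the negotiated mask and use it to index the matching value table (baud_rates[], data_sizes[], max_turn_times[], and so on). A minimal sketch of what msb_index() is assumed to do (the real helper also warns on an all-zero mask):

	/* Index of the highest set bit; 16 bits covers every negotiated
	 * PV field.  Assumed behaviour, not the exact helper.            */
	static int msb_index_sketch(__u16 word)
	{
		int index = 0;

		while (word >>= 1)
			index++;
		return index;
	}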
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 3871a2b911f9..d3a6ee8cc4a2 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -1,25 +1,25 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Filename: timer.c 3 * Filename: timer.c
4 * Version: 4 * Version:
5 * Description: 5 * Description:
6 * Status: Experimental. 6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no> 7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sat Aug 16 00:59:29 1997 8 * Created at: Sat Aug 16 00:59:29 1997
9 * Modified at: Wed Dec 8 12:50:34 1999 9 * Modified at: Wed Dec 8 12:50:34 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no> 10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * 11 *
12 * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>, 12 * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
13 * All Rights Reserved. 13 * All Rights Reserved.
14 * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com> 14 * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
15 * 15 *
16 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as 17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of 18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 19 * the License, or (at your option) any later version.
20 * 20 *
21 * Neither Dag Brattli nor University of Tromsø admit liability nor 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is 22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge. 23 * provided "AS-IS" and at no charge.
24 * 24 *
25 ********************************************************************/ 25 ********************************************************************/
@@ -40,11 +40,11 @@ static void irlap_query_timer_expired(void* data);
40static void irlap_final_timer_expired(void* data); 40static void irlap_final_timer_expired(void* data);
41static void irlap_wd_timer_expired(void* data); 41static void irlap_wd_timer_expired(void* data);
42static void irlap_backoff_timer_expired(void* data); 42static void irlap_backoff_timer_expired(void* data);
43static void irlap_media_busy_expired(void* data); 43static void irlap_media_busy_expired(void* data);
44 44
45void irlap_start_slot_timer(struct irlap_cb *self, int timeout) 45void irlap_start_slot_timer(struct irlap_cb *self, int timeout)
46{ 46{
47 irda_start_timer(&self->slot_timer, timeout, (void *) self, 47 irda_start_timer(&self->slot_timer, timeout, (void *) self,
48 irlap_slot_timer_expired); 48 irlap_slot_timer_expired);
49} 49}
50 50
@@ -67,31 +67,31 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
67 /* Set or re-set the timer. We reset the timer for each received 67 /* Set or re-set the timer. We reset the timer for each received
 68 * discovery query, which allows us to automatically adjust to 68 * discovery query, which allows us to automatically adjust to
69 * the speed of the peer discovery (faster or slower). Jean II */ 69 * the speed of the peer discovery (faster or slower). Jean II */
70 irda_start_timer( &self->query_timer, timeout, (void *) self, 70 irda_start_timer( &self->query_timer, timeout, (void *) self,
71 irlap_query_timer_expired); 71 irlap_query_timer_expired);
72} 72}
73 73
74void irlap_start_final_timer(struct irlap_cb *self, int timeout) 74void irlap_start_final_timer(struct irlap_cb *self, int timeout)
75{ 75{
76 irda_start_timer(&self->final_timer, timeout, (void *) self, 76 irda_start_timer(&self->final_timer, timeout, (void *) self,
77 irlap_final_timer_expired); 77 irlap_final_timer_expired);
78} 78}
79 79
80void irlap_start_wd_timer(struct irlap_cb *self, int timeout) 80void irlap_start_wd_timer(struct irlap_cb *self, int timeout)
81{ 81{
82 irda_start_timer(&self->wd_timer, timeout, (void *) self, 82 irda_start_timer(&self->wd_timer, timeout, (void *) self,
83 irlap_wd_timer_expired); 83 irlap_wd_timer_expired);
84} 84}
85 85
86void irlap_start_backoff_timer(struct irlap_cb *self, int timeout) 86void irlap_start_backoff_timer(struct irlap_cb *self, int timeout)
87{ 87{
88 irda_start_timer(&self->backoff_timer, timeout, (void *) self, 88 irda_start_timer(&self->backoff_timer, timeout, (void *) self,
89 irlap_backoff_timer_expired); 89 irlap_backoff_timer_expired);
90} 90}
91 91
92void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout) 92void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout)
93{ 93{
94 irda_start_timer(&self->media_busy_timer, timeout, 94 irda_start_timer(&self->media_busy_timer, timeout,
95 (void *) self, irlap_media_busy_expired); 95 (void *) self, irlap_media_busy_expired);
96} 96}
97 97
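All of these one-line starters delegate to irda_start_timer(), which on a kernel of this vintage essentially just (re)arms a struct timer_list with the given callback and a relative timeout in jiffies; the void (*)(void *) handlers used in this file are cast to the timer prototype by the real helper. A sketch of that assumed behaviour:

	/* (Re)arm 'ptimer' to fire 'timeout' jiffies from now. */
	static void start_timer_sketch(struct timer_list *ptimer, int timeout,
				       void *data,
				       void (*callback)(unsigned long))
	{
		ptimer->data     = (unsigned long) data;
		ptimer->function = callback;
		mod_timer(ptimer, jiffies + timeout);	/* adds or re-arms */
	}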
@@ -109,25 +109,25 @@ void irlap_stop_mbusy_timer(struct irlap_cb *self)
109 irlap_do_event(self, MEDIA_BUSY_TIMER_EXPIRED, NULL, NULL); 109 irlap_do_event(self, MEDIA_BUSY_TIMER_EXPIRED, NULL, NULL);
110} 110}
111 111
112void irlmp_start_watchdog_timer(struct lsap_cb *self, int timeout) 112void irlmp_start_watchdog_timer(struct lsap_cb *self, int timeout)
113{ 113{
114 irda_start_timer(&self->watchdog_timer, timeout, (void *) self, 114 irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
115 irlmp_watchdog_timer_expired); 115 irlmp_watchdog_timer_expired);
116} 116}
117 117
118void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout) 118void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout)
119{ 119{
120 irda_start_timer(&self->discovery_timer, timeout, (void *) self, 120 irda_start_timer(&self->discovery_timer, timeout, (void *) self,
121 irlmp_discovery_timer_expired); 121 irlmp_discovery_timer_expired);
122} 122}
123 123
124void irlmp_start_idle_timer(struct lap_cb *self, int timeout) 124void irlmp_start_idle_timer(struct lap_cb *self, int timeout)
125{ 125{
126 irda_start_timer(&self->idle_timer, timeout, (void *) self, 126 irda_start_timer(&self->idle_timer, timeout, (void *) self,
127 irlmp_idle_timer_expired); 127 irlmp_idle_timer_expired);
128} 128}
129 129
130void irlmp_stop_idle_timer(struct lap_cb *self) 130void irlmp_stop_idle_timer(struct lap_cb *self)
131{ 131{
132 /* If timer is activated, kill it! */ 132 /* If timer is activated, kill it! */
133 del_timer(&self->idle_timer); 133 del_timer(&self->idle_timer);
@@ -147,7 +147,7 @@ static void irlap_slot_timer_expired(void *data)
147 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 147 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
148 148
149 irlap_do_event(self, SLOT_TIMER_EXPIRED, NULL, NULL); 149 irlap_do_event(self, SLOT_TIMER_EXPIRED, NULL, NULL);
150} 150}
151 151
152/* 152/*
153 * Function irlap_query_timer_expired (data) 153 * Function irlap_query_timer_expired (data)
@@ -163,12 +163,12 @@ static void irlap_query_timer_expired(void *data)
163 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 163 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
164 164
165 irlap_do_event(self, QUERY_TIMER_EXPIRED, NULL, NULL); 165 irlap_do_event(self, QUERY_TIMER_EXPIRED, NULL, NULL);
166} 166}
167 167
168/* 168/*
169 * Function irda_final_timer_expired (data) 169 * Function irda_final_timer_expired (data)
170 * 170 *
171 * 171 *
172 * 172 *
173 */ 173 */
174static void irlap_final_timer_expired(void *data) 174static void irlap_final_timer_expired(void *data)
@@ -184,32 +184,32 @@ static void irlap_final_timer_expired(void *data)
184/* 184/*
185 * Function irda_wd_timer_expired (data) 185 * Function irda_wd_timer_expired (data)
186 * 186 *
187 * 187 *
188 * 188 *
189 */ 189 */
190static void irlap_wd_timer_expired(void *data) 190static void irlap_wd_timer_expired(void *data)
191{ 191{
192 struct irlap_cb *self = (struct irlap_cb *) data; 192 struct irlap_cb *self = (struct irlap_cb *) data;
193 193
194 IRDA_ASSERT(self != NULL, return;); 194 IRDA_ASSERT(self != NULL, return;);
195 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 195 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
196 196
197 irlap_do_event(self, WD_TIMER_EXPIRED, NULL, NULL); 197 irlap_do_event(self, WD_TIMER_EXPIRED, NULL, NULL);
198} 198}
199 199
200/* 200/*
201 * Function irda_backoff_timer_expired (data) 201 * Function irda_backoff_timer_expired (data)
202 * 202 *
203 * 203 *
204 * 204 *
205 */ 205 */
206static void irlap_backoff_timer_expired(void *data) 206static void irlap_backoff_timer_expired(void *data)
207{ 207{
208 struct irlap_cb *self = (struct irlap_cb *) data; 208 struct irlap_cb *self = (struct irlap_cb *) data;
209 209
210 IRDA_ASSERT(self != NULL, return;); 210 IRDA_ASSERT(self != NULL, return;);
211 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 211 IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
212 212
213 irlap_do_event(self, BACKOFF_TIMER_EXPIRED, NULL, NULL); 213 irlap_do_event(self, BACKOFF_TIMER_EXPIRED, NULL, NULL);
214} 214}
215 215
@@ -217,7 +217,7 @@ static void irlap_backoff_timer_expired(void *data)
217/* 217/*
 218 * Function irlap_media_busy_expired (data) 218 * Function irlap_media_busy_expired (data)
219 * 219 *
220 * 220 *
221 */ 221 */
222void irlap_media_busy_expired(void* data) 222void irlap_media_busy_expired(void* data)
223{ 223{
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c
index 87130c1c8693..5abfb71aae8d 100644
--- a/net/irda/wrapper.c
+++ b/net/irda/wrapper.c
@@ -295,7 +295,7 @@ async_unwrap_bof(struct net_device *dev,
295 case OUTSIDE_FRAME: 295 case OUTSIDE_FRAME:
296 case BEGIN_FRAME: 296 case BEGIN_FRAME:
297 default: 297 default:
298 /* We may receive multiple BOF at the start of frame */ 298 /* We may receive multiple BOF at the start of frame */
299 break; 299 break;
300 } 300 }
301 301
diff --git a/net/key/af_key.c b/net/key/af_key.c
index b4e444063d1f..f3a026ff9b2c 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -152,7 +152,7 @@ static int pfkey_create(struct socket *sock, int protocol)
152 sk = sk_alloc(PF_KEY, GFP_KERNEL, &key_proto, 1); 152 sk = sk_alloc(PF_KEY, GFP_KERNEL, &key_proto, 1);
153 if (sk == NULL) 153 if (sk == NULL)
154 goto out; 154 goto out;
155 155
156 sock->ops = &pfkey_ops; 156 sock->ops = &pfkey_ops;
157 sock_init_data(sock, sk); 157 sock_init_data(sock, sk);
158 158
@@ -487,7 +487,7 @@ static int parse_exthdrs(struct sk_buff *skb, struct sadb_msg *hdr, void **ext_h
487 ext_type == SADB_X_EXT_NAT_T_OA) { 487 ext_type == SADB_X_EXT_NAT_T_OA) {
488 if (verify_address_len(p)) 488 if (verify_address_len(p))
489 return -EINVAL; 489 return -EINVAL;
490 } 490 }
491 if (ext_type == SADB_X_EXT_SEC_CTX) { 491 if (ext_type == SADB_X_EXT_SEC_CTX) {
492 if (verify_sec_ctx_len(p)) 492 if (verify_sec_ctx_len(p))
493 return -EINVAL; 493 return -EINVAL;
@@ -556,12 +556,12 @@ static int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr,
556{ 556{
557 switch (((struct sockaddr*)(addr + 1))->sa_family) { 557 switch (((struct sockaddr*)(addr + 1))->sa_family) {
558 case AF_INET: 558 case AF_INET:
559 xaddr->a4 = 559 xaddr->a4 =
560 ((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr; 560 ((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr;
561 return AF_INET; 561 return AF_INET;
562#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 562#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
563 case AF_INET6: 563 case AF_INET6:
564 memcpy(xaddr->a6, 564 memcpy(xaddr->a6,
565 &((struct sockaddr_in6 *)(addr + 1))->sin6_addr, 565 &((struct sockaddr_in6 *)(addr + 1))->sin6_addr,
566 sizeof(struct in6_addr)); 566 sizeof(struct in6_addr));
567 return AF_INET6; 567 return AF_INET6;
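The (addr + 1) arithmetic relies on PF_KEY's layout rule that the sockaddr is packed immediately after the fixed sadb_address header; every address extension built below uses the same convention. Schematically:

	/* One ADDRESS extension inside the message:
	 *
	 *   +----------------------+----------------------------------+
	 *   | struct sadb_address  | struct sockaddr_in / _in6 (+pad) |
	 *   +----------------------+----------------------------------+
	 *   ^ addr                 ^ (addr + 1)
	 */
	struct sockaddr *sa = (struct sockaddr *)(addr + 1);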
@@ -659,11 +659,11 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
659 659
660 /* base, SA, (lifetime (HSC),) address(SD), (address(P),) 660 /* base, SA, (lifetime (HSC),) address(SD), (address(P),)
661 key(AE), (identity(SD),) (sensitivity)> */ 661 key(AE), (identity(SD),) (sensitivity)> */
662 size = sizeof(struct sadb_msg) +sizeof(struct sadb_sa) + 662 size = sizeof(struct sadb_msg) +sizeof(struct sadb_sa) +
663 sizeof(struct sadb_lifetime) + 663 sizeof(struct sadb_lifetime) +
664 ((hsc & 1) ? sizeof(struct sadb_lifetime) : 0) + 664 ((hsc & 1) ? sizeof(struct sadb_lifetime) : 0) +
665 ((hsc & 2) ? sizeof(struct sadb_lifetime) : 0) + 665 ((hsc & 2) ? sizeof(struct sadb_lifetime) : 0) +
666 sizeof(struct sadb_address)*2 + 666 sizeof(struct sadb_address)*2 +
667 sockaddr_size*2 + 667 sockaddr_size*2 +
668 sizeof(struct sadb_x_sa2); 668 sizeof(struct sadb_x_sa2);
669 669
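The sadb_*_len fields filled in below are counted in 64-bit words, which is why every sizeof sum is divided by sizeof(uint64_t). For the IPv4 case the address extension works out as follows (assuming the usual 8-byte struct sadb_address and a sockaddr_in padded to 16 bytes by pfkey_sockaddr_size() — worth re-checking on an unusual ABI):

	/* (sizeof(struct sadb_address) + sockaddr_size) / sizeof(uint64_t)
	 *   = (8 + 16) / 8
	 *   = 3 64-bit words  ->  24 bytes of the total 'size' above      */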
@@ -685,13 +685,13 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
685 685
686 if (add_keys) { 686 if (add_keys) {
687 if (x->aalg && x->aalg->alg_key_len) { 687 if (x->aalg && x->aalg->alg_key_len) {
688 auth_key_size = 688 auth_key_size =
689 PFKEY_ALIGN8((x->aalg->alg_key_len + 7) / 8); 689 PFKEY_ALIGN8((x->aalg->alg_key_len + 7) / 8);
690 size += sizeof(struct sadb_key) + auth_key_size; 690 size += sizeof(struct sadb_key) + auth_key_size;
691 } 691 }
692 if (x->ealg && x->ealg->alg_key_len) { 692 if (x->ealg && x->ealg->alg_key_len) {
693 encrypt_key_size = 693 encrypt_key_size =
694 PFKEY_ALIGN8((x->ealg->alg_key_len+7) / 8); 694 PFKEY_ALIGN8((x->ealg->alg_key_len+7) / 8);
695 size += sizeof(struct sadb_key) + encrypt_key_size; 695 size += sizeof(struct sadb_key) + encrypt_key_size;
696 } 696 }
697 } 697 }
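alg_key_len is a bit count, so it is first rounded up to whole bytes and then padded out to PF_KEY's 8-byte granularity before being added to the message size. For instance, with a 160-bit authentication key (purely illustrative):

	/* (160 + 7) / 8     = 20 bytes of key material
	 * PFKEY_ALIGN8(20)  = 24 bytes reserved after struct sadb_key     */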
@@ -758,7 +758,7 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
758 758
759 /* hard time */ 759 /* hard time */
760 if (hsc & 2) { 760 if (hsc & 2) {
761 lifetime = (struct sadb_lifetime *) skb_put(skb, 761 lifetime = (struct sadb_lifetime *) skb_put(skb,
762 sizeof(struct sadb_lifetime)); 762 sizeof(struct sadb_lifetime));
763 lifetime->sadb_lifetime_len = 763 lifetime->sadb_lifetime_len =
764 sizeof(struct sadb_lifetime)/sizeof(uint64_t); 764 sizeof(struct sadb_lifetime)/sizeof(uint64_t);
@@ -770,7 +770,7 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
770 } 770 }
771 /* soft time */ 771 /* soft time */
772 if (hsc & 1) { 772 if (hsc & 1) {
773 lifetime = (struct sadb_lifetime *) skb_put(skb, 773 lifetime = (struct sadb_lifetime *) skb_put(skb,
774 sizeof(struct sadb_lifetime)); 774 sizeof(struct sadb_lifetime));
775 lifetime->sadb_lifetime_len = 775 lifetime->sadb_lifetime_len =
776 sizeof(struct sadb_lifetime)/sizeof(uint64_t); 776 sizeof(struct sadb_lifetime)/sizeof(uint64_t);
@@ -791,16 +791,16 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
791 lifetime->sadb_lifetime_addtime = x->curlft.add_time; 791 lifetime->sadb_lifetime_addtime = x->curlft.add_time;
792 lifetime->sadb_lifetime_usetime = x->curlft.use_time; 792 lifetime->sadb_lifetime_usetime = x->curlft.use_time;
793 /* src address */ 793 /* src address */
794 addr = (struct sadb_address*) skb_put(skb, 794 addr = (struct sadb_address*) skb_put(skb,
795 sizeof(struct sadb_address)+sockaddr_size); 795 sizeof(struct sadb_address)+sockaddr_size);
796 addr->sadb_address_len = 796 addr->sadb_address_len =
797 (sizeof(struct sadb_address)+sockaddr_size)/ 797 (sizeof(struct sadb_address)+sockaddr_size)/
798 sizeof(uint64_t); 798 sizeof(uint64_t);
799 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 799 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
800 /* "if the ports are non-zero, then the sadb_address_proto field, 800 /* "if the ports are non-zero, then the sadb_address_proto field,
801 normally zero, MUST be filled in with the transport 801 normally zero, MUST be filled in with the transport
802 protocol's number." - RFC2367 */ 802 protocol's number." - RFC2367 */
803 addr->sadb_address_proto = 0; 803 addr->sadb_address_proto = 0;
804 addr->sadb_address_reserved = 0; 804 addr->sadb_address_reserved = 0;
805 if (x->props.family == AF_INET) { 805 if (x->props.family == AF_INET) {
806 addr->sadb_address_prefixlen = 32; 806 addr->sadb_address_prefixlen = 32;
@@ -813,29 +813,29 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
813 } 813 }
814#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 814#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
815 else if (x->props.family == AF_INET6) { 815 else if (x->props.family == AF_INET6) {
816 addr->sadb_address_prefixlen = 128; 816 addr->sadb_address_prefixlen = 128;
817 817
818 sin6 = (struct sockaddr_in6 *) (addr + 1); 818 sin6 = (struct sockaddr_in6 *) (addr + 1);
819 sin6->sin6_family = AF_INET6; 819 sin6->sin6_family = AF_INET6;
820 sin6->sin6_port = 0; 820 sin6->sin6_port = 0;
821 sin6->sin6_flowinfo = 0; 821 sin6->sin6_flowinfo = 0;
822 memcpy(&sin6->sin6_addr, x->props.saddr.a6, 822 memcpy(&sin6->sin6_addr, x->props.saddr.a6,
823 sizeof(struct in6_addr)); 823 sizeof(struct in6_addr));
824 sin6->sin6_scope_id = 0; 824 sin6->sin6_scope_id = 0;
825 } 825 }
826#endif 826#endif
827 else 827 else
828 BUG(); 828 BUG();
829 829
830 /* dst address */ 830 /* dst address */
831 addr = (struct sadb_address*) skb_put(skb, 831 addr = (struct sadb_address*) skb_put(skb,
832 sizeof(struct sadb_address)+sockaddr_size); 832 sizeof(struct sadb_address)+sockaddr_size);
833 addr->sadb_address_len = 833 addr->sadb_address_len =
834 (sizeof(struct sadb_address)+sockaddr_size)/ 834 (sizeof(struct sadb_address)+sockaddr_size)/
835 sizeof(uint64_t); 835 sizeof(uint64_t);
836 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 836 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
837 addr->sadb_address_proto = 0; 837 addr->sadb_address_proto = 0;
838 addr->sadb_address_prefixlen = 32; /* XXX */ 838 addr->sadb_address_prefixlen = 32; /* XXX */
839 addr->sadb_address_reserved = 0; 839 addr->sadb_address_reserved = 0;
840 if (x->props.family == AF_INET) { 840 if (x->props.family == AF_INET) {
841 sin = (struct sockaddr_in *) (addr + 1); 841 sin = (struct sockaddr_in *) (addr + 1);
@@ -845,9 +845,9 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
845 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 845 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
846 846
847 if (x->sel.saddr.a4 != x->props.saddr.a4) { 847 if (x->sel.saddr.a4 != x->props.saddr.a4) {
848 addr = (struct sadb_address*) skb_put(skb, 848 addr = (struct sadb_address*) skb_put(skb,
849 sizeof(struct sadb_address)+sockaddr_size); 849 sizeof(struct sadb_address)+sockaddr_size);
850 addr->sadb_address_len = 850 addr->sadb_address_len =
851 (sizeof(struct sadb_address)+sockaddr_size)/ 851 (sizeof(struct sadb_address)+sockaddr_size)/
852 sizeof(uint64_t); 852 sizeof(uint64_t);
853 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY; 853 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
@@ -876,9 +876,9 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
876 876
877 if (memcmp (x->sel.saddr.a6, x->props.saddr.a6, 877 if (memcmp (x->sel.saddr.a6, x->props.saddr.a6,
878 sizeof(struct in6_addr))) { 878 sizeof(struct in6_addr))) {
879 addr = (struct sadb_address *) skb_put(skb, 879 addr = (struct sadb_address *) skb_put(skb,
880 sizeof(struct sadb_address)+sockaddr_size); 880 sizeof(struct sadb_address)+sockaddr_size);
881 addr->sadb_address_len = 881 addr->sadb_address_len =
882 (sizeof(struct sadb_address)+sockaddr_size)/ 882 (sizeof(struct sadb_address)+sockaddr_size)/
883 sizeof(uint64_t); 883 sizeof(uint64_t);
884 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY; 884 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
@@ -902,7 +902,7 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
902 902
903 /* auth key */ 903 /* auth key */
904 if (add_keys && auth_key_size) { 904 if (add_keys && auth_key_size) {
905 key = (struct sadb_key *) skb_put(skb, 905 key = (struct sadb_key *) skb_put(skb,
906 sizeof(struct sadb_key)+auth_key_size); 906 sizeof(struct sadb_key)+auth_key_size);
907 key->sadb_key_len = (sizeof(struct sadb_key) + auth_key_size) / 907 key->sadb_key_len = (sizeof(struct sadb_key) + auth_key_size) /
908 sizeof(uint64_t); 908 sizeof(uint64_t);
@@ -913,14 +913,14 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
913 } 913 }
914 /* encrypt key */ 914 /* encrypt key */
915 if (add_keys && encrypt_key_size) { 915 if (add_keys && encrypt_key_size) {
916 key = (struct sadb_key *) skb_put(skb, 916 key = (struct sadb_key *) skb_put(skb,
917 sizeof(struct sadb_key)+encrypt_key_size); 917 sizeof(struct sadb_key)+encrypt_key_size);
918 key->sadb_key_len = (sizeof(struct sadb_key) + 918 key->sadb_key_len = (sizeof(struct sadb_key) +
919 encrypt_key_size) / sizeof(uint64_t); 919 encrypt_key_size) / sizeof(uint64_t);
920 key->sadb_key_exttype = SADB_EXT_KEY_ENCRYPT; 920 key->sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
921 key->sadb_key_bits = x->ealg->alg_key_len; 921 key->sadb_key_bits = x->ealg->alg_key_len;
922 key->sadb_key_reserved = 0; 922 key->sadb_key_reserved = 0;
923 memcpy(key + 1, x->ealg->alg_key, 923 memcpy(key + 1, x->ealg->alg_key,
924 (x->ealg->alg_key_len+7)/8); 924 (x->ealg->alg_key_len+7)/8);
925 } 925 }
926 926
@@ -979,17 +979,17 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
979 return skb; 979 return skb;
980} 980}
981 981
982static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr, 982static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
983 void **ext_hdrs) 983 void **ext_hdrs)
984{ 984{
985 struct xfrm_state *x; 985 struct xfrm_state *x;
986 struct sadb_lifetime *lifetime; 986 struct sadb_lifetime *lifetime;
987 struct sadb_sa *sa; 987 struct sadb_sa *sa;
988 struct sadb_key *key; 988 struct sadb_key *key;
989 struct sadb_x_sec_ctx *sec_ctx; 989 struct sadb_x_sec_ctx *sec_ctx;
990 uint16_t proto; 990 uint16_t proto;
991 int err; 991 int err;
992 992
993 993
994 sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1]; 994 sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
995 if (!sa || 995 if (!sa ||
@@ -1022,7 +1022,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
1022 SADB_SASTATE_MATURE and the kernel MUST return an error if this is 1022 SADB_SASTATE_MATURE and the kernel MUST return an error if this is
1023 not true. 1023 not true.
1024 1024
1025 However, KAME setkey always uses SADB_SASTATE_LARVAL. 1025 However, KAME setkey always uses SADB_SASTATE_LARVAL.
1026 Hence, we have to _ignore_ sadb_sa_state, which is also reasonable. 1026 Hence, we have to _ignore_ sadb_sa_state, which is also reasonable.
1027 */ 1027 */
1028 if (sa->sadb_sa_auth > SADB_AALG_MAX || 1028 if (sa->sadb_sa_auth > SADB_AALG_MAX ||
@@ -1144,13 +1144,13 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
1144 } 1144 }
1145 /* x->algo.flags = sa->sadb_sa_flags; */ 1145 /* x->algo.flags = sa->sadb_sa_flags; */
1146 1146
1147 x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 1147 x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
1148 &x->props.saddr); 1148 &x->props.saddr);
1149 if (!x->props.family) { 1149 if (!x->props.family) {
1150 err = -EAFNOSUPPORT; 1150 err = -EAFNOSUPPORT;
1151 goto out; 1151 goto out;
1152 } 1152 }
1153 pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1], 1153 pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
1154 &x->id.daddr); 1154 &x->id.daddr);
1155 1155
1156 if (ext_hdrs[SADB_X_EXT_SA2-1]) { 1156 if (ext_hdrs[SADB_X_EXT_SA2-1]) {
@@ -1410,7 +1410,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
1410 struct km_event c; 1410 struct km_event c;
1411 1411
1412 xfrm_probe_algs(); 1412 xfrm_probe_algs();
1413 1413
1414 x = pfkey_msg2xfrm_state(hdr, ext_hdrs); 1414 x = pfkey_msg2xfrm_state(hdr, ext_hdrs);
1415 if (IS_ERR(x)) 1415 if (IS_ERR(x))
1416 return PTR_ERR(x); 1416 return PTR_ERR(x);
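The IS_ERR()/PTR_ERR() check above works because pfkey_msg2xfrm_state() encodes its errno into the returned pointer with ERR_PTR() instead of returning NULL. A sketch of the producing side of that convention (standard kernel idiom):

	/* On failure the function wraps an errno in the pointer so the
	 * caller can recover it with PTR_ERR().                          */
	x = xfrm_state_alloc();
	if (x == NULL)
		return ERR_PTR(-ENOBUFS);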
@@ -1530,13 +1530,13 @@ static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig,
1530 auth_len *= sizeof(struct sadb_alg); 1530 auth_len *= sizeof(struct sadb_alg);
1531 auth_len += sizeof(struct sadb_supported); 1531 auth_len += sizeof(struct sadb_supported);
1532 } 1532 }
1533 1533
1534 enc_len = xfrm_count_enc_supported(); 1534 enc_len = xfrm_count_enc_supported();
1535 if (enc_len) { 1535 if (enc_len) {
1536 enc_len *= sizeof(struct sadb_alg); 1536 enc_len *= sizeof(struct sadb_alg);
1537 enc_len += sizeof(struct sadb_supported); 1537 enc_len += sizeof(struct sadb_supported);
1538 } 1538 }
1539 1539
1540 len = enc_len + auth_len + sizeof(struct sadb_msg); 1540 len = enc_len + auth_len + sizeof(struct sadb_msg);
1541 1541
1542 skb = alloc_skb(len + 16, allocation); 1542 skb = alloc_skb(len + 16, allocation);
@@ -1605,7 +1605,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
1605 } 1605 }
1606 1606
1607 xfrm_probe_algs(); 1607 xfrm_probe_algs();
1608 1608
1609 supp_skb = compose_sadb_supported(hdr, GFP_KERNEL); 1609 supp_skb = compose_sadb_supported(hdr, GFP_KERNEL);
1610 if (!supp_skb) { 1610 if (!supp_skb) {
1611 if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC) 1611 if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
@@ -1856,7 +1856,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
1856 1856
1857 return sizeof(struct sadb_msg) + 1857 return sizeof(struct sadb_msg) +
1858 (sizeof(struct sadb_lifetime) * 3) + 1858 (sizeof(struct sadb_lifetime) * 3) +
1859 (sizeof(struct sadb_address) * 2) + 1859 (sizeof(struct sadb_address) * 2) +
1860 (sockaddr_size * 2) + 1860 (sockaddr_size * 2) +
1861 sizeof(struct sadb_x_policy) + 1861 sizeof(struct sadb_x_policy) +
1862 (xp->xfrm_nr * sizeof(struct sadb_x_ipsecrequest)) + 1862 (xp->xfrm_nr * sizeof(struct sadb_x_ipsecrequest)) +
@@ -1904,9 +1904,9 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
1904 memset(hdr, 0, size); /* XXX do we need this ? */ 1904 memset(hdr, 0, size); /* XXX do we need this ? */
1905 1905
1906 /* src address */ 1906 /* src address */
1907 addr = (struct sadb_address*) skb_put(skb, 1907 addr = (struct sadb_address*) skb_put(skb,
1908 sizeof(struct sadb_address)+sockaddr_size); 1908 sizeof(struct sadb_address)+sockaddr_size);
1909 addr->sadb_address_len = 1909 addr->sadb_address_len =
1910 (sizeof(struct sadb_address)+sockaddr_size)/ 1910 (sizeof(struct sadb_address)+sockaddr_size)/
1911 sizeof(uint64_t); 1911 sizeof(uint64_t);
1912 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 1912 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
@@ -1936,14 +1936,14 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
1936 BUG(); 1936 BUG();
1937 1937
1938 /* dst address */ 1938 /* dst address */
1939 addr = (struct sadb_address*) skb_put(skb, 1939 addr = (struct sadb_address*) skb_put(skb,
1940 sizeof(struct sadb_address)+sockaddr_size); 1940 sizeof(struct sadb_address)+sockaddr_size);
1941 addr->sadb_address_len = 1941 addr->sadb_address_len =
1942 (sizeof(struct sadb_address)+sockaddr_size)/ 1942 (sizeof(struct sadb_address)+sockaddr_size)/
1943 sizeof(uint64_t); 1943 sizeof(uint64_t);
1944 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 1944 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
1945 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); 1945 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
1946 addr->sadb_address_prefixlen = xp->selector.prefixlen_d; 1946 addr->sadb_address_prefixlen = xp->selector.prefixlen_d;
1947 addr->sadb_address_reserved = 0; 1947 addr->sadb_address_reserved = 0;
1948 if (xp->family == AF_INET) { 1948 if (xp->family == AF_INET) {
1949 sin = (struct sockaddr_in *) (addr + 1); 1949 sin = (struct sockaddr_in *) (addr + 1);
@@ -1967,7 +1967,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
1967 BUG(); 1967 BUG();
1968 1968
1969 /* hard time */ 1969 /* hard time */
1970 lifetime = (struct sadb_lifetime *) skb_put(skb, 1970 lifetime = (struct sadb_lifetime *) skb_put(skb,
1971 sizeof(struct sadb_lifetime)); 1971 sizeof(struct sadb_lifetime));
1972 lifetime->sadb_lifetime_len = 1972 lifetime->sadb_lifetime_len =
1973 sizeof(struct sadb_lifetime)/sizeof(uint64_t); 1973 sizeof(struct sadb_lifetime)/sizeof(uint64_t);
@@ -1977,7 +1977,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
1977 lifetime->sadb_lifetime_addtime = xp->lft.hard_add_expires_seconds; 1977 lifetime->sadb_lifetime_addtime = xp->lft.hard_add_expires_seconds;
1978 lifetime->sadb_lifetime_usetime = xp->lft.hard_use_expires_seconds; 1978 lifetime->sadb_lifetime_usetime = xp->lft.hard_use_expires_seconds;
1979 /* soft time */ 1979 /* soft time */
1980 lifetime = (struct sadb_lifetime *) skb_put(skb, 1980 lifetime = (struct sadb_lifetime *) skb_put(skb,
1981 sizeof(struct sadb_lifetime)); 1981 sizeof(struct sadb_lifetime));
1982 lifetime->sadb_lifetime_len = 1982 lifetime->sadb_lifetime_len =
1983 sizeof(struct sadb_lifetime)/sizeof(uint64_t); 1983 sizeof(struct sadb_lifetime)/sizeof(uint64_t);
@@ -1987,7 +1987,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
1987 lifetime->sadb_lifetime_addtime = xp->lft.soft_add_expires_seconds; 1987 lifetime->sadb_lifetime_addtime = xp->lft.soft_add_expires_seconds;
1988 lifetime->sadb_lifetime_usetime = xp->lft.soft_use_expires_seconds; 1988 lifetime->sadb_lifetime_usetime = xp->lft.soft_use_expires_seconds;
1989 /* current time */ 1989 /* current time */
1990 lifetime = (struct sadb_lifetime *) skb_put(skb, 1990 lifetime = (struct sadb_lifetime *) skb_put(skb,
1991 sizeof(struct sadb_lifetime)); 1991 sizeof(struct sadb_lifetime));
1992 lifetime->sadb_lifetime_len = 1992 lifetime->sadb_lifetime_len =
1993 sizeof(struct sadb_lifetime)/sizeof(uint64_t); 1993 sizeof(struct sadb_lifetime)/sizeof(uint64_t);
@@ -2019,8 +2019,8 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
2019 req_size = sizeof(struct sadb_x_ipsecrequest); 2019 req_size = sizeof(struct sadb_x_ipsecrequest);
2020 if (t->mode == XFRM_MODE_TUNNEL) 2020 if (t->mode == XFRM_MODE_TUNNEL)
2021 req_size += ((t->encap_family == AF_INET ? 2021 req_size += ((t->encap_family == AF_INET ?
2022 sizeof(struct sockaddr_in) : 2022 sizeof(struct sockaddr_in) :
2023 sizeof(struct sockaddr_in6)) * 2); 2023 sizeof(struct sockaddr_in6)) * 2);
2024 else 2024 else
2025 size -= 2*socklen; 2025 size -= 2*socklen;
2026 rq = (void*)skb_put(skb, req_size); 2026 rq = (void*)skb_put(skb, req_size);
@@ -2150,7 +2150,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2150 XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); 2150 XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
2151 xp->priority = pol->sadb_x_policy_priority; 2151 xp->priority = pol->sadb_x_policy_priority;
2152 2152
2153 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2153 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
2154 xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); 2154 xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
2155 if (!xp->family) { 2155 if (!xp->family) {
2156 err = -EINVAL; 2156 err = -EINVAL;
@@ -2163,7 +2163,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2163 if (xp->selector.sport) 2163 if (xp->selector.sport)
2164 xp->selector.sport_mask = htons(0xffff); 2164 xp->selector.sport_mask = htons(0xffff);
2165 2165
2166 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2166 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
2167 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); 2167 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);
2168 xp->selector.prefixlen_d = sa->sadb_address_prefixlen; 2168 xp->selector.prefixlen_d = sa->sadb_address_prefixlen;
2169 2169
@@ -2224,7 +2224,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2224 2224
2225 if (hdr->sadb_msg_type == SADB_X_SPDUPDATE) 2225 if (hdr->sadb_msg_type == SADB_X_SPDUPDATE)
2226 c.event = XFRM_MSG_UPDPOLICY; 2226 c.event = XFRM_MSG_UPDPOLICY;
2227 else 2227 else
2228 c.event = XFRM_MSG_NEWPOLICY; 2228 c.event = XFRM_MSG_NEWPOLICY;
2229 2229
2230 c.seq = hdr->sadb_msg_seq; 2230 c.seq = hdr->sadb_msg_seq;
@@ -2261,7 +2261,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2261 2261
2262 memset(&sel, 0, sizeof(sel)); 2262 memset(&sel, 0, sizeof(sel));
2263 2263
2264 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2264 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
2265 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); 2265 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
2266 sel.prefixlen_s = sa->sadb_address_prefixlen; 2266 sel.prefixlen_s = sa->sadb_address_prefixlen;
2267 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2267 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2269,7 +2269,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2269 if (sel.sport) 2269 if (sel.sport)
2270 sel.sport_mask = htons(0xffff); 2270 sel.sport_mask = htons(0xffff);
2271 2271
2272 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2272 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
2273 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); 2273 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
2274 sel.prefixlen_d = sa->sadb_address_prefixlen; 2274 sel.prefixlen_d = sa->sadb_address_prefixlen;
2275 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2275 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2743,15 +2743,15 @@ static int count_esp_combs(struct xfrm_tmpl *t)
2743 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i); 2743 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
2744 if (!ealg) 2744 if (!ealg)
2745 break; 2745 break;
2746 2746
2747 if (!(ealg_tmpl_set(t, ealg) && ealg->available)) 2747 if (!(ealg_tmpl_set(t, ealg) && ealg->available))
2748 continue; 2748 continue;
2749 2749
2750 for (k = 1; ; k++) { 2750 for (k = 1; ; k++) {
2751 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k); 2751 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
2752 if (!aalg) 2752 if (!aalg)
2753 break; 2753 break;
2754 2754
2755 if (aalg_tmpl_set(t, aalg) && aalg->available) 2755 if (aalg_tmpl_set(t, aalg) && aalg->available)
2756 sz += sizeof(struct sadb_comb); 2756 sz += sizeof(struct sadb_comb);
2757 } 2757 }
@@ -2806,10 +2806,10 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
2806 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i); 2806 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
2807 if (!ealg) 2807 if (!ealg)
2808 break; 2808 break;
2809 2809
2810 if (!(ealg_tmpl_set(t, ealg) && ealg->available)) 2810 if (!(ealg_tmpl_set(t, ealg) && ealg->available))
2811 continue; 2811 continue;
2812 2812
2813 for (k = 1; ; k++) { 2813 for (k = 1; ; k++) {
2814 struct sadb_comb *c; 2814 struct sadb_comb *c;
2815 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k); 2815 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
@@ -2941,7 +2941,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
2941 struct sadb_x_sec_ctx *sec_ctx; 2941 struct sadb_x_sec_ctx *sec_ctx;
2942 struct xfrm_sec_ctx *xfrm_ctx; 2942 struct xfrm_sec_ctx *xfrm_ctx;
2943 int ctx_size = 0; 2943 int ctx_size = 0;
2944 2944
2945 sockaddr_size = pfkey_sockaddr_size(x->props.family); 2945 sockaddr_size = pfkey_sockaddr_size(x->props.family);
2946 if (!sockaddr_size) 2946 if (!sockaddr_size)
2947 return -EINVAL; 2947 return -EINVAL;
@@ -2950,7 +2950,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
2950 (sizeof(struct sadb_address) * 2) + 2950 (sizeof(struct sadb_address) * 2) +
2951 (sockaddr_size * 2) + 2951 (sockaddr_size * 2) +
2952 sizeof(struct sadb_x_policy); 2952 sizeof(struct sadb_x_policy);
2953 2953
2954 if (x->id.proto == IPPROTO_AH) 2954 if (x->id.proto == IPPROTO_AH)
2955 size += count_ah_combs(t); 2955 size += count_ah_combs(t);
2956 else if (x->id.proto == IPPROTO_ESP) 2956 else if (x->id.proto == IPPROTO_ESP)
@@ -2964,7 +2964,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
2964 skb = alloc_skb(size + 16, GFP_ATOMIC); 2964 skb = alloc_skb(size + 16, GFP_ATOMIC);
2965 if (skb == NULL) 2965 if (skb == NULL)
2966 return -ENOMEM; 2966 return -ENOMEM;
2967 2967
2968 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); 2968 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
2969 hdr->sadb_msg_version = PF_KEY_V2; 2969 hdr->sadb_msg_version = PF_KEY_V2;
2970 hdr->sadb_msg_type = SADB_ACQUIRE; 2970 hdr->sadb_msg_type = SADB_ACQUIRE;
@@ -2976,9 +2976,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
2976 hdr->sadb_msg_pid = 0; 2976 hdr->sadb_msg_pid = 0;
2977 2977
2978 /* src address */ 2978 /* src address */
2979 addr = (struct sadb_address*) skb_put(skb, 2979 addr = (struct sadb_address*) skb_put(skb,
2980 sizeof(struct sadb_address)+sockaddr_size); 2980 sizeof(struct sadb_address)+sockaddr_size);
2981 addr->sadb_address_len = 2981 addr->sadb_address_len =
2982 (sizeof(struct sadb_address)+sockaddr_size)/ 2982 (sizeof(struct sadb_address)+sockaddr_size)/
2983 sizeof(uint64_t); 2983 sizeof(uint64_t);
2984 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 2984 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
@@ -3008,9 +3008,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3008#endif 3008#endif
3009 else 3009 else
3010 BUG(); 3010 BUG();
3011 3011
3012 /* dst address */ 3012 /* dst address */
3013 addr = (struct sadb_address*) skb_put(skb, 3013 addr = (struct sadb_address*) skb_put(skb,
3014 sizeof(struct sadb_address)+sockaddr_size); 3014 sizeof(struct sadb_address)+sockaddr_size);
3015 addr->sadb_address_len = 3015 addr->sadb_address_len =
3016 (sizeof(struct sadb_address)+sockaddr_size)/ 3016 (sizeof(struct sadb_address)+sockaddr_size)/
@@ -3019,7 +3019,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3019 addr->sadb_address_proto = 0; 3019 addr->sadb_address_proto = 0;
3020 addr->sadb_address_reserved = 0; 3020 addr->sadb_address_reserved = 0;
3021 if (x->props.family == AF_INET) { 3021 if (x->props.family == AF_INET) {
3022 addr->sadb_address_prefixlen = 32; 3022 addr->sadb_address_prefixlen = 32;
3023 3023
3024 sin = (struct sockaddr_in *) (addr + 1); 3024 sin = (struct sockaddr_in *) (addr + 1);
3025 sin->sin_family = AF_INET; 3025 sin->sin_family = AF_INET;
@@ -3029,7 +3029,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3029 } 3029 }
3030#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 3030#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3031 else if (x->props.family == AF_INET6) { 3031 else if (x->props.family == AF_INET6) {
3032 addr->sadb_address_prefixlen = 128; 3032 addr->sadb_address_prefixlen = 128;
3033 3033
3034 sin6 = (struct sockaddr_in6 *) (addr + 1); 3034 sin6 = (struct sockaddr_in6 *) (addr + 1);
3035 sin6->sin6_family = AF_INET6; 3035 sin6->sin6_family = AF_INET6;
@@ -3074,7 +3074,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3074} 3074}
3075 3075
3076static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, 3076static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
3077 u8 *data, int len, int *dir) 3077 u8 *data, int len, int *dir)
3078{ 3078{
3079 struct xfrm_policy *xp; 3079 struct xfrm_policy *xp;
3080 struct sadb_x_policy *pol = (struct sadb_x_policy*)data; 3080 struct sadb_x_policy *pol = (struct sadb_x_policy*)data;
@@ -3193,17 +3193,17 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3193 * HDR | SA | ADDRESS_SRC (old addr) | NAT_T_SPORT (old port) | 3193 * HDR | SA | ADDRESS_SRC (old addr) | NAT_T_SPORT (old port) |
3194 * ADDRESS_DST (new addr) | NAT_T_DPORT (new port) 3194 * ADDRESS_DST (new addr) | NAT_T_DPORT (new port)
3195 */ 3195 */
3196 3196
3197 size = sizeof(struct sadb_msg) + 3197 size = sizeof(struct sadb_msg) +
3198 sizeof(struct sadb_sa) + 3198 sizeof(struct sadb_sa) +
3199 (sizeof(struct sadb_address) * 2) + 3199 (sizeof(struct sadb_address) * 2) +
3200 (sockaddr_size * 2) + 3200 (sockaddr_size * 2) +
3201 (sizeof(struct sadb_x_nat_t_port) * 2); 3201 (sizeof(struct sadb_x_nat_t_port) * 2);
3202 3202
3203 skb = alloc_skb(size + 16, GFP_ATOMIC); 3203 skb = alloc_skb(size + 16, GFP_ATOMIC);
3204 if (skb == NULL) 3204 if (skb == NULL)
3205 return -ENOMEM; 3205 return -ENOMEM;
3206 3206
3207 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); 3207 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
3208 hdr->sadb_msg_version = PF_KEY_V2; 3208 hdr->sadb_msg_version = PF_KEY_V2;
3209 hdr->sadb_msg_type = SADB_X_NAT_T_NEW_MAPPING; 3209 hdr->sadb_msg_type = SADB_X_NAT_T_NEW_MAPPING;
@@ -3228,7 +3228,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3228 /* ADDRESS_SRC (old addr) */ 3228 /* ADDRESS_SRC (old addr) */
3229 addr = (struct sadb_address*) 3229 addr = (struct sadb_address*)
3230 skb_put(skb, sizeof(struct sadb_address)+sockaddr_size); 3230 skb_put(skb, sizeof(struct sadb_address)+sockaddr_size);
3231 addr->sadb_address_len = 3231 addr->sadb_address_len =
3232 (sizeof(struct sadb_address)+sockaddr_size)/ 3232 (sizeof(struct sadb_address)+sockaddr_size)/
3233 sizeof(uint64_t); 3233 sizeof(uint64_t);
3234 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 3234 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
@@ -3269,7 +3269,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3269 /* ADDRESS_DST (new addr) */ 3269 /* ADDRESS_DST (new addr) */
3270 addr = (struct sadb_address*) 3270 addr = (struct sadb_address*)
3271 skb_put(skb, sizeof(struct sadb_address)+sockaddr_size); 3271 skb_put(skb, sizeof(struct sadb_address)+sockaddr_size);
3272 addr->sadb_address_len = 3272 addr->sadb_address_len =
3273 (sizeof(struct sadb_address)+sockaddr_size)/ 3273 (sizeof(struct sadb_address)+sockaddr_size)/
3274 sizeof(uint64_t); 3274 sizeof(uint64_t);
3275 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 3275 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
@@ -3674,7 +3674,7 @@ static int pfkey_read_proc(char *buffer, char **start, off_t offset,
3674 ); 3674 );
3675 3675
3676 buffer[len++] = '\n'; 3676 buffer[len++] = '\n';
3677 3677
3678 pos = begin + len; 3678 pos = begin + len;
3679 if (pos < offset) { 3679 if (pos < offset) {
3680 len = 0; 3680 len = 0;
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 7e6bc41eeb21..a2e7aa63fd8a 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -14,7 +14,7 @@
14 * LAPB 002 Jonathan Naylor New timer architecture. 14 * LAPB 002 Jonathan Naylor New timer architecture.
15 * 2000-10-29 Henner Eisen lapb_data_indication() return status. 15 * 2000-10-29 Henner Eisen lapb_data_indication() return status.
16 */ 16 */
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/types.h> 20#include <linux/types.h>
@@ -43,7 +43,7 @@ static struct list_head lapb_list = LIST_HEAD_INIT(lapb_list);
43static DEFINE_RWLOCK(lapb_list_lock); 43static DEFINE_RWLOCK(lapb_list_lock);
44 44
45/* 45/*
46 * Free an allocated lapb control block. 46 * Free an allocated lapb control block.
47 */ 47 */
48static void lapb_free_cb(struct lapb_cb *lapb) 48static void lapb_free_cb(struct lapb_cb *lapb)
49{ 49{
@@ -407,7 +407,7 @@ int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
407 return lapb->callbacks.data_indication(lapb->dev, skb); 407 return lapb->callbacks.data_indication(lapb->dev, skb);
408 408
409 kfree_skb(skb); 409 kfree_skb(skb);
410 return NET_RX_CN_HIGH; /* For now; must be != NET_RX_DROP */ 410 return NET_RX_CN_HIGH; /* For now; must be != NET_RX_DROP */
411} 411}
412 412
413int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) 413int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index 49a761bd9314..981beb5e5187 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -34,7 +34,7 @@
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <net/lapb.h> 35#include <net/lapb.h>
36 36
37/* 37/*
38 * This procedure is passed a buffer descriptor for an iframe. It builds 38 * This procedure is passed a buffer descriptor for an iframe. It builds
39 * the rest of the control part of the frame and then writes it out. 39 * the rest of the control part of the frame and then writes it out.
40 */ 40 */
@@ -66,7 +66,7 @@ static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll
66 lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr); 66 lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
67#endif 67#endif
68 68
69 lapb_transmit_buffer(lapb, skb, LAPB_COMMAND); 69 lapb_transmit_buffer(lapb, skb, LAPB_COMMAND);
70} 70}
71 71
72void lapb_kick(struct lapb_cb *lapb) 72void lapb_kick(struct lapb_cb *lapb)
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 8b5eefd70f03..b8739cf9156d 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -59,7 +59,7 @@ void lapb_frames_acked(struct lapb_cb *lapb, unsigned short nr)
	 */
	if (lapb->va != nr)
		while (skb_peek(&lapb->ack_queue) && lapb->va != nr) {
			skb = skb_dequeue(&lapb->ack_queue);
			kfree_skb(skb);
			lapb->va = (lapb->va + 1) % modulus;
		}
@@ -67,7 +67,7 @@ void lapb_frames_acked(struct lapb_cb *lapb, unsigned short nr)

void lapb_requeue_frames(struct lapb_cb *lapb)
{
	struct sk_buff *skb, *skb_prev = NULL;

	/*
	 * Requeue all the un-ack-ed frames on the output queue to be picked
@@ -91,7 +91,7 @@ int lapb_validate_nr(struct lapb_cb *lapb, unsigned short nr)
{
	unsigned short vc = lapb->va;
	int modulus;

	modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS;

	while (vc != lapb->vs) {
@@ -99,7 +99,7 @@ int lapb_validate_nr(struct lapb_cb *lapb, unsigned short nr)
			return 1;
		vc = (vc + 1) % modulus;
	}

	return nr == lapb->vs;
}

@@ -149,7 +149,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
			frame->cr = LAPB_RESPONSE;
		}
	}

	skb_pull(skb, 1);

	if (lapb->mode & LAPB_EXTENDED) {
@@ -220,9 +220,9 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
	return 0;
}

/*
 *	This routine is called when the HDLC layer internally generates a
 *	command or response for the remote machine ( eg. RR, UA etc. ).
 *	Only supervisory or unnumbered frames are processed, FRMRs are handled
 *	by lapb_transmit_frmr below.
 */
@@ -259,7 +259,7 @@ void lapb_send_control(struct lapb_cb *lapb, int frametype,
	lapb_transmit_buffer(lapb, skb, type);
}

/*
 *	This routine generates FRMRs based on information previously stored in
 *	the LAPB control block.
 */
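The lapb_frames_acked() and lapb_validate_nr() hunks above both walk the sequence space from V(A) toward V(S) under a modulus of 8 or 128. A minimal user-space sketch of that window check, kept outside the kernel and using illustrative names (window_validate_nr, SMODULUS) rather than the LAPB symbols:

```c
/*
 * Standalone sketch (not kernel code) of the modulo-window N(R) check
 * visible in lapb_validate_nr() above: an acknowledgement number is
 * accepted only if it lies between V(A) and V(S) under the modulus.
 * The names and the modulus value here are illustrative only.
 */
#include <stdio.h>

#define SMODULUS 8	/* 3-bit sequence numbers, as in standard LAPB */

static int window_validate_nr(unsigned short va, unsigned short vs,
			      unsigned short nr, unsigned short modulus)
{
	unsigned short vc = va;

	while (vc != vs) {
		if (nr == vc)
			return 1;
		vc = (vc + 1) % modulus;
	}
	return nr == vs;
}

int main(void)
{
	/* window wraps from 6 around to 1 in a modulo-8 sequence space */
	printf("%d\n", window_validate_nr(6, 1, 7, SMODULUS)); /* 1: inside  */
	printf("%d\n", window_validate_nr(6, 1, 3, SMODULUS)); /* 0: outside */
	return 0;
}
```

The wrap-around case is the reason for the loop rather than a plain range comparison: with modulo arithmetic, V(A) may be numerically larger than V(S).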
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index 2c8f0f809220..af6d14b44e2e 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -101,7 +101,7 @@ static void lapb_t1timer_expiry(unsigned long param)
		/*
		 *	Awaiting connection state, send SABM(E), up to N2 times.
		 */
		case LAPB_STATE_1:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 190bb3e05188..7d9fa38b6a7d 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -67,7 +67,7 @@ static inline u16 llc_ui_next_link_no(int sap)
static inline __be16 llc_proto_type(u16 arphrd)
{
	return arphrd == ARPHRD_IEEE802_TR ?
		htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
}

/**
@@ -114,7 +114,7 @@ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
	struct llc_sock* llc = llc_sk(sk);
	int rc = 0;

	if (unlikely(llc_data_accept_state(llc->state) ||
		     llc->remote_busy_flag ||
		     llc->p_flag)) {
		long timeout = sock_sndtimeo(sk, noblock);
@@ -602,7 +602,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
	int rc = -EOPNOTSUPP;

	dprintk("%s: accepting on %02X\n", __FUNCTION__,
		llc_sk(sk)->laddr.lsap);
	lock_sock(sk);
	if (unlikely(sk->sk_type != SOCK_STREAM))
		goto out;
@@ -617,7 +617,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
		goto out;
	}
	dprintk("%s: got a new connection on %02X\n", __FUNCTION__,
		llc_sk(sk)->laddr.lsap);
	skb = skb_dequeue(&sk->sk_receive_queue);
	rc = -EINVAL;
	if (!skb->sk)
@@ -682,7 +682,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
	if (flags & MSG_PEEK) {
		peek_seq = llc->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	copied = 0;
@@ -860,7 +860,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
		goto release;
	skb->dev = llc->dev;
	skb->protocol = llc_proto_type(addr->sllc_arphrd);
	skb_reserve(skb, hdrlen);
	rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied);
	if (rc)
		goto out;
@@ -1116,11 +1116,11 @@ static const struct proto_ops llc_ui_ops = {
};

static char llc_proc_err_msg[] __initdata =
	KERN_CRIT "LLC: Unable to register the proc_fs entries\n";
static char llc_sysctl_err_msg[] __initdata =
	KERN_CRIT "LLC: Unable to register the sysctl entries\n";
static char llc_sock_err_msg[] __initdata =
	KERN_CRIT "LLC: Unable to register the network family\n";

static int __init llc2_init(void)
{
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c761c15da421..3b8cfbe029a7 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -854,7 +854,7 @@ static void llc_sk_init(struct sock* sk)
	llc->n2 = 2;   /* max retransmit */
	llc->k  = 2;   /* tx win size, will adjust dynam */
	llc->rw = 128; /* rx win size (opt and equal to
			* tx_win of remote LLC) */
	skb_queue_head_init(&llc->pdu_unack_q);
	sk->sk_backlog_rcv = llc_backlog_rcv;
}
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index db82aff6e40f..b3f65d1e80b1 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -164,7 +164,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
	sap = llc_sap_find(pdu->dsap);
	if (unlikely(!sap)) {/* unknown SAP */
		dprintk("%s: llc_sap_find(%02X) failed!\n", __FUNCTION__,
			pdu->dsap);
		goto drop;
	}
	/*
@@ -173,9 +173,9 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
	 */
	rcv = rcu_dereference(sap->rcv_func);
	if (rcv) {
		struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
		if (cskb)
			rcv(cskb, dev, pt, orig_dev);
	}
	dest = llc_pdu_type(skb);
	if (unlikely(!dest || !llc_type_handlers[dest - 1]))
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b4e668e0e12c..f4291f349e92 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -40,7 +40,7 @@ int llc_mac_hdr_init(struct sk_buff *skb,
	case ARPHRD_IEEE802_TR: {
		struct net_device *dev = skb->dev;
		struct trh_hdr *trh;

		skb->mac.raw = skb_push(skb, sizeof(*trh));
		trh = tr_hdr(skb);
		trh->ac = AC;
diff --git a/net/llc/llc_pdu.c b/net/llc/llc_pdu.c
index a28ce525d201..fa8324396db3 100644
--- a/net/llc/llc_pdu.c
+++ b/net/llc/llc_pdu.c
@@ -39,7 +39,7 @@ void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value)

	llc_pdu_decode_pdu_type(skb, &pdu_type);
	pdu = llc_pdu_sn_hdr(skb);

	switch (pdu_type) {
	case LLC_PDU_TYPE_I:
	case LLC_PDU_TYPE_S:
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 19308fece3ad..dcfe6c739471 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -142,18 +142,18 @@ out:
}

static char *llc_conn_state_names[] = {
	[LLC_CONN_STATE_ADM]        = "adm",
	[LLC_CONN_STATE_SETUP]      = "setup",
	[LLC_CONN_STATE_NORMAL]     = "normal",
	[LLC_CONN_STATE_BUSY]       = "busy",
	[LLC_CONN_STATE_REJ]        = "rej",
	[LLC_CONN_STATE_AWAIT]      = "await",
	[LLC_CONN_STATE_AWAIT_BUSY] = "await_busy",
	[LLC_CONN_STATE_AWAIT_REJ]  = "await_rej",
	[LLC_CONN_STATE_D_CONN]     = "d_conn",
	[LLC_CONN_STATE_RESET]      = "reset",
	[LLC_CONN_STATE_ERROR]      = "error",
	[LLC_CONN_STATE_TEMP]       = "temp",
};

static int llc_seq_core_show(struct seq_file *seq, void *v)
diff --git a/net/llc/llc_s_st.c b/net/llc/llc_s_st.c
index 6a43201aa32e..135f7d80069e 100644
--- a/net/llc/llc_s_st.c
+++ b/net/llc/llc_s_st.c
@@ -175,7 +175,7 @@ struct llc_sap_state llc_sap_state_table[LLC_NR_SAP_STATES] = {
	[LLC_SAP_STATE_INACTIVE - 1] = {
		.curr_state  = LLC_SAP_STATE_INACTIVE,
		.transitions = llc_sap_inactive_state_transitions,
	},
	[LLC_SAP_STATE_ACTIVE - 1] = {
		.curr_state  = LLC_SAP_STATE_ACTIVE,
		.transitions = llc_sap_active_state_transitions,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 61cb8cf7d153..2615dc81aa36 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -201,7 +201,7 @@ static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
			if (sock_queue_rcv_skb(skb->sk, skb))
				kfree_skb(skb);
		}
	}
	kfree_skb(skb);
}

@@ -215,7 +215,7 @@ static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
 * This function is called when upper layer wants to send a TEST pdu.
 * Returns 0 for success, 1 otherwise.
 */
void llc_build_and_send_test_pkt(struct llc_sap *sap,
				 struct sk_buff *skb, u8 *dmac, u8 dsap)
{
	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
@@ -224,7 +224,7 @@ void llc_build_and_send_test_pkt(struct llc_sap *sap,
	ev->daddr.lsap = dsap;
	memcpy(ev->saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
	memcpy(ev->daddr.mac, dmac, IFHWADDRLEN);

	ev->type      = LLC_SAP_EV_TYPE_PRIM;
	ev->prim      = LLC_TEST_PRIM;
	ev->prim_type = LLC_PRIM_TYPE_REQ;
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 8275bd33bd9d..576355a192ab 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -112,17 +112,17 @@ static struct llc_station llc_main_station;

static int llc_stat_ev_enable_with_dup_addr_check(struct sk_buff *skb)
{
	struct llc_station_state_ev *ev = llc_station_ev(skb);

	return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
	       ev->prim_type ==
			      LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK ? 0 : 1;
}

static int llc_stat_ev_enable_without_dup_addr_check(struct sk_buff *skb)
{
	struct llc_station_state_ev *ev = llc_station_ev(skb);

	return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
	       ev->prim_type ==
			LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK ? 0 : 1;
@@ -130,8 +130,8 @@ static int llc_stat_ev_enable_without_dup_addr_check(struct sk_buff *skb)

static int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct sk_buff *skb)
{
	struct llc_station_state_ev *ev = llc_station_ev(skb);

	return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
		llc_main_station.retry_count <
		llc_main_station.maximum_retry ? 0 : 1;
@@ -139,8 +139,8 @@ static int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct sk_buff *skb)

static int llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry(struct sk_buff *skb)
{
	struct llc_station_state_ev *ev = llc_station_ev(skb);

	return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
		llc_main_station.retry_count ==
		llc_main_station.maximum_retry ? 0 : 1;
@@ -148,7 +148,7 @@ static int llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry(struct sk_buff *skb)

static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
{
	struct llc_station_state_ev *ev = llc_station_ev(skb);
	struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);

	return ev->type == LLC_STATION_EV_TYPE_PDU &&
@@ -306,7 +306,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
	llc_pdu_decode_sa(skb, mac_da);
	llc_pdu_decode_ssap(skb, &dsap);
	llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
	llc_pdu_init_as_test_rsp(nskb, skb);
	rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, mac_da);
	if (unlikely(rc))
		goto free;
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 45d7dd92a088..d047a3e15714 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -1,6 +1,6 @@
/*
 * sysctl_net_llc.c: sysctl interface to LLC net subsystem.
 *
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

@@ -72,7 +72,7 @@ static struct ctl_table llc2_dir_timeout_table[] = {
		.procname	= "timeout",
		.mode		= 0555,
		.child		= llc2_timeout_table,
	},
	{ 0 },
};

@@ -82,7 +82,7 @@ static struct ctl_table llc_table[] = {
		.procname	= "llc2",
		.mode		= 0555,
		.child		= llc2_dir_timeout_table,
	},
	{
		.ctl_name	= NET_LLC_STATION,
		.procname	= "station",
@@ -98,7 +98,7 @@ static struct ctl_table llc_dir_table[] = {
		.procname	= "llc",
		.mode		= 0555,
		.child		= llc_table,
	},
	{ 0 },
};

diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 73e0ff469bff..07e47dbcb0a9 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -292,7 +292,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
		}
	}
	doi_def->map.std->cat.local = kcalloc(
				      doi_def->map.std->cat.local_size,
				      sizeof(u32),
				      GFP_KERNEL);
	if (doi_def->map.std->cat.local == NULL) {
@@ -300,7 +300,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
		goto add_std_failure;
	}
	doi_def->map.std->cat.cipso = kcalloc(
				      doi_def->map.std->cat.cipso_size,
				      sizeof(u32),
				      GFP_KERNEL);
	if (doi_def->map.std->cat.cipso == NULL) {
@@ -325,10 +325,10 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
			if (cat_loc == NULL || cat_rem == NULL)
				goto add_std_failure;
			doi_def->map.std->cat.local[
						nla_get_u32(cat_loc)] =
				nla_get_u32(cat_rem);
			doi_def->map.std->cat.cipso[
						nla_get_u32(cat_rem)] =
				nla_get_u32(cat_loc);
		}
	}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 383dd4e82ee1..f6ee9b47428b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -8,7 +8,7 @@
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
@@ -470,7 +470,7 @@ static int netlink_release(struct socket *sock)
		};
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

@@ -528,11 +528,11 @@ retry:
	return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
@@ -574,7 +574,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

@@ -605,9 +605,9 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

@@ -652,7 +652,7 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);
@@ -999,7 +999,7 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
@@ -1054,7 +1054,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
@@ -1257,15 +1257,15 @@ static void netlink_data_ready(struct sock *sk, int len)
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, unsigned int groups,
		      void (*input)(struct sock *sk, int len),
		      struct module *module)
{
	struct socket *sock;
	struct sock *sk;
@@ -1317,10 +1317,10 @@ out_sock_release:
}

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
@@ -1341,7 +1341,7 @@ static int netlink_dump(struct sock *sk)
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;
@@ -1626,7 +1626,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;
@@ -1732,7 +1732,7 @@ int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
@@ -1808,7 +1808,7 @@ static int __init netlink_proto_init(void)
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 548e4e6e698f..c2996794eb25 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -310,7 +310,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
	if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
		goto ignore;

	family = genl_family_find_byid(nlh->nlmsg_type);
	if (family == NULL) {
		err = -ENOENT;
		goto errout;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 43bbe2c9e49a..799b76806bc3 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -110,7 +110,7 @@ static int nr_device_event(struct notifier_block *this, unsigned long event, voi

	nr_kill_by_device(dev);
	nr_rt_device_down(dev);

	return NOTIFY_DONE;
}

@@ -137,7 +137,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
	sk_for_each(s, node, &nr_list)
		if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
		    s->sk_state == TCP_LISTEN) {
			bh_lock_sock(s);
			goto found;
		}
	s = NULL;
@@ -157,7 +157,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, node, &nr_list) {
		struct nr_sock *nr = nr_sk(s);

		if (nr->my_index == index && nr->my_id == id) {
			bh_lock_sock(s);
			goto found;
@@ -181,10 +181,10 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, node, &nr_list) {
		struct nr_sock *nr = nr_sk(s);

		if (nr->your_index == index && nr->your_id == id &&
		    !ax25cmp(&nr->dest_addr, dest)) {
			bh_lock_sock(s);
			goto found;
		}
	}
@@ -341,17 +341,17 @@ static int nr_getsockopt(struct socket *sock, int level, int optname,
	struct sock *sk = sock->sk;
	struct nr_sock *nr = nr_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_NETROM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETROM_T1:
		val = nr->t1 / HZ;
@@ -537,7 +537,7 @@ static int nr_release(struct socket *sock)
		break;
	}

	sock->sk   = NULL;
	release_sock(sk);
	sock_put(sk);

@@ -644,7 +644,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
		return -EISCONN;	/* No reconnect on a seqpacket socket */
	}

	sk->sk_state   = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) {
@@ -710,7 +710,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
		release_sock(sk);
		return -EINPROGRESS;
	}

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
@@ -848,7 +848,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct sock *sk;
	struct sock *make;
	struct nr_sock *nr_make;
	ax25_address *src, *dest, *user;
	unsigned short circuit_index, circuit_id;
@@ -1258,10 +1258,10 @@ static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? sk_head(&nr_list)
		: sk_next((struct sock *)v);
}

static void nr_info_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_list_lock);
@@ -1291,7 +1291,7 @@ static int nr_info_show(struct seq_file *seq, void *v)

	seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr));
	seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr));
	seq_printf(seq,
"%-9s %-3s  %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n",
		ax2asc(buf, &nr->source_addr),
		devname,
@@ -1329,12 +1329,12 @@ static struct seq_operations nr_info_seqops = {
	.stop = nr_info_stop,
	.show = nr_info_show,
};

static int nr_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_info_seqops);
}

static struct file_operations nr_info_fops = {
	.owner = THIS_MODULE,
	.open = nr_info_open,
@@ -1415,7 +1415,7 @@ static int __init nr_proto_init(void)
			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
			goto fail;
		}

		dev->base_addr = i;
		if (register_netdev(dev)) {
			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
@@ -1430,7 +1430,7 @@ static int __init nr_proto_init(void)
		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
		goto fail;
	}

	register_netdevice_notifier(&nr_dev_notifier);

	ax25_register_pid(&nr_pid);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 8f88964099ef..e9909aeb43e9 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -781,8 +781,8 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}
@@ -861,8 +861,8 @@ static void *nr_node_start(struct seq_file *seq, loff_t *pos)
	struct nr_node *nr_node;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_node_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

@@ -879,8 +879,8 @@ static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_node_list.first
		: ((struct nr_node *)v)->node_node.next;

@@ -963,8 +963,8 @@ static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_neigh_list.first
		: ((struct nr_neigh *)v)->neigh_node.next;

@@ -997,7 +997,7 @@ static int nr_neigh_show(struct seq_file *seq, void *v)

	if (nr_neigh->digipeat != NULL) {
		for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
			seq_printf(seq, " %s",
				   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
	}

diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index bcb9946b4f56..cfab5721a608 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -57,7 +57,7 @@ void nr_frames_acked(struct sock *sk, unsigned short nr)
	 */
	if (nrom->va != nr) {
		while (skb_peek(&nrom->ack_queue) != NULL && nrom->va != nr) {
			skb = skb_dequeue(&nrom->ack_queue);
			kfree_skb(skb);
			nrom->va = (nrom->va + 1) % NR_MODULUS;
		}
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index ddba1c144260..6cfaad952c6c 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -43,7 +43,7 @@ void nr_init_timers(struct sock *sk)
	init_timer(&nr->t1timer);
	nr->t1timer.data     = (unsigned long)sk;
	nr->t1timer.function = &nr_t1timer_expiry;

	init_timer(&nr->t2timer);
	nr->t2timer.data     = (unsigned long)sk;
	nr->t2timer.function = &nr_t2timer_expiry;
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index 6bb8dda849dc..e4b27d7aae79 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -35,7 +35,7 @@ static int min_reset[] = {0}, max_reset[] = {1};
static struct ctl_table_header *nr_table_header;

static ctl_table nr_table[] = {
	{
		.ctl_name	= NET_NETROM_DEFAULT_PATH_QUALITY,
		.procname	= "default_path_quality",
		.data		= &sysctl_netrom_default_path_quality,
@@ -46,18 +46,18 @@ static ctl_table nr_table[] = {
		.extra1		= &min_quality,
		.extra2		= &max_quality
	},
	{
		.ctl_name	= NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER,
		.procname	= "obsolescence_count_initialiser",
		.data		= &sysctl_netrom_obsolescence_count_initialiser,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_obs,
		.extra2		= &max_obs
	},
	{
		.ctl_name	= NET_NETROM_NETWORK_TTL_INITIALISER,
		.procname	= "network_ttl_initialiser",
		.data		= &sysctl_netrom_network_ttl_initialiser,
@@ -68,7 +68,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_ttl,
		.extra2		= &max_ttl
	},
	{
		.ctl_name	= NET_NETROM_TRANSPORT_TIMEOUT,
		.procname	= "transport_timeout",
		.data		= &sysctl_netrom_transport_timeout,
@@ -79,7 +79,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_t1,
		.extra2		= &max_t1
	},
	{
		.ctl_name	= NET_NETROM_TRANSPORT_MAXIMUM_TRIES,
		.procname	= "transport_maximum_tries",
		.data		= &sysctl_netrom_transport_maximum_tries,
@@ -90,7 +90,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_n2,
		.extra2		= &max_n2
	},
	{
		.ctl_name	= NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY,
		.procname	= "transport_acknowledge_delay",
		.data		= &sysctl_netrom_transport_acknowledge_delay,
@@ -101,7 +101,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_t2,
		.extra2		= &max_t2
	},
	{
		.ctl_name	= NET_NETROM_TRANSPORT_BUSY_DELAY,
		.procname	= "transport_busy_delay",
		.data		= &sysctl_netrom_transport_busy_delay,
@@ -112,7 +112,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_t4,
		.extra2		= &max_t4
	},
	{
		.ctl_name	= NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE,
		.procname	= "transport_requested_window_size",
		.data		= &sysctl_netrom_transport_requested_window_size,
@@ -123,7 +123,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_window,
		.extra2		= &max_window
	},
	{
		.ctl_name	= NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT,
		.procname	= "transport_no_activity_timeout",
		.data		= &sysctl_netrom_transport_no_activity_timeout,
@@ -134,7 +134,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_idle,
		.extra2		= &max_idle
	},
	{
		.ctl_name	= NET_NETROM_ROUTING_CONTROL,
		.procname	= "routing_control",
		.data		= &sysctl_netrom_routing_control,
@@ -145,7 +145,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_route,
		.extra2		= &max_route
	},
	{
		.ctl_name	= NET_NETROM_LINK_FAILS_COUNT,
		.procname	= "link_fails_count",
		.data		= &sysctl_netrom_link_fails_count,
@@ -156,7 +156,7 @@ static ctl_table nr_table[] = {
		.extra1		= &min_fails,
		.extra2		= &max_fails
	},
	{
		.ctl_name	= NET_NETROM_RESET,
		.procname	= "reset",
		.data		= &sysctl_netrom_reset_circuit,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a6fa48788e8f..444550917bc1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -11,7 +11,7 @@
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
@@ -34,12 +34,12 @@
 *		Alexey Kuznetsov	:	Untied from IPv4 stack.
 *		Cyrus Durgin		:	Fixed kerneld for kmod.
 *		Michal Ostrowski	:	Module initialization cleanup.
 *		Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *
 *		This program is free software; you can redistribute it and/or
@@ -48,7 +48,7 @@
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -124,7 +124,7 @@ Outgoing, dev->hard_header!=NULL

Incoming, dev->hard_header==NULL
   mac.raw -> UNKNOWN position. It is very likely, that it points to ll header.
	      PPP makes it, that is wrong, because introduce assymetry
	      between rx and tx paths.
   data    -> data

@@ -237,7 +237,7 @@ static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int pos
	frame_offset = position % po->frames_per_block;

	frame = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);

	return frame;
}
#endif
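The packet_lookup_frame() hunk above addresses a frame in the mmap ring by splitting a linear frame position into a block index and an offset within that block. A rough user-space illustration of the same arithmetic, with hypothetical names and sizes chosen only for the example:

```c
/*
 * Illustrative sketch (not kernel code) of the ring-buffer index math
 * seen in packet_lookup_frame() above: position -> (block, byte offset).
 */
#include <stdio.h>

struct ring_layout {
	unsigned int frames_per_block;	/* frames stored in each block */
	unsigned int frame_size;	/* bytes per frame */
};

static unsigned long frame_offset_bytes(const struct ring_layout *r,
					unsigned int position,
					unsigned int *block)
{
	*block = position / r->frames_per_block;
	return (unsigned long)(position % r->frames_per_block) * r->frame_size;
}

int main(void)
{
	struct ring_layout r = { .frames_per_block = 4, .frame_size = 2048 };
	unsigned int block;
	unsigned long off = frame_offset_bytes(&r, 6, &block);

	/* frame 6 lives in block 1 at byte offset 2 * 2048 = 4096 */
	printf("block %u, offset %lu\n", block, off);
	return 0;
}
```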
@@ -280,7 +280,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
@@ -336,7 +336,7 @@ oom:
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
@@ -346,9 +346,9 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
	struct net_device *dev;
	__be16 proto=0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr)
@@ -362,7 +362,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
		return(-ENOTCONN);	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
@@ -370,7 +370,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;
@@ -379,7 +379,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;
@@ -392,14 +392,14 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
	 * deal with the problem - do your own algorithmic backoffs. That's far
	 * more flexible.
	 */

	if (skb == NULL)
		goto out_unlock;

	/*
	 *	Fill it in
	 */

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
@@ -641,7 +641,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe

	spin_lock(&sk->sk_receive_queue.lock);
	h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);

	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
@@ -660,7 +660,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->tstamp.off_sec == 0) {
		__net_timestamp(skb);
		sock_enable_timestamp(sk);
	}
@@ -700,7 +700,7 @@ drop_n_restore:
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
@@ -728,9 +728,9 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
	int ifindex, err, reserve = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

@@ -939,11 +939,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name,uaddr->sa_data,sizeof(name));
@@ -968,7 +968,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
@@ -995,7 +995,7 @@ static struct proto packet_proto = {
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct socket *sock, int protocol)
@@ -1097,7 +1097,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
	skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */
@@ -1358,7 +1358,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv

	switch(optname) {
#ifdef CONFIG_PACKET_MULTICAST
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
@@ -1438,7 +1438,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,

	if (len < 0)
		return -EINVAL;

	switch(optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
@@ -1547,7 +1547,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
		}
		case SIOCGSTAMP:
			return sock_get_timestamp(sk, (struct timeval __user *)arg);

#ifdef CONFIG_INET
		case SIOCADDRT:
		case SIOCDELRT:
@@ -1608,7 +1608,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket * sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}
@@ -1618,7 +1618,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket * sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}
@@ -1682,7 +1682,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
	int was_running, order = 0;
	__be16 num;
	int err = 0;

	if (req->tp_block_nr) {
		int i, l;

@@ -1744,7 +1744,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
@@ -1861,7 +1861,7 @@ static const struct proto_ops packet_ops = {
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
@@ -1906,17 +1906,17 @@ static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return (v == SEQ_START_TOKEN)
		? sk_head(&packet_sklist)
		: sk_next((struct sock*)v) ;
}

static void packet_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&packet_sklist_lock);
}

static int packet_seq_show(struct seq_file *seq, void *v)
1920{ 1920{
1921 if (v == SEQ_START_TOKEN) 1921 if (v == SEQ_START_TOKEN)
1922 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); 1922 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
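[Editor's note, not part of the commit: the packet_seq_* hunks above and the rose_info_*/rose_node_* hunks below all reindent instances of the standard seq_file iterator pattern (start/next/stop/show). The following is a minimal, hypothetical sketch of that pattern over a simple locked list; every demo_* name is invented, and the /proc registration via seq_open() is omitted.]

#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct demo_entry *next;
	const char *name;
	int value;
};

static struct demo_entry *demo_list;
static DEFINE_SPINLOCK(demo_lock);

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct demo_entry *e;
	loff_t i = 1;

	spin_lock_bh(&demo_lock);		/* held for the whole walk, dropped in ->stop() */
	if (*pos == 0)
		return SEQ_START_TOKEN;		/* first call: lets ->show() print a header */

	for (e = demo_list; e && i < *pos; e = e->next, ++i)
		;				/* skip forward to the requested position */
	return (i == *pos) ? e : NULL;
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return (v == SEQ_START_TOKEN) ? demo_list
				      : ((struct demo_entry *)v)->next;
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&demo_lock);		/* always called, even after an error */
}

static int demo_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "name  value\n");
	else {
		struct demo_entry *e = v;
		seq_printf(seq, "%-5s %d\n", e->name, e->value);
	}
	return 0;
}

static struct seq_operations demo_seq_ops = {
	.start = demo_seq_start,
	.next  = demo_seq_next,
	.stop  = demo_seq_stop,
	.show  = demo_seq_show,
};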
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 9e279464c9d1..7a81a8ee8544 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1351,7 +1351,7 @@ static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1351 spin_lock_bh(&rose_list_lock); 1351 spin_lock_bh(&rose_list_lock);
1352 if (*pos == 0) 1352 if (*pos == 0)
1353 return SEQ_START_TOKEN; 1353 return SEQ_START_TOKEN;
1354 1354
1355 i = 1; 1355 i = 1;
1356 sk_for_each(s, node, &rose_list) { 1356 sk_for_each(s, node, &rose_list) {
1357 if (i == *pos) 1357 if (i == *pos)
@@ -1365,10 +1365,10 @@ static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1365{ 1365{
1366 ++*pos; 1366 ++*pos;
1367 1367
1368 return (v == SEQ_START_TOKEN) ? sk_head(&rose_list) 1368 return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
1369 : sk_next((struct sock *)v); 1369 : sk_next((struct sock *)v);
1370} 1370}
1371 1371
1372static void rose_info_stop(struct seq_file *seq, void *v) 1372static void rose_info_stop(struct seq_file *seq, void *v)
1373{ 1373{
1374 spin_unlock_bh(&rose_list_lock); 1374 spin_unlock_bh(&rose_list_lock);
@@ -1379,7 +1379,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
1379 char buf[11]; 1379 char buf[11];
1380 1380
1381 if (v == SEQ_START_TOKEN) 1381 if (v == SEQ_START_TOKEN)
1382 seq_puts(seq, 1382 seq_puts(seq,
1383 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); 1383 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
1384 1384
1385 else { 1385 else {
@@ -1392,7 +1392,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
1392 devname = "???"; 1392 devname = "???";
1393 else 1393 else
1394 devname = dev->name; 1394 devname = dev->name;
1395 1395
1396 seq_printf(seq, "%-10s %-9s ", 1396 seq_printf(seq, "%-10s %-9s ",
1397 rose2asc(&rose->dest_addr), 1397 rose2asc(&rose->dest_addr),
1398 ax2asc(buf, &rose->dest_call)); 1398 ax2asc(buf, &rose->dest_call));
@@ -1520,7 +1520,7 @@ static int __init rose_proto_init(void)
1520 char name[IFNAMSIZ]; 1520 char name[IFNAMSIZ];
1521 1521
1522 sprintf(name, "rose%d", i); 1522 sprintf(name, "rose%d", i);
1523 dev = alloc_netdev(sizeof(struct net_device_stats), 1523 dev = alloc_netdev(sizeof(struct net_device_stats),
1524 name, rose_setup); 1524 name, rose_setup);
1525 if (!dev) { 1525 if (!dev) {
1526 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); 1526 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
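[Editor's note, not part of the commit: the rose_proto_init() hunk above is cut off mid-loop; for context, the usual alloc_netdev()/register_netdev() idiom it relies on looks roughly like the sketch below. The demo_* names and the setup callback are invented, and error handling is reduced to the minimum; this is not the actual ROSE initialisation code.]

#include <linux/netdevice.h>

static void demo_setup(struct net_device *dev)
{
	/* fill in MTU, hard_header_len, transmit hooks, etc. for the device type */
}

static struct net_device *demo_create(int i)
{
	struct net_device *dev;
	char name[IFNAMSIZ];
	int err;

	snprintf(name, IFNAMSIZ, "demo%d", i);	/* cf. "rose%d" in the hunk above */
	dev = alloc_netdev(sizeof(struct net_device_stats), name, demo_setup);
	if (!dev)
		return NULL;			/* allocation failed */

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);		/* undo the allocation on failure */
		return NULL;
	}
	return dev;
}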
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 8028c0d425dc..0dcca4289eeb 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -66,7 +66,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
66 while (rose_node != NULL) { 66 while (rose_node != NULL) {
67 if ((rose_node->mask == rose_route->mask) && 67 if ((rose_node->mask == rose_route->mask) &&
68 (rosecmpm(&rose_route->address, &rose_node->address, 68 (rosecmpm(&rose_route->address, &rose_node->address,
69 rose_route->mask) == 0)) 69 rose_route->mask) == 0))
70 break; 70 break;
71 rose_node = rose_node->next; 71 rose_node = rose_node->next;
72 } 72 }
@@ -300,7 +300,7 @@ static int rose_del_node(struct rose_route_struct *rose_route,
300 while (rose_node != NULL) { 300 while (rose_node != NULL) {
301 if ((rose_node->mask == rose_route->mask) && 301 if ((rose_node->mask == rose_route->mask) &&
302 (rosecmpm(&rose_route->address, &rose_node->address, 302 (rosecmpm(&rose_route->address, &rose_node->address,
303 rose_route->mask) == 0)) 303 rose_route->mask) == 0))
304 break; 304 break;
305 rose_node = rose_node->next; 305 rose_node = rose_node->next;
306 } 306 }
@@ -1070,7 +1070,7 @@ static void *rose_node_start(struct seq_file *seq, loff_t *pos)
1070 if (*pos == 0) 1070 if (*pos == 0)
1071 return SEQ_START_TOKEN; 1071 return SEQ_START_TOKEN;
1072 1072
1073 for (rose_node = rose_node_list; rose_node && i < *pos; 1073 for (rose_node = rose_node_list; rose_node && i < *pos;
1074 rose_node = rose_node->next, ++i); 1074 rose_node = rose_node->next, ++i);
1075 1075
1076 return (i == *pos) ? rose_node : NULL; 1076 return (i == *pos) ? rose_node : NULL;
@@ -1079,8 +1079,8 @@ static void *rose_node_start(struct seq_file *seq, loff_t *pos)
1079static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos) 1079static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos)
1080{ 1080{
1081 ++*pos; 1081 ++*pos;
1082 1082
1083 return (v == SEQ_START_TOKEN) ? rose_node_list 1083 return (v == SEQ_START_TOKEN) ? rose_node_list
1084 : ((struct rose_node *)v)->next; 1084 : ((struct rose_node *)v)->next;
1085} 1085}
1086 1086
@@ -1146,7 +1146,7 @@ static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
1146 if (*pos == 0) 1146 if (*pos == 0)
1147 return SEQ_START_TOKEN; 1147 return SEQ_START_TOKEN;
1148 1148
1149 for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos; 1149 for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos;
1150 rose_neigh = rose_neigh->next, ++i); 1150 rose_neigh = rose_neigh->next, ++i);
1151 1151
1152 return (i == *pos) ? rose_neigh : NULL; 1152 return (i == *pos) ? rose_neigh : NULL;
@@ -1155,8 +1155,8 @@ static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
1155static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos) 1155static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
1156{ 1156{
1157 ++*pos; 1157 ++*pos;
1158 1158
1159 return (v == SEQ_START_TOKEN) ? rose_neigh_list 1159 return (v == SEQ_START_TOKEN) ? rose_neigh_list
1160 : ((struct rose_neigh *)v)->next; 1160 : ((struct rose_neigh *)v)->next;
1161} 1161}
1162 1162
@@ -1171,7 +1171,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v)
1171 int i; 1171 int i;
1172 1172
1173 if (v == SEQ_START_TOKEN) 1173 if (v == SEQ_START_TOKEN)
1174 seq_puts(seq, 1174 seq_puts(seq,
1175 "addr callsign dev count use mode restart t0 tf digipeaters\n"); 1175 "addr callsign dev count use mode restart t0 tf digipeaters\n");
1176 else { 1176 else {
1177 struct rose_neigh *rose_neigh = v; 1177 struct rose_neigh *rose_neigh = v;
@@ -1229,7 +1229,7 @@ static void *rose_route_start(struct seq_file *seq, loff_t *pos)
1229 if (*pos == 0) 1229 if (*pos == 0)
1230 return SEQ_START_TOKEN; 1230 return SEQ_START_TOKEN;
1231 1231
1232 for (rose_route = rose_route_list; rose_route && i < *pos; 1232 for (rose_route = rose_route_list; rose_route && i < *pos;
1233 rose_route = rose_route->next, ++i); 1233 rose_route = rose_route->next, ++i);
1234 1234
1235 return (i == *pos) ? rose_route : NULL; 1235 return (i == *pos) ? rose_route : NULL;
@@ -1238,8 +1238,8 @@ static void *rose_route_start(struct seq_file *seq, loff_t *pos)
1238static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos) 1238static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos)
1239{ 1239{
1240 ++*pos; 1240 ++*pos;
1241 1241
1242 return (v == SEQ_START_TOKEN) ? rose_route_list 1242 return (v == SEQ_START_TOKEN) ? rose_route_list
1243 : ((struct rose_route *)v)->next; 1243 : ((struct rose_route *)v)->next;
1244} 1244}
1245 1245
@@ -1253,30 +1253,30 @@ static int rose_route_show(struct seq_file *seq, void *v)
1253 char buf[11]; 1253 char buf[11];
1254 1254
1255 if (v == SEQ_START_TOKEN) 1255 if (v == SEQ_START_TOKEN)
1256 seq_puts(seq, 1256 seq_puts(seq,
1257 "lci address callsign neigh <-> lci address callsign neigh\n"); 1257 "lci address callsign neigh <-> lci address callsign neigh\n");
1258 else { 1258 else {
1259 struct rose_route *rose_route = v; 1259 struct rose_route *rose_route = v;
1260 1260
1261 if (rose_route->neigh1) 1261 if (rose_route->neigh1)
1262 seq_printf(seq, 1262 seq_printf(seq,
1263 "%3.3X %-10s %-9s %05d ", 1263 "%3.3X %-10s %-9s %05d ",
1264 rose_route->lci1, 1264 rose_route->lci1,
1265 rose2asc(&rose_route->src_addr), 1265 rose2asc(&rose_route->src_addr),
1266 ax2asc(buf, &rose_route->src_call), 1266 ax2asc(buf, &rose_route->src_call),
1267 rose_route->neigh1->number); 1267 rose_route->neigh1->number);
1268 else 1268 else
1269 seq_puts(seq, 1269 seq_puts(seq,
1270 "000 * * 00000 "); 1270 "000 * * 00000 ");
1271 1271
1272 if (rose_route->neigh2) 1272 if (rose_route->neigh2)
1273 seq_printf(seq, 1273 seq_printf(seq,
1274 "%3.3X %-10s %-9s %05d\n", 1274 "%3.3X %-10s %-9s %05d\n",
1275 rose_route->lci2, 1275 rose_route->lci2,
1276 rose2asc(&rose_route->dest_addr), 1276 rose2asc(&rose_route->dest_addr),
1277 ax2asc(buf, &rose_route->dest_call), 1277 ax2asc(buf, &rose_route->dest_call),
1278 rose_route->neigh2->number); 1278 rose_route->neigh2->number);
1279 else 1279 else
1280 seq_puts(seq, 1280 seq_puts(seq,
1281 "000 * * 00000\n"); 1281 "000 * * 00000\n");
1282 } 1282 }
diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c
index 8548c7cf5643..27a452bc5b8d 100644
--- a/net/rose/sysctl_net_rose.c
+++ b/net/rose/sysctl_net_rose.c
@@ -25,7 +25,7 @@ static int min_window[] = {1}, max_window[] = {7};
25static struct ctl_table_header *rose_table_header; 25static struct ctl_table_header *rose_table_header;
26 26
27static ctl_table rose_table[] = { 27static ctl_table rose_table[] = {
28 { 28 {
29 .ctl_name = NET_ROSE_RESTART_REQUEST_TIMEOUT, 29 .ctl_name = NET_ROSE_RESTART_REQUEST_TIMEOUT,
30 .procname = "restart_request_timeout", 30 .procname = "restart_request_timeout",
31 .data = &sysctl_rose_restart_request_timeout, 31 .data = &sysctl_rose_restart_request_timeout,
@@ -36,7 +36,7 @@ static ctl_table rose_table[] = {
36 .extra1 = &min_timer, 36 .extra1 = &min_timer,
37 .extra2 = &max_timer 37 .extra2 = &max_timer
38 }, 38 },
39 { 39 {
40 .ctl_name = NET_ROSE_CALL_REQUEST_TIMEOUT, 40 .ctl_name = NET_ROSE_CALL_REQUEST_TIMEOUT,
41 .procname = "call_request_timeout", 41 .procname = "call_request_timeout",
42 .data = &sysctl_rose_call_request_timeout, 42 .data = &sysctl_rose_call_request_timeout,
@@ -47,7 +47,7 @@ static ctl_table rose_table[] = {
47 .extra1 = &min_timer, 47 .extra1 = &min_timer,
48 .extra2 = &max_timer 48 .extra2 = &max_timer
49 }, 49 },
50 { 50 {
51 .ctl_name = NET_ROSE_RESET_REQUEST_TIMEOUT, 51 .ctl_name = NET_ROSE_RESET_REQUEST_TIMEOUT,
52 .procname = "reset_request_timeout", 52 .procname = "reset_request_timeout",
53 .data = &sysctl_rose_reset_request_timeout, 53 .data = &sysctl_rose_reset_request_timeout,
@@ -58,7 +58,7 @@ static ctl_table rose_table[] = {
58 .extra1 = &min_timer, 58 .extra1 = &min_timer,
59 .extra2 = &max_timer 59 .extra2 = &max_timer
60 }, 60 },
61 { 61 {
62 .ctl_name = NET_ROSE_CLEAR_REQUEST_TIMEOUT, 62 .ctl_name = NET_ROSE_CLEAR_REQUEST_TIMEOUT,
63 .procname = "clear_request_timeout", 63 .procname = "clear_request_timeout",
64 .data = &sysctl_rose_clear_request_timeout, 64 .data = &sysctl_rose_clear_request_timeout,
@@ -69,7 +69,7 @@ static ctl_table rose_table[] = {
69 .extra1 = &min_timer, 69 .extra1 = &min_timer,
70 .extra2 = &max_timer 70 .extra2 = &max_timer
71 }, 71 },
72 { 72 {
73 .ctl_name = NET_ROSE_NO_ACTIVITY_TIMEOUT, 73 .ctl_name = NET_ROSE_NO_ACTIVITY_TIMEOUT,
74 .procname = "no_activity_timeout", 74 .procname = "no_activity_timeout",
75 .data = &sysctl_rose_no_activity_timeout, 75 .data = &sysctl_rose_no_activity_timeout,
@@ -80,7 +80,7 @@ static ctl_table rose_table[] = {
80 .extra1 = &min_idle, 80 .extra1 = &min_idle,
81 .extra2 = &max_idle 81 .extra2 = &max_idle
82 }, 82 },
83 { 83 {
84 .ctl_name = NET_ROSE_ACK_HOLD_BACK_TIMEOUT, 84 .ctl_name = NET_ROSE_ACK_HOLD_BACK_TIMEOUT,
85 .procname = "acknowledge_hold_back_timeout", 85 .procname = "acknowledge_hold_back_timeout",
86 .data = &sysctl_rose_ack_hold_back_timeout, 86 .data = &sysctl_rose_ack_hold_back_timeout,
@@ -91,7 +91,7 @@ static ctl_table rose_table[] = {
91 .extra1 = &min_timer, 91 .extra1 = &min_timer,
92 .extra2 = &max_timer 92 .extra2 = &max_timer
93 }, 93 },
94 { 94 {
95 .ctl_name = NET_ROSE_ROUTING_CONTROL, 95 .ctl_name = NET_ROSE_ROUTING_CONTROL,
96 .procname = "routing_control", 96 .procname = "routing_control",
97 .data = &sysctl_rose_routing_control, 97 .data = &sysctl_rose_routing_control,
@@ -102,7 +102,7 @@ static ctl_table rose_table[] = {
102 .extra1 = &min_route, 102 .extra1 = &min_route,
103 .extra2 = &max_route 103 .extra2 = &max_route
104 }, 104 },
105 { 105 {
106 .ctl_name = NET_ROSE_LINK_FAIL_TIMEOUT, 106 .ctl_name = NET_ROSE_LINK_FAIL_TIMEOUT,
107 .procname = "link_fail_timeout", 107 .procname = "link_fail_timeout",
108 .data = &sysctl_rose_link_fail_timeout, 108 .data = &sysctl_rose_link_fail_timeout,
@@ -113,7 +113,7 @@ static ctl_table rose_table[] = {
113 .extra1 = &min_ftimer, 113 .extra1 = &min_ftimer,
114 .extra2 = &max_ftimer 114 .extra2 = &max_ftimer
115 }, 115 },
116 { 116 {
117 .ctl_name = NET_ROSE_MAX_VCS, 117 .ctl_name = NET_ROSE_MAX_VCS,
118 .procname = "maximum_virtual_circuits", 118 .procname = "maximum_virtual_circuits",
119 .data = &sysctl_rose_maximum_vcs, 119 .data = &sysctl_rose_maximum_vcs,
@@ -124,7 +124,7 @@ static ctl_table rose_table[] = {
124 .extra1 = &min_maxvcs, 124 .extra1 = &min_maxvcs,
125 .extra2 = &max_maxvcs 125 .extra2 = &max_maxvcs
126 }, 126 },
127 { 127 {
128 .ctl_name = NET_ROSE_WINDOW_SIZE, 128 .ctl_name = NET_ROSE_WINDOW_SIZE,
129 .procname = "window_size", 129 .procname = "window_size",
130 .data = &sysctl_rose_window_size, 130 .data = &sysctl_rose_window_size,
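[Editor's note, not part of the commit: the sysctl_net_rose.c hunks above only reindent the opening braces of ctl_table entries. As context, a single entry of that era-style table looks roughly like the sketch below. The demo_* names and values are invented; the rose table is actually nested under net/rose directory tables, and the exact register_sysctl_table() signature varied across 2.6.x kernels, so registration is only indicated in a comment.]

#include <linux/sysctl.h>

static int demo_timeout = 30;
static int demo_min = 1, demo_max = 300;

static ctl_table demo_table[] = {
	{
		.ctl_name	= 1,			/* numeric id, later removed from the API */
		.procname	= "demo_timeout",	/* name under /proc/sys/... */
		.data		= &demo_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &demo_min,		/* clamp range for writes */
		.extra2		= &demo_max
	},
	{ .ctl_name = 0 }				/* table terminator */
};

/* Registration kept as a comment because its signature changed between
 * kernel versions:
 *	demo_header = register_sysctl_table(demo_root_table, ...);
 */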
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 93d2c55ad2d5..a7c929a9fdca 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -163,7 +163,7 @@ int rxrpc_create_connection(struct rxrpc_transport *trans,
163 conn = list_entry(_p, struct rxrpc_connection, link); 163 conn = list_entry(_p, struct rxrpc_connection, link);
164 if (conn->addr.sin_port == candidate->addr.sin_port && 164 if (conn->addr.sin_port == candidate->addr.sin_port &&
165 conn->security_ix == candidate->security_ix && 165 conn->security_ix == candidate->security_ix &&
166 conn->service_id == candidate->service_id && 166 conn->service_id == candidate->service_id &&
167 conn->in_clientflag == 0) 167 conn->in_clientflag == 0)
168 goto found_in_graveyard; 168 goto found_in_graveyard;
169 } 169 }
@@ -247,13 +247,13 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
247 conn->in_epoch == x_epoch && 247 conn->in_epoch == x_epoch &&
248 conn->conn_id == x_connid && 248 conn->conn_id == x_connid &&
249 conn->security_ix == x_secix && 249 conn->security_ix == x_secix &&
250 conn->service_id == x_servid && 250 conn->service_id == x_servid &&
251 conn->in_clientflag == x_clflag) 251 conn->in_clientflag == x_clflag)
252 goto found_active; 252 goto found_active;
253 } 253 }
254 read_unlock(&peer->conn_lock); 254 read_unlock(&peer->conn_lock);
255 255
256 /* [uncommon case] not active 256 /* [uncommon case] not active
257 * - create a candidate for a new record if an inbound connection 257 * - create a candidate for a new record if an inbound connection
258 * - only examine the graveyard for an outbound connection 258 * - only examine the graveyard for an outbound connection
259 */ 259 */
@@ -286,7 +286,7 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
286 conn->in_epoch == x_epoch && 286 conn->in_epoch == x_epoch &&
287 conn->conn_id == x_connid && 287 conn->conn_id == x_connid &&
288 conn->security_ix == x_secix && 288 conn->security_ix == x_secix &&
289 conn->service_id == x_servid && 289 conn->service_id == x_servid &&
290 conn->in_clientflag == x_clflag) 290 conn->in_clientflag == x_clflag)
291 goto found_active_second_chance; 291 goto found_active_second_chance;
292 } 292 }
@@ -299,7 +299,7 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
299 conn->in_epoch == x_epoch && 299 conn->in_epoch == x_epoch &&
300 conn->conn_id == x_connid && 300 conn->conn_id == x_connid &&
301 conn->security_ix == x_secix && 301 conn->security_ix == x_secix &&
302 conn->service_id == x_servid && 302 conn->service_id == x_servid &&
303 conn->in_clientflag == x_clflag) 303 conn->in_clientflag == x_clflag)
304 goto found_in_graveyard; 304 goto found_in_graveyard;
305 } 305 }
diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h
index 70e52f6b0b64..cc0c5795a103 100644
--- a/net/rxrpc/internal.h
+++ b/net/rxrpc/internal.h
@@ -14,7 +14,7 @@
14 */ 14 */
15#if 1 15#if 1
16#define __RXACCT_DECL(X) X 16#define __RXACCT_DECL(X) X
17#define __RXACCT(X) do { X; } while(0) 17#define __RXACCT(X) do { X; } while(0)
18#else 18#else
19#define __RXACCT_DECL(X) 19#define __RXACCT_DECL(X)
20#define __RXACCT(X) do { } while(0) 20#define __RXACCT(X) do { } while(0)
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
index 49effd92144e..bbbcd6c24048 100644
--- a/net/rxrpc/krxiod.c
+++ b/net/rxrpc/krxiod.c
@@ -141,7 +141,7 @@ static int rxrpc_krxiod(void *arg)
141 141
142 try_to_freeze(); 142 try_to_freeze();
143 143
144 /* discard pending signals */ 144 /* discard pending signals */
145 rxrpc_discard_my_signals(); 145 rxrpc_discard_my_signals();
146 146
147 } while (!rxrpc_krxiod_die); 147 } while (!rxrpc_krxiod_die);
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index 3ab0f77409f4..9a1e7f5e034c 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -110,7 +110,7 @@ static int rxrpc_krxsecd(void *arg)
110 110
111 try_to_freeze(); 111 try_to_freeze();
112 112
113 /* discard pending signals */ 113 /* discard pending signals */
114 rxrpc_discard_my_signals(); 114 rxrpc_discard_my_signals();
115 115
116 } while (!die); 116 } while (!die);
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c
index 48cbd065bb45..baec1f7fd8b9 100644
--- a/net/rxrpc/main.c
+++ b/net/rxrpc/main.c
@@ -146,17 +146,17 @@ __attribute__((no_instrument_function));
146void __cyg_profile_func_enter (void *this_fn, void *call_site) 146void __cyg_profile_func_enter (void *this_fn, void *call_site)
147{ 147{
148 asm volatile(" movl %%esp,%%edi \n" 148 asm volatile(" movl %%esp,%%edi \n"
149 " andl %0,%%edi \n" 149 " andl %0,%%edi \n"
150 " addl %1,%%edi \n" 150 " addl %1,%%edi \n"
151 " movl %%esp,%%ecx \n" 151 " movl %%esp,%%ecx \n"
152 " subl %%edi,%%ecx \n" 152 " subl %%edi,%%ecx \n"
153 " shrl $2,%%ecx \n" 153 " shrl $2,%%ecx \n"
154 " movl $0xedededed,%%eax \n" 154 " movl $0xedededed,%%eax \n"
155 " rep stosl \n" 155 " rep stosl \n"
156 : 156 :
157 : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) 157 : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
158 : "eax", "ecx", "edi", "memory", "cc" 158 : "eax", "ecx", "edi", "memory", "cc"
159 ); 159 );
160} 160}
161 161
162void __cyg_profile_func_exit(void *this_fn, void *call_site) 162void __cyg_profile_func_exit(void *this_fn, void *call_site)
@@ -165,16 +165,16 @@ __attribute__((no_instrument_function));
165void __cyg_profile_func_exit(void *this_fn, void *call_site) 165void __cyg_profile_func_exit(void *this_fn, void *call_site)
166{ 166{
167 asm volatile(" movl %%esp,%%edi \n" 167 asm volatile(" movl %%esp,%%edi \n"
168 " andl %0,%%edi \n" 168 " andl %0,%%edi \n"
169 " addl %1,%%edi \n" 169 " addl %1,%%edi \n"
170 " movl %%esp,%%ecx \n" 170 " movl %%esp,%%ecx \n"
171 " subl %%edi,%%ecx \n" 171 " subl %%edi,%%ecx \n"
172 " shrl $2,%%ecx \n" 172 " shrl $2,%%ecx \n"
173 " movl $0xdadadada,%%eax \n" 173 " movl $0xdadadada,%%eax \n"
174 " rep stosl \n" 174 " rep stosl \n"
175 : 175 :
176 : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) 176 : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info))
177 : "eax", "ecx", "edi", "memory", "cc" 177 : "eax", "ecx", "edi", "memory", "cc"
178 ); 178 );
179} 179}
180#endif 180#endif
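[Editor's note, not part of the commit: the two inline-asm hunks above are the hooks that gcc's -finstrument-functions option calls on every function entry and exit; here they appear to paint the unused stack region with marker values for debugging. The mechanism itself is easiest to see in userspace. The small standalone program below, with invented names, prints a trace line around every instrumented call when built with `gcc -finstrument-functions demo.c`.]

#include <stdio.h>

/* The hooks themselves must not be instrumented, hence the attribute,
 * mirroring the declarations in the hunk above. */
void __cyg_profile_func_enter(void *this_fn, void *call_site)
	__attribute__((no_instrument_function));
void __cyg_profile_func_exit(void *this_fn, void *call_site)
	__attribute__((no_instrument_function));

void __cyg_profile_func_enter(void *this_fn, void *call_site)
{
	fprintf(stderr, "enter %p (called from %p)\n", this_fn, call_site);
}

void __cyg_profile_func_exit(void *this_fn, void *call_site)
{
	fprintf(stderr, "exit  %p\n", this_fn);
}

static int add(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d\n", add(2, 3));	/* enter/exit lines appear around add() and main() */
	return 0;
}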
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 6374df7e77d1..0755ae028e43 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -27,7 +27,7 @@ int rxrpc_knet;
27static struct ctl_table_header *rxrpc_sysctl = NULL; 27static struct ctl_table_header *rxrpc_sysctl = NULL;
28 28
29static ctl_table rxrpc_sysctl_table[] = { 29static ctl_table rxrpc_sysctl_table[] = {
30 { 30 {
31 .ctl_name = 1, 31 .ctl_name = 1,
32 .procname = "kdebug", 32 .procname = "kdebug",
33 .data = &rxrpc_kdebug, 33 .data = &rxrpc_kdebug,
@@ -35,7 +35,7 @@ static ctl_table rxrpc_sysctl_table[] = {
35 .mode = 0644, 35 .mode = 0644,
36 .proc_handler = &proc_dointvec 36 .proc_handler = &proc_dointvec
37 }, 37 },
38 { 38 {
39 .ctl_name = 2, 39 .ctl_name = 2,
40 .procname = "ktrace", 40 .procname = "ktrace",
41 .data = &rxrpc_ktrace, 41 .data = &rxrpc_ktrace,
@@ -43,7 +43,7 @@ static ctl_table rxrpc_sysctl_table[] = {
43 .mode = 0644, 43 .mode = 0644,
44 .proc_handler = &proc_dointvec 44 .proc_handler = &proc_dointvec
45 }, 45 },
46 { 46 {
47 .ctl_name = 3, 47 .ctl_name = 3,
48 .procname = "kproto", 48 .procname = "kproto",
49 .data = &rxrpc_kproto, 49 .data = &rxrpc_kproto,
@@ -51,7 +51,7 @@ static ctl_table rxrpc_sysctl_table[] = {
51 .mode = 0644, 51 .mode = 0644,
52 .proc_handler = &proc_dointvec 52 .proc_handler = &proc_dointvec
53 }, 53 },
54 { 54 {
55 .ctl_name = 4, 55 .ctl_name = 4,
56 .procname = "knet", 56 .procname = "knet",
57 .data = &rxrpc_knet, 57 .data = &rxrpc_knet,
@@ -59,7 +59,7 @@ static ctl_table rxrpc_sysctl_table[] = {
59 .mode = 0644, 59 .mode = 0644,
60 .proc_handler = &proc_dointvec 60 .proc_handler = &proc_dointvec
61 }, 61 },
62 { 62 {
63 .ctl_name = 5, 63 .ctl_name = 5,
64 .procname = "peertimo", 64 .procname = "peertimo",
65 .data = &rxrpc_peer_timeout, 65 .data = &rxrpc_peer_timeout,
@@ -67,7 +67,7 @@ static ctl_table rxrpc_sysctl_table[] = {
67 .mode = 0644, 67 .mode = 0644,
68 .proc_handler = &proc_doulongvec_minmax 68 .proc_handler = &proc_doulongvec_minmax
69 }, 69 },
70 { 70 {
71 .ctl_name = 6, 71 .ctl_name = 6,
72 .procname = "conntimo", 72 .procname = "conntimo",
73 .data = &rxrpc_conn_timeout, 73 .data = &rxrpc_conn_timeout,
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index 4268b38d92d2..ff5f4f333086 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -330,10 +330,10 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
330 msg->state = RXRPC_MSG_RECEIVED; 330 msg->state = RXRPC_MSG_RECEIVED;
331 skb_get_timestamp(pkt, &msg->stamp); 331 skb_get_timestamp(pkt, &msg->stamp);
332 if (msg->stamp.tv_sec == 0) { 332 if (msg->stamp.tv_sec == 0) {
333 do_gettimeofday(&msg->stamp); 333 do_gettimeofday(&msg->stamp);
334 if (pkt->sk) 334 if (pkt->sk)
335 sock_enable_timestamp(pkt->sk); 335 sock_enable_timestamp(pkt->sk);
336 } 336 }
337 msg->seq = ntohl(msg->hdr.seq); 337 msg->seq = ntohl(msg->hdr.seq);
338 338
339 /* attach the packet */ 339 /* attach the packet */
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 835070e9169c..dd0868dfbd90 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -65,7 +65,7 @@ int tcf_hash_release(struct tcf_common *p, int bind,
65 p->tcfc_bindcnt--; 65 p->tcfc_bindcnt--;
66 66
67 p->tcfc_refcnt--; 67 p->tcfc_refcnt--;
68 if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) { 68 if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
69 tcf_hash_destroy(p, hinfo); 69 tcf_hash_destroy(p, hinfo);
70 ret = 1; 70 ret = 1;
71 } 71 }
@@ -362,7 +362,7 @@ static struct tc_action_ops *tc_lookup_action_id(u32 type)
362#endif 362#endif
363 363
364int tcf_action_exec(struct sk_buff *skb, struct tc_action *act, 364int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
365 struct tcf_result *res) 365 struct tcf_result *res)
366{ 366{
367 struct tc_action *a; 367 struct tc_action *a;
368 int ret = -1; 368 int ret = -1;
@@ -473,7 +473,7 @@ errout:
473} 473}
474 474
475struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, 475struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
476 char *name, int ovr, int bind, int *err) 476 char *name, int ovr, int bind, int *err)
477{ 477{
478 struct tc_action *a; 478 struct tc_action *a;
479 struct tc_action_ops *a_o; 479 struct tc_action_ops *a_o;
@@ -553,7 +553,7 @@ err_out:
553} 553}
554 554
555struct tc_action *tcf_action_init(struct rtattr *rta, struct rtattr *est, 555struct tc_action *tcf_action_init(struct rtattr *rta, struct rtattr *est,
556 char *name, int ovr, int bind, int *err) 556 char *name, int ovr, int bind, int *err)
557{ 557{
558 struct rtattr *tb[TCA_ACT_MAX_PRIO+1]; 558 struct rtattr *tb[TCA_ACT_MAX_PRIO+1];
559 struct tc_action *head = NULL, *act, *act_prev = NULL; 559 struct tc_action *head = NULL, *act, *act_prev = NULL;
@@ -590,7 +590,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
590 int err = 0; 590 int err = 0;
591 struct gnet_dump d; 591 struct gnet_dump d;
592 struct tcf_act_hdr *h = a->priv; 592 struct tcf_act_hdr *h = a->priv;
593 593
594 if (h == NULL) 594 if (h == NULL)
595 goto errout; 595 goto errout;
596 596
@@ -632,7 +632,7 @@ errout:
632 632
633static int 633static int
634tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, 634tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
635 u16 flags, int event, int bind, int ref) 635 u16 flags, int event, int bind, int ref)
636{ 636{
637 struct tcamsg *t; 637 struct tcamsg *t;
638 struct nlmsghdr *nlh; 638 struct nlmsghdr *nlh;
@@ -645,7 +645,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
645 t->tca_family = AF_UNSPEC; 645 t->tca_family = AF_UNSPEC;
646 t->tca__pad1 = 0; 646 t->tca__pad1 = 0;
647 t->tca__pad2 = 0; 647 t->tca__pad2 = 0;
648 648
649 x = (struct rtattr*) skb->tail; 649 x = (struct rtattr*) skb->tail;
650 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 650 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
651 651
@@ -653,7 +653,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
653 goto rtattr_failure; 653 goto rtattr_failure;
654 654
655 x->rta_len = skb->tail - (u8*)x; 655 x->rta_len = skb->tail - (u8*)x;
656 656
657 nlh->nlmsg_len = skb->tail - b; 657 nlh->nlmsg_len = skb->tail - b;
658 return skb->len; 658 return skb->len;
659 659
@@ -852,7 +852,7 @@ tca_action_gd(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int event)
852 } 852 }
853 853
854 if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event, 854 if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
855 0, 1) <= 0) { 855 0, 1) <= 0) {
856 kfree_skb(skb); 856 kfree_skb(skb);
857 ret = -EINVAL; 857 ret = -EINVAL;
858 goto err; 858 goto err;
@@ -861,7 +861,7 @@ tca_action_gd(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int event)
861 /* now do the delete */ 861 /* now do the delete */
862 tcf_action_destroy(head, 0); 862 tcf_action_destroy(head, 0);
863 ret = rtnetlink_send(skb, pid, RTNLGRP_TC, 863 ret = rtnetlink_send(skb, pid, RTNLGRP_TC,
864 n->nlmsg_flags&NLM_F_ECHO); 864 n->nlmsg_flags&NLM_F_ECHO);
865 if (ret > 0) 865 if (ret > 0)
866 return 0; 866 return 0;
867 return ret; 867 return ret;
@@ -872,7 +872,7 @@ err:
872} 872}
873 873
874static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, 874static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
875 u16 flags) 875 u16 flags)
876{ 876{
877 struct tcamsg *t; 877 struct tcamsg *t;
878 struct nlmsghdr *nlh; 878 struct nlmsghdr *nlh;
@@ -900,10 +900,10 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
900 goto rtattr_failure; 900 goto rtattr_failure;
901 901
902 x->rta_len = skb->tail - (u8*)x; 902 x->rta_len = skb->tail - (u8*)x;
903 903
904 nlh->nlmsg_len = skb->tail - b; 904 nlh->nlmsg_len = skb->tail - b;
905 NETLINK_CB(skb).dst_group = RTNLGRP_TC; 905 NETLINK_CB(skb).dst_group = RTNLGRP_TC;
906 906
907 err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags&NLM_F_ECHO); 907 err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
908 if (err > 0) 908 if (err > 0)
909 err = 0; 909 err = 0;
@@ -915,7 +915,7 @@ nlmsg_failure:
915 return -1; 915 return -1;
916} 916}
917 917
918 918
919static int 919static int
920tcf_action_add(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int ovr) 920tcf_action_add(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int ovr)
921{ 921{
@@ -999,13 +999,13 @@ find_dump_kind(struct nlmsghdr *n)
999 return NULL; 999 return NULL;
1000 1000
1001 if (rtattr_parse(tb, TCA_ACT_MAX_PRIO, RTA_DATA(tb1), 1001 if (rtattr_parse(tb, TCA_ACT_MAX_PRIO, RTA_DATA(tb1),
1002 NLMSG_ALIGN(RTA_PAYLOAD(tb1))) < 0) 1002 NLMSG_ALIGN(RTA_PAYLOAD(tb1))) < 0)
1003 return NULL; 1003 return NULL;
1004 if (tb[0] == NULL) 1004 if (tb[0] == NULL)
1005 return NULL; 1005 return NULL;
1006 1006
1007 if (rtattr_parse(tb2, TCA_ACT_MAX, RTA_DATA(tb[0]), 1007 if (rtattr_parse(tb2, TCA_ACT_MAX, RTA_DATA(tb[0]),
1008 RTA_PAYLOAD(tb[0])) < 0) 1008 RTA_PAYLOAD(tb[0])) < 0)
1009 return NULL; 1009 return NULL;
1010 kind = tb2[TCA_ACT_KIND-1]; 1010 kind = tb2[TCA_ACT_KIND-1];
1011 1011
@@ -1043,7 +1043,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1043 } 1043 }
1044 1044
1045 nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 1045 nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
1046 cb->nlh->nlmsg_type, sizeof(*t)); 1046 cb->nlh->nlmsg_type, sizeof(*t));
1047 t = NLMSG_DATA(nlh); 1047 t = NLMSG_DATA(nlh);
1048 t->tca_family = AF_UNSPEC; 1048 t->tca_family = AF_UNSPEC;
1049 t->tca__pad1 = 0; 1049 t->tca__pad1 = 0;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 85de7efd5fea..60095d86fd8f 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -65,7 +65,7 @@ static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
65#endif /* CONFIG_GACT_PROB */ 65#endif /* CONFIG_GACT_PROB */
66 66
67static int tcf_gact_init(struct rtattr *rta, struct rtattr *est, 67static int tcf_gact_init(struct rtattr *rta, struct rtattr *est,
68 struct tc_action *a, int ovr, int bind) 68 struct tc_action *a, int ovr, int bind)
69{ 69{
70 struct rtattr *tb[TCA_GACT_MAX]; 70 struct rtattr *tb[TCA_GACT_MAX];
71 struct tc_gact *parm; 71 struct tc_gact *parm;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 4c68c718f5ec..0fdabfa9f4bf 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -70,7 +70,7 @@ static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int
70 } 70 }
71 if (t->u.kernel.target->checkentry 71 if (t->u.kernel.target->checkentry
72 && !t->u.kernel.target->checkentry(table, NULL, 72 && !t->u.kernel.target->checkentry(table, NULL,
73 t->u.kernel.target, t->data, 73 t->u.kernel.target, t->data,
74 hook)) { 74 hook)) {
75 module_put(t->u.kernel.target->me); 75 module_put(t->u.kernel.target->me);
76 ret = -EINVAL; 76 ret = -EINVAL;
@@ -83,7 +83,7 @@ static void ipt_destroy_target(struct ipt_entry_target *t)
83{ 83{
84 if (t->u.kernel.target->destroy) 84 if (t->u.kernel.target->destroy)
85 t->u.kernel.target->destroy(t->u.kernel.target, t->data); 85 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
86 module_put(t->u.kernel.target->me); 86 module_put(t->u.kernel.target->me);
87} 87}
88 88
89static int tcf_ipt_release(struct tcf_ipt *ipt, int bind) 89static int tcf_ipt_release(struct tcf_ipt *ipt, int bind)
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 8ac65c219b98..53aa96cd579b 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -153,8 +153,8 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
153 if (tkey->offmask) { 153 if (tkey->offmask) {
154 if (skb->len > tkey->at) { 154 if (skb->len > tkey->at) {
155 char *j = pptr + tkey->at; 155 char *j = pptr + tkey->at;
156 offset += ((*j & tkey->offmask) >> 156 offset += ((*j & tkey->offmask) >>
157 tkey->shift); 157 tkey->shift);
158 } else { 158 } else {
159 goto bad; 159 goto bad;
160 } 160 }
@@ -176,7 +176,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
176 *ptr = ((*ptr & tkey->mask) ^ tkey->val); 176 *ptr = ((*ptr & tkey->mask) ^ tkey->val);
177 munged++; 177 munged++;
178 } 178 }
179 179
180 if (munged) 180 if (munged)
181 skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); 181 skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
182 goto done; 182 goto done;
@@ -200,8 +200,8 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
200 struct tcf_pedit *p = a->priv; 200 struct tcf_pedit *p = a->priv;
201 struct tc_pedit *opt; 201 struct tc_pedit *opt;
202 struct tcf_t t; 202 struct tcf_t t;
203 int s; 203 int s;
204 204
205 s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key); 205 s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key);
206 206
207 /* netlink spinlocks held above us - must use ATOMIC */ 207 /* netlink spinlocks held above us - must use ATOMIC */
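[Editor's note, not part of the commit: the tcf_pedit() hunks above re-wrap two expressions that do the actual packet editing, the variable-offset computation `offset += ((*j & tkey->offmask) >> tkey->shift)` and the word rewrite `*ptr = ((*ptr & tkey->mask) ^ tkey->val)`. The standalone program below just illustrates that mask/shift/xor arithmetic on made-up sample values; it is not pedit code.]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t word    = 0x45000054;	/* pretend this is the word at the key offset */
	uint32_t mask    = 0xff00ffff;	/* bits kept from the original word */
	uint32_t val     = 0x00200000;	/* bits xor-ed in on top */

	uint8_t  at_byte = 0x1c;	/* byte used to derive a variable offset */
	uint8_t  offmask = 0x0f;
	int      shift   = 2;
	int      offset  = (at_byte & offmask) >> shift;	/* 0x0c >> 2 = 3 */

	word = (word & mask) ^ val;	/* keep masked bits, flip in the new value */

	printf("offset=%d word=0x%08x\n", offset, word);
	return 0;
}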
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index af68e1e83251..6ffe35da22b1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -62,7 +62,7 @@ struct tc_police_compat
62 62
63#ifdef CONFIG_NET_CLS_ACT 63#ifdef CONFIG_NET_CLS_ACT
64static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb, 64static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
65 int type, struct tc_action *a) 65 int type, struct tc_action *a)
66{ 66{
67 struct tcf_common *p; 67 struct tcf_common *p;
68 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; 68 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
@@ -112,7 +112,7 @@ void tcf_police_destroy(struct tcf_police *p)
112{ 112{
113 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); 113 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
114 struct tcf_common **p1p; 114 struct tcf_common **p1p;
115 115
116 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) { 116 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
117 if (*p1p == &p->common) { 117 if (*p1p == &p->common) {
118 write_lock_bh(&police_lock); 118 write_lock_bh(&police_lock);
@@ -135,7 +135,7 @@ void tcf_police_destroy(struct tcf_police *p)
135 135
136#ifdef CONFIG_NET_CLS_ACT 136#ifdef CONFIG_NET_CLS_ACT
137static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est, 137static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
138 struct tc_action *a, int ovr, int bind) 138 struct tc_action *a, int ovr, int bind)
139{ 139{
140 unsigned h; 140 unsigned h;
141 int ret = 0, err; 141 int ret = 0, err;
@@ -269,7 +269,7 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
269} 269}
270 270
271static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, 271static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
272 struct tcf_result *res) 272 struct tcf_result *res)
273{ 273{
274 struct tcf_police *police = a->priv; 274 struct tcf_police *police = a->priv;
275 psched_time_t now; 275 psched_time_t now;
@@ -606,12 +606,12 @@ rtattr_failure:
606int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police) 606int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
607{ 607{
608 struct gnet_dump d; 608 struct gnet_dump d;
609 609
610 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 610 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
611 TCA_XSTATS, police->tcf_stats_lock, 611 TCA_XSTATS, police->tcf_stats_lock,
612 &d) < 0) 612 &d) < 0)
613 goto errout; 613 goto errout;
614 614
615 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 || 615 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
616#ifdef CONFIG_NET_ESTIMATOR 616#ifdef CONFIG_NET_ESTIMATOR
617 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 || 617 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 5fe80854ca91..c7971182af07 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -43,9 +43,9 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
43 d->tcf_bstats.bytes += skb->len; 43 d->tcf_bstats.bytes += skb->len;
44 d->tcf_bstats.packets++; 44 d->tcf_bstats.packets++;
45 45
46 /* print policy string followed by _ then packet count 46 /* print policy string followed by _ then packet count
47 * Example if this was the 3rd packet and the string was "hello" 47 * Example if this was the 3rd packet and the string was "hello"
48 * then it would look like "hello_3" (without quotes) 48 * then it would look like "hello_3" (without quotes)
49 **/ 49 **/
50 printk("simple: %s_%d\n", 50 printk("simple: %s_%d\n",
51 (char *)d->tcfd_defdata, d->tcf_bstats.packets); 51 (char *)d->tcfd_defdata, d->tcf_bstats.packets);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index edb8fc97ae11..f41f4ee0587a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -286,7 +286,7 @@ replay:
286 goto errout; 286 goto errout;
287 } else { 287 } else {
288 switch (n->nlmsg_type) { 288 switch (n->nlmsg_type) {
289 case RTM_NEWTFILTER: 289 case RTM_NEWTFILTER:
290 err = -EEXIST; 290 err = -EEXIST;
291 if (n->nlmsg_flags&NLM_F_EXCL) 291 if (n->nlmsg_flags&NLM_F_EXCL)
292 goto errout; 292 goto errout;
@@ -481,11 +481,11 @@ tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
481 481
482int 482int
483tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb, 483tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
484 struct rtattr *rate_tlv, struct tcf_exts *exts, 484 struct rtattr *rate_tlv, struct tcf_exts *exts,
485 struct tcf_ext_map *map) 485 struct tcf_ext_map *map)
486{ 486{
487 memset(exts, 0, sizeof(*exts)); 487 memset(exts, 0, sizeof(*exts));
488 488
489#ifdef CONFIG_NET_CLS_ACT 489#ifdef CONFIG_NET_CLS_ACT
490 { 490 {
491 int err; 491 int err;
@@ -511,7 +511,7 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
511#elif defined CONFIG_NET_CLS_POLICE 511#elif defined CONFIG_NET_CLS_POLICE
512 if (map->police && tb[map->police-1]) { 512 if (map->police && tb[map->police-1]) {
513 struct tcf_police *p; 513 struct tcf_police *p;
514 514
515 p = tcf_police_locate(tb[map->police-1], rate_tlv); 515 p = tcf_police_locate(tb[map->police-1], rate_tlv);
516 if (p == NULL) 516 if (p == NULL)
517 return -EINVAL; 517 return -EINVAL;
@@ -530,7 +530,7 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
530 530
531void 531void
532tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, 532tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
533 struct tcf_exts *src) 533 struct tcf_exts *src)
534{ 534{
535#ifdef CONFIG_NET_CLS_ACT 535#ifdef CONFIG_NET_CLS_ACT
536 if (src->action) { 536 if (src->action) {
@@ -597,7 +597,7 @@ rtattr_failure: __attribute__ ((unused))
597 597
598int 598int
599tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts, 599tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
600 struct tcf_ext_map *map) 600 struct tcf_ext_map *map)
601{ 601{
602#ifdef CONFIG_NET_CLS_ACT 602#ifdef CONFIG_NET_CLS_ACT
603 if (exts->action) 603 if (exts->action)
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 09fda68c8b39..ea13c2c5b061 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -98,7 +98,7 @@ static void basic_destroy(struct tcf_proto *tp)
98{ 98{
99 struct basic_head *head = (struct basic_head *) xchg(&tp->root, NULL); 99 struct basic_head *head = (struct basic_head *) xchg(&tp->root, NULL);
100 struct basic_filter *f, *n; 100 struct basic_filter *f, *n;
101 101
102 list_for_each_entry_safe(f, n, &head->flist, link) { 102 list_for_each_entry_safe(f, n, &head->flist, link) {
103 list_del(&f->link); 103 list_del(&f->link);
104 basic_delete_filter(tp, f); 104 basic_delete_filter(tp, f);
@@ -157,7 +157,7 @@ errout:
157} 157}
158 158
159static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, 159static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
160 struct rtattr **tca, unsigned long *arg) 160 struct rtattr **tca, unsigned long *arg)
161{ 161{
162 int err = -EINVAL; 162 int err = -EINVAL;
163 struct basic_head *head = (struct basic_head *) tp->root; 163 struct basic_head *head = (struct basic_head *) tp->root;
@@ -292,7 +292,7 @@ static int __init init_basic(void)
292 return register_tcf_proto_ops(&cls_basic_ops); 292 return register_tcf_proto_ops(&cls_basic_ops);
293} 293}
294 294
295static void __exit exit_basic(void) 295static void __exit exit_basic(void)
296{ 296{
297 unregister_tcf_proto_ops(&cls_basic_ops); 297 unregister_tcf_proto_ops(&cls_basic_ops);
298} 298}
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index c797d6ada7de..2ce3ce5c66eb 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -91,7 +91,7 @@ static __inline__ int fw_hash(u32 handle)
91 else if (HTSIZE == 256) { 91 else if (HTSIZE == 256) {
92 u8 *t = (u8 *) &handle; 92 u8 *t = (u8 *) &handle;
93 return t[0] ^ t[1] ^ t[2] ^ t[3]; 93 return t[0] ^ t[1] ^ t[2] ^ t[3];
94 } else 94 } else
95 return handle & (HTSIZE - 1); 95 return handle & (HTSIZE - 1);
96} 96}
97 97
@@ -407,7 +407,7 @@ static int __init init_fw(void)
407 return register_tcf_proto_ops(&cls_fw_ops); 407 return register_tcf_proto_ops(&cls_fw_ops);
408} 408}
409 409
410static void __exit exit_fw(void) 410static void __exit exit_fw(void)
411{ 411{
412 unregister_tcf_proto_ops(&cls_fw_ops); 412 unregister_tcf_proto_ops(&cls_fw_ops);
413} 413}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 587b9adab38c..7853621a04cc 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -130,7 +130,7 @@ static struct tcf_ext_map rsvp_ext_map = {
130 else if (r > 0) \ 130 else if (r > 0) \
131 return r; \ 131 return r; \
132} 132}
133 133
134static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, 134static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
135 struct tcf_result *res) 135 struct tcf_result *res)
136{ 136{
@@ -347,7 +347,7 @@ static int tunnel_bts(struct rsvp_head *data)
347{ 347{
348 int n = data->tgenerator>>5; 348 int n = data->tgenerator>>5;
349 u32 b = 1<<(data->tgenerator&0x1F); 349 u32 b = 1<<(data->tgenerator&0x1F);
350 350
351 if (data->tmap[n]&b) 351 if (data->tmap[n]&b)
352 return 0; 352 return 0;
353 data->tmap[n] |= b; 353 data->tmap[n] |= b;
@@ -547,7 +547,7 @@ insert:
547 s->next = *sp; 547 s->next = *sp;
548 wmb(); 548 wmb();
549 *sp = s; 549 *sp = s;
550 550
551 goto insert; 551 goto insert;
552 552
553errout: 553errout:
@@ -654,7 +654,7 @@ static int __init init_rsvp(void)
654 return register_tcf_proto_ops(&RSVP_OPS); 654 return register_tcf_proto_ops(&RSVP_OPS);
655} 655}
656 656
657static void __exit exit_rsvp(void) 657static void __exit exit_rsvp(void)
658{ 658{
659 unregister_tcf_proto_ops(&RSVP_OPS); 659 unregister_tcf_proto_ops(&RSVP_OPS);
660} 660}
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 5af8a59e1503..040e2d2d281a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -222,7 +222,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
222 err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map); 222 err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map);
223 if (err < 0) 223 if (err < 0)
224 return err; 224 return err;
225 225
226 memcpy(&cp, p, sizeof(cp)); 226 memcpy(&cp, p, sizeof(cp));
227 memset(&new_filter_result, 0, sizeof(new_filter_result)); 227 memset(&new_filter_result, 0, sizeof(new_filter_result));
228 228
@@ -316,12 +316,12 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
316 f = kzalloc(sizeof(*f), GFP_KERNEL); 316 f = kzalloc(sizeof(*f), GFP_KERNEL);
317 if (!f) 317 if (!f)
318 goto errout_alloc; 318 goto errout_alloc;
319 } 319 }
320 320
321 if (tb[TCA_TCINDEX_CLASSID-1]) { 321 if (tb[TCA_TCINDEX_CLASSID-1]) {
322 cr.res.classid = *(u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]); 322 cr.res.classid = *(u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]);
323 tcf_bind_filter(tp, &cr.res, base); 323 tcf_bind_filter(tp, &cr.res, base);
324 } 324 }
325 325
326 tcf_exts_change(tp, &cr.exts, &e); 326 tcf_exts_change(tp, &cr.exts, &e);
327 327
@@ -341,7 +341,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
341 for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next) 341 for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
342 /* nothing */; 342 /* nothing */;
343 *fp = f; 343 *fp = f;
344 } 344 }
345 tcf_tree_unlock(tp); 345 tcf_tree_unlock(tp);
346 346
347 return 0; 347 return 0;
@@ -491,7 +491,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
491 if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0) 491 if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
492 goto rtattr_failure; 492 goto rtattr_failure;
493 } 493 }
494 494
495 return skb->len; 495 return skb->len;
496 496
497rtattr_failure: 497rtattr_failure:
@@ -519,7 +519,7 @@ static int __init init_tcindex(void)
519 return register_tcf_proto_ops(&cls_tcindex_ops); 519 return register_tcf_proto_ops(&cls_tcindex_ops);
520} 520}
521 521
522static void __exit exit_tcindex(void) 522static void __exit exit_tcindex(void)
523{ 523{
524 unregister_tcf_proto_ops(&cls_tcindex_ops); 524 unregister_tcf_proto_ops(&cls_tcindex_ops);
525} 525}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 8b5194801995..a232671cfa4e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -760,7 +760,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
760 RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev); 760 RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
761#endif 761#endif
762#ifdef CONFIG_CLS_U32_PERF 762#ifdef CONFIG_CLS_U32_PERF
763 RTA_PUT(skb, TCA_U32_PCNT, 763 RTA_PUT(skb, TCA_U32_PCNT,
764 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), 764 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
765 n->pf); 765 n->pf);
766#endif 766#endif
@@ -810,7 +810,7 @@ static int __init init_u32(void)
810 return register_tcf_proto_ops(&cls_u32_ops); 810 return register_tcf_proto_ops(&cls_u32_ops);
811} 811}
812 812
813static void __exit exit_u32(void) 813static void __exit exit_u32(void)
814{ 814{
815 unregister_tcf_proto_ops(&cls_u32_ops); 815 unregister_tcf_proto_ops(&cls_u32_ops);
816} 816}
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index 8ed93c39b4ea..8d6dacd81900 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -88,7 +88,7 @@ static int __init init_em_cmp(void)
88 return tcf_em_register(&em_cmp_ops); 88 return tcf_em_register(&em_cmp_ops);
89} 89}
90 90
91static void __exit exit_em_cmp(void) 91static void __exit exit_em_cmp(void)
92{ 92{
93 tcf_em_unregister(&em_cmp_ops); 93 tcf_em_unregister(&em_cmp_ops);
94} 94}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 45d47d37155e..60acf8cdb27b 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -9,7 +9,7 @@
9 * Authors: Thomas Graf <tgraf@suug.ch> 9 * Authors: Thomas Graf <tgraf@suug.ch>
10 * 10 *
11 * ========================================================================== 11 * ==========================================================================
12 * 12 *
13 * The metadata ematch compares two meta objects where each object 13 * The metadata ematch compares two meta objects where each object
14 * represents either a meta value stored in the kernel or a static 14 * represents either a meta value stored in the kernel or a static
15 * value provided by userspace. The objects are not provided by 15 * value provided by userspace. The objects are not provided by
@@ -290,7 +290,7 @@ META_COLLECTOR(var_sk_bound_if)
290 dst->len = 3; 290 dst->len = 3;
291 } else { 291 } else {
292 struct net_device *dev; 292 struct net_device *dev;
293 293
294 dev = dev_get_by_index(skb->sk->sk_bound_dev_if); 294 dev = dev_get_by_index(skb->sk->sk_bound_dev_if);
295 *err = var_dev(dev, dst); 295 *err = var_dev(dev, dst);
296 if (dev) 296 if (dev)
@@ -671,7 +671,7 @@ static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
671 * Core 671 * Core
672 **************************************************************************/ 672 **************************************************************************/
673 673
674static inline int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info, 674static inline int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
675 struct meta_value *v, struct meta_obj *dst) 675 struct meta_value *v, struct meta_obj *dst)
676{ 676{
677 int err = 0; 677 int err = 0;
@@ -753,7 +753,7 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
753 struct rtattr *tb[TCA_EM_META_MAX]; 753 struct rtattr *tb[TCA_EM_META_MAX];
754 struct tcf_meta_hdr *hdr; 754 struct tcf_meta_hdr *hdr;
755 struct meta_match *meta = NULL; 755 struct meta_match *meta = NULL;
756 756
757 if (rtattr_parse(tb, TCA_EM_META_MAX, data, len) < 0) 757 if (rtattr_parse(tb, TCA_EM_META_MAX, data, len) < 0)
758 goto errout; 758 goto errout;
759 759
@@ -822,7 +822,7 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
822 822
823rtattr_failure: 823rtattr_failure:
824 return -1; 824 return -1;
825} 825}
826 826
827static struct tcf_ematch_ops em_meta_ops = { 827static struct tcf_ematch_ops em_meta_ops = {
828 .kind = TCF_EM_META, 828 .kind = TCF_EM_META,
@@ -839,7 +839,7 @@ static int __init init_em_meta(void)
839 return tcf_em_register(&em_meta_ops); 839 return tcf_em_register(&em_meta_ops);
840} 840}
841 841
842static void __exit exit_em_meta(void) 842static void __exit exit_em_meta(void)
843{ 843{
844 tcf_em_unregister(&em_meta_ops); 844 tcf_em_unregister(&em_meta_ops);
845} 845}
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 005db409be64..42103b2bdc51 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -23,7 +23,7 @@ struct nbyte_data
23 struct tcf_em_nbyte hdr; 23 struct tcf_em_nbyte hdr;
24 char pattern[0]; 24 char pattern[0];
25}; 25};
26 26
27static int em_nbyte_change(struct tcf_proto *tp, void *data, int data_len, 27static int em_nbyte_change(struct tcf_proto *tp, void *data, int data_len,
28 struct tcf_ematch *em) 28 struct tcf_ematch *em)
29{ 29{
@@ -68,7 +68,7 @@ static int __init init_em_nbyte(void)
68 return tcf_em_register(&em_nbyte_ops); 68 return tcf_em_register(&em_nbyte_ops);
69} 69}
70 70
71static void __exit exit_em_nbyte(void) 71static void __exit exit_em_nbyte(void)
72{ 72{
73 tcf_em_unregister(&em_nbyte_ops); 73 tcf_em_unregister(&em_nbyte_ops);
74} 74}
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index aa17d8f7c4c8..8ad894b58fce 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -125,7 +125,7 @@ static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
125 125
126rtattr_failure: 126rtattr_failure:
127 return -1; 127 return -1;
128} 128}
129 129
130static struct tcf_ematch_ops em_text_ops = { 130static struct tcf_ematch_ops em_text_ops = {
131 .kind = TCF_EM_TEXT, 131 .kind = TCF_EM_TEXT,
@@ -142,7 +142,7 @@ static int __init init_em_text(void)
142 return tcf_em_register(&em_text_ops); 142 return tcf_em_register(&em_text_ops);
143} 143}
144 144
145static void __exit exit_em_text(void) 145static void __exit exit_em_text(void)
146{ 146{
147 tcf_em_unregister(&em_text_ops); 147 tcf_em_unregister(&em_text_ops);
148} 148}
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index e3ddfce0ac8d..cd0600c67969 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -23,7 +23,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
23{ 23{
24 struct tc_u32_key *key = (struct tc_u32_key *) em->data; 24 struct tc_u32_key *key = (struct tc_u32_key *) em->data;
25 unsigned char *ptr = skb->nh.raw; 25 unsigned char *ptr = skb->nh.raw;
26 26
27 if (info) { 27 if (info) {
28 if (info->ptr) 28 if (info->ptr)
29 ptr = info->ptr; 29 ptr = info->ptr;
@@ -34,7 +34,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
34 34
35 if (!tcf_valid_offset(skb, ptr, sizeof(u32))) 35 if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
36 return 0; 36 return 0;
37 37
38 return !(((*(u32*) ptr) ^ key->val) & key->mask); 38 return !(((*(u32*) ptr) ^ key->val) & key->mask);
39} 39}
40 40
@@ -51,7 +51,7 @@ static int __init init_em_u32(void)
51 return tcf_em_register(&em_u32_ops); 51 return tcf_em_register(&em_u32_ops);
52} 52}
53 53
54static void __exit exit_em_u32(void) 54static void __exit exit_em_u32(void)
55{ 55{
56 tcf_em_unregister(&em_u32_ops); 56 tcf_em_unregister(&em_u32_ops);
57} 57}
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 8f8a16da72a8..d3ad36b36129 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -37,12 +37,12 @@
 * --------<-POP---------
 *
 * where B is a virtual ematch referencing to sequence starting with B1.
 *
 * ==========================================================================
 *
 * How to write an ematch in 60 seconds
 * ------------------------------------
 *
 *   1) Provide a matcher function:
 *      static int my_match(struct sk_buff *skb, struct tcf_ematch *m,
 *                          struct tcf_pkt_info *info)
@@ -115,7 +115,7 @@ static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)

/**
 * tcf_em_register - register an extended match
 *
 * @ops: ematch operations lookup table
 *
 * This function must be called by ematches to announce their presence.
@@ -211,7 +211,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
                if (ref <= idx)
                        goto errout;


                em->data = ref;
        } else {
                /* Note: This lookup will increase the module refcnt
@@ -327,7 +327,7 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta,
        /* We do not use rtattr_parse_nested here because the maximum
         * number of attributes is unknown. This saves us the allocation
         * for a tb buffer which would serve no purpose at all.
         *
         * The array of rt attributes is parsed in the order as they are
         * provided, their type must be incremental from 1 to n. Even
         * if it does not serve any real purpose, a failure of sticking
@@ -399,7 +399,7 @@ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree)
                        module_put(em->ops->owner);
                }
        }

        tree->hdr.nmatches = 0;
        kfree(tree->matches);
}
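Editor's note: the "How to write an ematch in 60 seconds" comment above describes the minimal module. A hedged sketch of such a module, following the same pattern as em_nbyte.c/em_text.c in this patch; the kind value 99 and the names em_example_* are made up for illustration, only tcf_em_register()/tcf_em_unregister() and the tcf_ematch_ops fields shown in the surrounding files are assumed:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>

/* matcher: report a match for packets longer than 64 bytes */
static int em_example_match(struct sk_buff *skb, struct tcf_ematch *m,
                            struct tcf_pkt_info *info)
{
        return skb->len > 64;
}

static struct tcf_ematch_ops em_example_ops = {
        .kind  = 99,                    /* placeholder ematch kind, not a real TCF_EM_* value */
        .match = em_example_match,
        .owner = THIS_MODULE,
        .link  = LIST_HEAD_INIT(em_example_ops.link)
};

static int __init init_em_example(void)
{
        return tcf_em_register(&em_example_ops);
}

static void __exit exit_em_example(void)
{
        tcf_em_unregister(&em_example_ops);
}

module_init(init_em_example);
module_exit(exit_em_example);
MODULE_LICENSE("GPL");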
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 65825f4409d9..60b92fcdc8bb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -389,7 +389,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
        struct Qdisc *q = *old;


        if (parent == NULL) {
                if (q && q->flags&TCQ_F_INGRESS) {
                        *old = dev_graft_qdisc(dev, q);
                } else {
@@ -596,7 +596,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                                q = qdisc_leaf(p, clid);
                        } else { /* ingress */
                                q = dev->qdisc_ingress;
                        }
                } else {
                        q = dev->qdisc_sleeping;
                }
@@ -743,7 +743,7 @@ create_n_graft:
                return -ENOENT;
        if (clid == TC_H_INGRESS)
                q = qdisc_create(dev, tcm->tcm_parent, tca, &err);
        else
                q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
        if (q == NULL) {
                if (err == -EAGAIN)
@@ -808,10 +808,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
#endif
            gnet_stats_copy_queue(&d, &q->qstats) < 0)
                goto rtattr_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto rtattr_failure;

        nlh->nlmsg_len = skb->tail - b;
        return skb->len;

@@ -954,7 +954,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        }

        /* OK. Locate qdisc */
        if ((q = qdisc_lookup(dev, qid)) == NULL)
                return -ENOENT;

        /* An check that it supports classes */
@@ -978,7 +978,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        goto out;
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTCLASS:
                        err = -EEXIST;
                        if (n->nlmsg_flags&NLM_F_EXCL)
                                goto out;
@@ -1162,7 +1162,7 @@ reclassify:
                        skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
                        goto reclassify;
                } else {
                        if (skb->tc_verd)
                                skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
                        return err;
                }
@@ -1200,7 +1200,7 @@ static struct file_operations psched_fops = {
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
#endif

#ifdef CONFIG_NET_SCH_CLK_CPU
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index edc7bb0b9c8b..afb3bbd571f2 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -107,7 +107,7 @@ static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch,
        struct atm_qdisc_data *p = PRIV(sch);
        struct atm_flow_data *flow;

        for (flow = p->flows; flow; flow = flow->next)
                if (flow->classid == classid) break;
        return flow;
}
@@ -125,7 +125,7 @@ static int atm_tc_graft(struct Qdisc *sch,unsigned long arg,
        if (!new) new = &noop_qdisc;
        *old = xchg(&flow->q,new);
        if (*old) qdisc_reset(*old);
        return 0;
}


@@ -145,7 +145,7 @@ static unsigned long atm_tc_get(struct Qdisc *sch,u32 classid)

        DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid);
        flow = lookup_flow(sch,classid);
        if (flow) flow->ref++;
        DPRINTK("atm_tc_get: flow %p\n",flow);
        return (unsigned long) flow;
}
@@ -280,9 +280,9 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                opt->rta_type,RTA_PAYLOAD(opt),hdr_len);
        if (!(sock = sockfd_lookup(fd,&error))) return error; /* f_count++ */
        DPRINTK("atm_tc_change: f_count %d\n",file_count(sock->file));
        if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
                error = -EPROTOTYPE;
                goto err_out;
        }
        /* @@@ should check if the socket is really operational or we'll crash
           on vcc->send */
@@ -320,9 +320,9 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
        flow->q = &noop_qdisc;
        DPRINTK("atm_tc_change: qdisc %p\n",flow->q);
        flow->sock = sock;
        flow->vcc = ATM_SD(sock); /* speedup */
        flow->vcc->user_back = flow;
        DPRINTK("atm_tc_change: vcc %p\n",flow->vcc);
        flow->old_pop = flow->vcc->pop;
        flow->parent = p;
        flow->vcc->pop = sch_atm_pop;
@@ -391,7 +391,7 @@ static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch,unsigned long cl)
        struct atm_flow_data *flow = (struct atm_flow_data *) cl;

        DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n",sch,p,flow);
        return flow ? &flow->filter_list : &p->link.filter_list;
}


@@ -546,8 +546,8 @@ static int atm_tc_requeue(struct sk_buff *skb,struct Qdisc *sch)
        D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
        ret = p->link.q->ops->requeue(skb,p->link.q);
        if (!ret) {
                sch->q.qlen++;
                sch->qstats.requeues++;
        } else {
                sch->qstats.drops++;
                p->link.qstats.drops++;
@@ -726,7 +726,7 @@ static int __init atm_init(void)
        return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
        unregister_qdisc(&atm_qdisc_ops);
}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f79a4f3d0a95..48830cac1014 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -40,12 +40,12 @@
        =======================================

        Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
                 Management Models for Packet Networks",
                 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

                 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

                 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
                 Parameters", 1996

                 [4] Sally Floyd and Michael Speer, "Experimental Results
@@ -59,12 +59,12 @@
        the implementation is different. Particularly:

        --- The WRR algorithm is different. Our version looks more
        reasonable (I hope) and works when quanta are allowed to be
        less than MTU, which is always the case when real time classes
        have small rates. Note, that the statement of [3] is
        incomplete, delay may actually be estimated even if class
        per-round allotment is less than MTU. Namely, if per-round
        allotment is W*r_i, and r_1+...+r_k = r < 1

        delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

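Editor's note: a rough numeric illustration of the delay bound quoted in the sch_cbq.c header comment above, reading [x] as the ceiling. All parameter values are invented for the example:

#include <math.h>
#include <stdio.h>

int main(void)
{
        /* made-up parameters: 1500-byte MTU, 100 Mbit/s link (12.5e6 bytes/s),
         * k = 4 classes, weight W = 2000, class rate share r_i = 0.1, total r = 0.5 */
        double mtu = 1500.0, B = 12.5e6, W = 2000.0, r_i = 0.1, r = 0.5;
        int k = 4;

        /* delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B */
        double bound = (ceil(mtu / (W * r_i)) * W * r + W * r + k * mtu) / B;

        printf("delay_i <= %.6f s\n", bound);        /* prints 0.001200 */
        return 0;
}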
@@ -280,7 +280,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
#ifdef CONFIG_NET_CLS_ACT
        switch (result) {
        case TC_ACT_QUEUED:
        case TC_ACT_STOLEN:
                *qerr = NET_XMIT_SUCCESS;
        case TC_ACT_SHOT:
                return NULL;
@@ -479,7 +479,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
        if (!cl->delayed) {
                delay += cl->offtime;

                /*
                   Class goes to sleep, so that it will have no
                   chance to work avgidle. Let's forgive it 8)

@@ -717,7 +717,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
}
#endif

/*
   It is mission critical procedure.

   We "regenerate" toplevel cutoff, if transmitting class
@@ -739,7 +739,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                        }
                } while ((borrowed=borrowed->borrow) != NULL);
        }
#if 0
        /* It is not necessary now. Uncommenting it
           will save CPU cycles, but decrease fairness.
         */
@@ -768,7 +768,7 @@ cbq_update(struct cbq_sched_data *q)
           (now - last) is total time between packet right edges.
           (last_pktlen/rate) is "virtual" busy time, so that

                idle = (now - last) - last_pktlen/rate
         */

        idle = PSCHED_TDIFF(q->now, cl->last);
@@ -907,7 +907,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
                        skb = cl->q->dequeue(cl->q);

                        /* Class did not give us any skb :-(
                           It could occur even if cl->q->q.qlen != 0
                           f.e. if cl->q == "tbf"
                         */
                        if (skb == NULL)
@@ -2131,7 +2131,7 @@ static int __init cbq_module_init(void)
{
        return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
        unregister_qdisc(&cbq_qdisc_ops);
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index d5421816f007..96324cf4e6a9 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -68,7 +68,7 @@ static inline int dsmark_valid_indices(u16 indices)
                        return 0;
                indices >>= 1;
        }

        return 1;
}

@@ -100,7 +100,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
        qdisc_reset(*old);
        sch_tree_unlock(sch);

        return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
@@ -151,7 +151,7 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,

        if (tb[TCA_DSMARK_VALUE-1])
                p->value[*arg-1] = RTA_GET_U8(tb[TCA_DSMARK_VALUE-1]);

        if (tb[TCA_DSMARK_MASK-1])
                p->mask[*arg-1] = mask;

@@ -167,7 +167,7 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)

        if (!dsmark_valid_index(p, arg))
                return -EINVAL;

        p->mask[arg-1] = 0xff;
        p->value[arg-1] = 0;

@@ -193,9 +193,9 @@ static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker)
                                break;
                        }
                }
ignore:
                walker->count++;
        }
}

static struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,unsigned long cl)
@@ -338,7 +338,7 @@ static unsigned int dsmark_drop(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = PRIV(sch);
        unsigned int len;

        DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);

        if (p->q->ops->drop == NULL)
@@ -506,7 +506,7 @@ static int __init dsmark_module_init(void)
        return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
        unregister_qdisc(&dsmark_qdisc_ops);
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3b6e6a780927..52eb3439d7c6 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -36,7 +36,7 @@

/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications
   to data, participating in scheduling must be additionally
@@ -66,7 +66,7 @@ void qdisc_unlock_tree(struct net_device *dev)
        write_unlock(&qdisc_tree_lock);
}

/*
   dev->queue_lock serializes queue accesses for this device
   AND dev->qdisc pointer itself.

@@ -82,7 +82,7 @@ void qdisc_unlock_tree(struct net_device *dev)
   we do not check dev->tbusy flag here.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
            <0  - queue is not empty. Device is throttled, if dev->tbusy != 0.

   NOTE: Called under dev->queue_lock with locally disabled BH.
@@ -112,7 +112,7 @@ static inline int qdisc_restart(struct net_device *dev)
                if (!netif_tx_trylock(dev)) {
                collision:
                        /* So, someone grabbed the driver. */

                        /* It may be transient configuration error,
                           when hard_start_xmit() recurses. We detect
                           it by checking xmit owner and drop the
@@ -128,7 +128,7 @@ static inline int qdisc_restart(struct net_device *dev)
                        goto requeue;
                }
        }

        {
                /* And release queue */
                spin_unlock(&dev->queue_lock);
@@ -137,7 +137,7 @@ static inline int qdisc_restart(struct net_device *dev)
                        int ret;

                        ret = dev_hard_start_xmit(skb, dev);
                        if (ret == NETDEV_TX_OK) {
                                if (!nolock) {
                                        netif_tx_unlock(dev);
                                }
@@ -146,15 +146,15 @@ static inline int qdisc_restart(struct net_device *dev)
                        }
                        if (ret == NETDEV_TX_LOCKED && nolock) {
                                spin_lock(&dev->queue_lock);
                                goto collision;
                        }
                }

                /* NETDEV_TX_BUSY - we need to requeue */
                /* Release the driver */
                if (!nolock) {
                        netif_tx_unlock(dev);
                }
                spin_lock(&dev->queue_lock);
                q = dev->qdisc;
        }
@@ -300,7 +300,7 @@ struct Qdisc noop_qdisc = {
        .enqueue = noop_enqueue,
        .dequeue = noop_dequeue,
        .flags = TCQ_F_BUILTIN,
        .ops = &noop_qdisc_ops,
        .list = LIST_HEAD_INIT(noop_qdisc.list),
};

@@ -454,7 +454,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
                                 unsigned int parentid)
{
        struct Qdisc *sch;

        sch = qdisc_alloc(dev, ops);
        if (IS_ERR(sch))
                goto errout;
@@ -478,7 +478,7 @@ void qdisc_reset(struct Qdisc *qdisc)
                ops->reset(qdisc);
}

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
@@ -600,10 +600,10 @@ void dev_shutdown(struct net_device *dev)
        dev->qdisc_sleeping = &noop_qdisc;
        qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
        if ((qdisc = dev->qdisc_ingress) != NULL) {
                dev->qdisc_ingress = NULL;
                qdisc_destroy(qdisc);
        }
#endif
        BUG_TRAP(!timer_pending(&dev->watchdog_timer));
        qdisc_unlock_tree(dev);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 18e81a8ffb01..fa1b4fe7a5fd 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -335,7 +335,7 @@ static void gred_reset(struct Qdisc* sch)

        qdisc_reset_queue(sch);

        for (i = 0; i < t->DPs; i++) {
                struct gred_sched_data *q = t->tab[i];

                if (!q)
@@ -393,7 +393,7 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
                                          "shadowed VQ 0x%x\n", i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
        }

        return 0;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6eefa6995777..135087d4213a 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -284,7 +284,7 @@ static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
        struct rb_node *n;

        n = rb_first(&q->eligible);
        if (n == NULL)
                return NULL;
@@ -773,7 +773,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
                        /* update the virtual curve */
                        vt = cl->cl_vt + cl->cl_vtoff;
                        rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
                                 cl->cl_total);
                        if (cl->cl_virtual.x == vt) {
                                cl->cl_virtual.x -= cl->cl_vtoff;
                                cl->cl_vtoff = 0;
@@ -796,10 +796,10 @@ init_vf(struct hfsc_class *cl, unsigned int len)

                        /* update the ulimit curve */
                        rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
                                 cl->cl_total);
                        /* compute myf */
                        cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
                                              cl->cl_total);
                        cl->cl_myfadj = 0;
                }
        }
@@ -853,7 +853,7 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
         * update vt and f
         */
        cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
                    - cl->cl_vtoff + cl->cl_vtadj;

        /*
         * if vt of the class is smaller than cvtmin,
@@ -870,7 +870,7 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)

        if (cl->cl_flags & HFSC_USC) {
                cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
                                                      cl->cl_total);
#if 0
                /*
                 * This code causes classes to stay way under their
@@ -1001,7 +1001,7 @@ hfsc_find_class(u32 classid, struct Qdisc *sch)

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
                u64 cur_time)
{
        sc2isc(rsc, &cl->cl_rsc);
        rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
@@ -1023,7 +1023,7 @@ hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
                u64 cur_time)
{
        sc2isc(usc, &cl->cl_usc);
        rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
@@ -1032,7 +1032,7 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                  struct rtattr **tca, unsigned long *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
@@ -1228,9 +1228,9 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
#ifdef CONFIG_NET_CLS_ACT
        switch (result) {
        case TC_ACT_QUEUED:
        case TC_ACT_STOLEN:
                *qerr = NET_XMIT_SUCCESS;
        case TC_ACT_SHOT:
                return NULL;
        }
#elif defined(CONFIG_NET_CLS_POLICE)
@@ -1259,7 +1259,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

@@ -1397,7 +1397,7 @@ hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        unsigned char *b = skb->tail;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 15f23c5511a8..1f098d862f92 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -11,7 +11,7 @@
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *              HTB support at LARTC mailing list
 *              Ondrej Kraus, <krauso@barr.cz>
 *              found missing INIT_QDISC(htb)
 *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *              helped a lot to locate nasty class stall bug
@@ -59,11 +59,11 @@
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows to assign priority to each class in hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned level. Leaf has ALWAYS level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level
    one less than their parent.
*/
@@ -245,7 +245,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 * We allow direct class selection by classid in priority. The we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
 * then finish and return direct queue.
 */
@@ -433,7 +433,7 @@ static inline void htb_remove_class_from_row(struct htb_sched *q,
 * htb_activate_prios - creates active classe's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
@@ -466,7 +466,7 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
@@ -524,9 +524,9 @@ static inline long htb_hiwater(const struct htb_class *cl)
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth to note that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
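Editor's note: a simplified, stand-alone sketch of the hysteresis idea described in the comment above. This is not the kernel's htb_class_mode(); the thresholds and names are invented to show why the mode is not flipped exactly at tokens == 0:

enum mode { CAN_SEND, MAY_BORROW, CANT_SEND };

/* Tolerate a band of width `buffer` (resp. `cbuffer`) below zero before
 * changing mode, so a borderline class does not oscillate on every packet. */
static enum mode class_mode(long tokens, long ctokens, long buffer, long cbuffer)
{
        if (ctokens < -cbuffer)         /* ceil exceeded beyond the hysteresis band */
                return CANT_SEND;
        if (tokens < -buffer)           /* rate exceeded: may still borrow from parent */
                return MAY_BORROW;
        return CAN_SEND;
}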
@@ -575,7 +575,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
@@ -594,7 +594,7 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In the other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
@@ -854,7 +854,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,

        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                           the original or next ptr */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
@@ -906,7 +906,7 @@ next:

        /* class can be empty - it is unlikely but can be true if leaf
           qdisc drops packets in enqueue routine or if someone used
           graft operation on the leaf since last dequeue;
           simply deactivate and skip such class */
        if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                struct htb_class *next;
@@ -1229,7 +1229,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        if (cl && !cl->level) {
                if (new == NULL &&
                    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                             cl->classid))
                    == NULL)
                        return -ENOBUFS;
                sch_tree_lock(sch);
@@ -1347,7 +1347,7 @@ static void htb_destroy(struct Qdisc *sch)
        del_timer_sync(&q->rttim);
#endif
        /* This line used to be after htb_destroy_class call below
           and surprisingly it worked in 2.4. But it must precede it
           because filter need its target class alive to be able to call
           unbind_filter on it (without Oops). */
        htb_destroy_filters(&q->filter_list);
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index c3242f727d41..cfe070ee6ee3 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -1,4 +1,4 @@
/* net/sched/sch_ingress.c - Ingress qdisc
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
@@ -47,7 +47,7 @@
*/
#ifndef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NETFILTER
static int nf_registered;
#endif
#endif

@@ -70,7 +70,7 @@ static int ingress_graft(struct Qdisc *sch,unsigned long arg,
        DPRINTK("ingress_graft(sch %p,[qdisc %p],new %p,old %p)\n",
                sch, p, new, old);
        DPRINTK("\n ingress_graft: You cannot add qdiscs to classes");
        return 1;
}


@@ -162,7 +162,7 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
                case TC_ACT_QUEUED:
                        result = TC_ACT_STOLEN;
                        break;
                case TC_ACT_RECLASSIFY:
                case TC_ACT_OK:
                case TC_ACT_UNSPEC:
                default:
@@ -172,7 +172,7 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
        };
/* backward compat */
#else
#ifdef CONFIG_NET_CLS_POLICE
        switch (result) {
        case TC_POLICE_SHOT:
                result = NF_DROP;
@@ -232,14 +232,14 @@ static unsigned int ingress_drop(struct Qdisc *sch)
#ifdef CONFIG_NETFILTER
static unsigned int
ing_hook(unsigned int hook, struct sk_buff **pskb,
         const struct net_device *indev,
         const struct net_device *outdev,
         int (*okfn)(struct sk_buff *))
{

        struct Qdisc *q;
        struct sk_buff *skb = *pskb;
        struct net_device *dev = skb->dev;
        int fwres=NF_ACCEPT;

        DPRINTK("ing_hook: skb %s dev=%s len=%u\n",
@@ -247,7 +247,7 @@ ing_hook(unsigned int hook, struct sk_buff **pskb,
                skb->dev ? (*pskb)->dev->name : "(no dev)",
                skb->len);

/*
revisit later: Use a private since lock dev->queue_lock is also
used on the egress (might slow things for an iota)
*/
@@ -257,8 +257,8 @@ used on the egress (might slow things for an iota)
                if ((q = dev->qdisc_ingress) != NULL)
                        fwres = q->enqueue(skb, q);
                spin_unlock(&dev->queue_lock);
        }

        return fwres;
}

@@ -296,7 +296,7 @@ static int ingress_init(struct Qdisc *sch,struct rtattr *opt)
        printk("Ingress scheduler: Classifier actions prefered over netfilter\n");
#endif
#endif

#ifndef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NETFILTER
        if (!nf_registered) {
@@ -417,7 +417,7 @@ static int __init ingress_module_init(void)

        return ret;
}
static void __exit ingress_module_exit(void)
{
        unregister_qdisc(&ingress_qdisc_ops);
#ifndef CONFIG_NET_CLS_ACT
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 79542af9dab1..1ccbfb55b0b8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -7,7 +7,7 @@
 *              2 of the License.
 *
 *              Many of the algorithms and ideas for this came from
 *              NIST Net which is not copyrighted.
 *
 * Authors:     Stephen Hemminger <shemminger@osdl.org>
 *              Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
@@ -114,7 +114,7 @@ static unsigned long get_crandom(struct crndstate *state)
 * std deviation sigma. Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static long tabledist(unsigned long mu, long sigma,
                      struct crndstate *state, const struct disttable *dist)
{
        long t, x;
@@ -126,7 +126,7 @@ static long tabledist(unsigned long mu, long sigma,
        rnd = get_crandom(state);

        /* default uniform distribution */
        if (dist == NULL)
                return (rnd % (2*sigma)) - sigma + mu;

        t = dist->table[rnd % dist->size];
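Editor's note: a user-space sketch of the idea behind tabledist() above. With no distribution table the value is uniform in [mu-sigma, mu+sigma]; with a table, a uniformly drawn index picks a scaled entry. The random source, the DIST_SCALE value, and the struct layout are stand-ins for the kernel's, shown only to make the arithmetic concrete:

#include <stdint.h>
#include <stdlib.h>

#define DIST_SCALE 8192                 /* plays the role of the kernel's scaling constant */

struct disttable { int size; int16_t table[]; };

static long tabledist(unsigned long mu, long sigma, const struct disttable *dist)
{
        long t, x;
        uint32_t rnd;

        if (sigma == 0)
                return mu;

        rnd = (uint32_t)rand();         /* stand-in for the kernel's correlated RNG */

        if (dist == NULL)               /* uniform fallback, same formula as above */
                return (rnd % (2 * sigma)) - sigma + mu;

        t = dist->table[rnd % dist->size];
        x = (sigma % DIST_SCALE) * t;   /* scale the table entry by sigma */
        if (x >= 0)
                x += DIST_SCALE / 2;
        else
                x -= DIST_SCALE / 2;

        return x / DIST_SCALE + (sigma / DIST_SCALE) * t + mu;
}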
@@ -218,7 +218,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                ++q->counter;
                ret = q->qdisc->enqueue(skb, q->qdisc);
        } else {
                /*
                 * Do re-ordering by putting one out of N packets at the front
                 * of the queue.
                 */
@@ -323,7 +323,7 @@ static void netem_reset(struct Qdisc *sch)
/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
        struct rtattr *rta;
        int ret = -ENOMEM;

        /* Hack to avoid sending change message to non-FIFO */
@@ -333,9 +333,9 @@ static int set_fifo_limit(struct Qdisc *q, int limit)
        rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
        if (rta) {
                rta->rta_type = RTM_NEWQDISC;
                rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
                ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

                ret = q->ops->change(q, rta);
                kfree(rta);
        }
@@ -364,7 +364,7 @@ static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
        d->size = n;
        for (i = 0; i < n; i++)
                d->table[i] = data[i];

        spin_lock_bh(&sch->dev->queue_lock);
        d = xchg(&q->delay_dist, d);
        spin_unlock_bh(&sch->dev->queue_lock);
@@ -419,7 +419,7 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
        struct netem_sched_data *q = qdisc_priv(sch);
        struct tc_netem_qopt *qopt;
        int ret;

        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
                return -EINVAL;

@@ -429,7 +429,7 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
                pr_debug("netem: can't set fifo limit\n");
                return ret;
        }

        q->latency = qopt->latency;
        q->jitter = qopt->jitter;
        q->limit = qopt->limit;
@@ -445,10 +445,10 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)

        /* Handle nested options after initial queue options.
         * Should have put all options in nested format but too late now.
         */
        if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
                struct rtattr *tb[TCA_NETEM_MAX];
                if (rtattr_parse(tb, TCA_NETEM_MAX,
                                 RTA_DATA(opt) + sizeof(*qopt),
                                 RTA_PAYLOAD(opt) - sizeof(*qopt)))
                        return -EINVAL;
@@ -681,7 +681,7 @@ static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                              struct rtattr **tca, unsigned long *arg)
{
        return -ENOSYS;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 000e043ebd62..9f957ca5073b 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -7,7 +7,7 @@
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */

@@ -105,7 +105,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return NET_XMIT_SUCCESS;
        }
        sch->qstats.drops++;
        return ret;
}


@@ -453,7 +453,7 @@ static int __init prio_module_init(void)
        return register_qdisc(&prio_qdisc_ops);
}

static void __exit prio_module_exit(void)
{
        unregister_qdisc(&prio_qdisc_ops);
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index acddad08850f..00db53eb8159 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -185,7 +185,7 @@ static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
                               TC_H_MAKE(sch->handle, 1));
        if (q) {
                rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
                              GFP_KERNEL);
                if (rta) {
                        rta->rta_type = RTM_NEWQDISC;
                        rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 82844801e421..66f32051a99b 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -53,7 +53,7 @@
        Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


        This is not the thing that is usually called (W)FQ nowadays.
        It does not use any timestamp mechanism, but instead
        processes queues in round-robin order.

@@ -63,7 +63,7 @@

        DRAWBACKS:

        - "Stochastic" -> It is not 100% fair.
        When hash collisions occur, several flows are considered as one.

        - "Round-robin" -> It introduces larger delays than virtual clock
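Editor's note: a toy illustration of the "stochastic" part described above: flows are hashed into a fixed number of buckets, and colliding flows share one queue. The hash function, mixing constant and bucket count here are arbitrary, not SFQ's actual ones:

#include <stdint.h>
#include <stdio.h>

#define BUCKETS 128

/* map a 4-tuple (plus a perturbation value) to one of BUCKETS queues */
static unsigned int flow_bucket(uint32_t saddr, uint32_t daddr,
                                uint16_t sport, uint16_t dport, uint32_t perturb)
{
        uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport) ^ perturb;

        h ^= h >> 16;
        h *= 0x45d9f3b;         /* arbitrary mixing constant */
        h ^= h >> 16;
        return h % BUCKETS;     /* two flows landing here together share fairness */
}

int main(void)
{
        printf("%u\n", flow_bucket(0x0a000001, 0x0a000002, 1234, 80, 42));
        return 0;
}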
@@ -501,7 +501,7 @@ static int __init sfq_module_init(void)
{
        return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
        unregister_qdisc(&sfq_qdisc_ops);
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index ed9b6d938540..85da8daa61d2 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -276,7 +276,7 @@ static void tbf_reset(struct Qdisc* sch)
static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
{
        struct Qdisc *q;
        struct rtattr *rta;
        int ret;

        q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
@@ -285,7 +285,7 @@ static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
        rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
        if (rta) {
                rta->rta_type = RTM_NEWQDISC;
                rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
                ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

                ret = q->ops->change(q, rta);
@@ -475,7 +475,7 @@ static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct rtattr **tca, unsigned long *arg)
{
        return -ENOSYS;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 4c16ad57a3e4..6a66037abac9 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -178,7 +178,7 @@ teql_destroy(struct Qdisc* sch)
                                teql_neigh_release(xchg(&dat->ncache, NULL));
                                break;
                        }

                } while ((prev = q) != master->slaves);
        }
}
@@ -292,7 +292,7 @@ restart:

        do {
                struct net_device *slave = q->dev;

                if (slave->qdisc_sleeping != q)
                        continue;
                if (netif_queue_stopped(slave) || ! netif_running(slave)) {
@@ -425,7 +425,7 @@ static __init void teql_master_setup(struct net_device *dev)

        master->dev = dev;
        ops->priv_size = sizeof(struct teql_sched_data);

        ops->enqueue = teql_enqueue;
        ops->dequeue = teql_dequeue;
        ops->requeue = teql_requeue;
@@ -489,7 +489,7 @@ static int __init teql_init(void)
        return i ? 0 : err;
}

static void __exit teql_exit(void)
{
        struct teql_master *master, *nxt;

diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5db95caed0a3..fca6f75b0a0d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -158,14 +158,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
         * recommended value of 5 times 'RTO.Max'.
         */
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
                = 5 * asoc->rto_max;

        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
                sp->autoclose * HZ;

        /* Initilizes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
                init_timer(&asoc->timers[i]);
@@ -1334,8 +1334,8 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
                                  asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
                            const union sctp_addr *laddr)
{
        int found;
@@ -1343,7 +1343,7 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
        sctp_read_lock(&asoc->base.addr_lock);
        if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
            sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
                                 sctp_sk(asoc->base.sk))) {
                found = 1;
                goto out;
        }
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 00994158e496..80294cbc0de6 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -62,7 +62,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *);
/* Copy 'src' to 'dest' taking 'scope' into account.  Omit addresses
 * in 'src' which have a broader scope than 'scope'.
 */
int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
			const struct sctp_bind_addr *src,
			sctp_scope_t scope, gfp_t gfp,
			int flags)
@@ -296,7 +296,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
 ********************************************************************/

/* Does this contain a specified address?  Allow wildcarding. */
int sctp_bind_addr_match(struct sctp_bind_addr *bp,
			 const union sctp_addr *addr,
			 struct sctp_sock *opt)
{
@@ -306,7 +306,7 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
	list_for_each(pos, &bp->address_list) {
		laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
		if (opt->pf->cmp_addr(&laddr->a, addr, opt))
			return 1;
	}

	return 0;
@@ -329,12 +329,12 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,

	list_for_each(pos, &bp->address_list) {
		laddr = list_entry(pos, struct sctp_sockaddr_entry, list);

		addr_buf = (union sctp_addr *)addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = (union sctp_addr *)addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af)
				return NULL;

			if (opt->pf->cmp_addr(&laddr->a, addr, opt))
@@ -350,7 +350,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
}

/* Copy out addresses from the global local address list. */
static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
			      union sctp_addr *addr,
			      sctp_scope_t scope, gfp_t gfp,
			      int flags)
diff --git a/net/sctp/crc32c.c b/net/sctp/crc32c.c
index 31f05ec8e1d3..59cf7b06d216 100644
--- a/net/sctp/crc32c.c
+++ b/net/sctp/crc32c.c
@@ -1,40 +1,40 @@
/* SCTP kernel reference Implementation
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * SCTP Checksum functions
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Dinakaran Joseph
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
@@ -135,10 +135,10 @@ static const __u32 crc_c[256] = {
	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
};

__u32 sctp_start_cksum(__u8 *buffer, __u16 length)
{
	__u32 crc32 = ~(__u32) 0;
	__u32 i;

	/* Optimize this routine to be SCTP specific, knowing how
@@ -147,7 +147,7 @@ __u32 sctp_start_cksum(__u8 *buffer, __u16 length)

	/* Calculate CRC up to the checksum. */
	for (i = 0; i < (sizeof(struct sctphdr) - sizeof(__u32)); i++)
		CRC32C(crc32, buffer[i]);

	/* Skip checksum field of the header. */
	for (i = 0; i < sizeof(__u32); i++)
@@ -175,13 +175,13 @@ __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
	__u32 i;
	__u32 *_to = (__u32 *)to;
	__u32 *_from = (__u32 *)from;

	for (i = 0; i < (length/4); i++) {
		_to[i] = _from[i];
		CRC32C(crc32, from[i*4]);
		CRC32C(crc32, from[i*4+1]);
		CRC32C(crc32, from[i*4+2]);
		CRC32C(crc32, from[i*4+3]);
	}

	return crc32;
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index aa8340373af7..5f5ab28977c9 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -3,48 +3,48 @@
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * This file is part of the implementation of the add-IP extension,
 * based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001,
 * for the SCTP kernel reference Implementation.
 *
 * This file converts numerical ID value to alphabetical names for SCTP
 * terms such as chunk type, parameter time, event type, etc.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
@@ -81,7 +81,7 @@ const char *sctp_cname(const sctp_subtype_t cid)
		return "illegal chunk id";
	if (cid.chunk <= SCTP_CID_BASE_MAX)
		return sctp_cid_tbl[cid.chunk];

	switch (cid.chunk) {
	case SCTP_CID_ASCONF:
		return "ASCONF";
@@ -154,7 +154,7 @@ const char *sctp_pname(const sctp_subtype_t id)

static const char *sctp_other_tbl[] = {
	"NO_PENDING_TSN",
	"ICMP_PROTO_UNREACH",
};

/* Lookup "other" debug name. */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 129756908da4..286a8dbb63b7 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -369,7 +369,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
		chunk->transport->last_time_heard = jiffies;

		error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
				   ep, asoc, chunk, GFP_ATOMIC);

		if (error && chunk)
			chunk->pdiscard = 1;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 33111873a488..71db66873695 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -226,7 +226,7 @@ int sctp_rcv(struct sk_buff *skb)
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_release;

	/* Create an SCTP packet structure. */
	chunk = sctp_chunkify(skb, asoc, sk);
@@ -293,11 +293,11 @@ discard_release:
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
	struct sctp_ep_common *rcvr = NULL;
	int backloged = 0;

	rcvr = chunk->rcvr;

	/* If the rcvr is dead then the association or endpoint
	 * has been deleted and we can safely drop the chunk
@@ -347,7 +347,7 @@ done:
	else
		BUG();

	return 0;
}

static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -399,7 +399,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
	 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
	 * Needed will never be sent, but if a message was sent before
	 * PMTU discovery was disabled that was larger than the PMTU, it
	 * would not be fragmented, so it must be re-transmitted fragmented.
	 */
	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
@@ -416,8 +416,8 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 *
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
				 struct sctp_association *asoc,
				 struct sctp_transport *t)
{
	SCTP_DEBUG_PRINTK("%s\n", __FUNCTION__);

diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 71b07466e880..c30629e17781 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -2,43 +2,43 @@
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2002 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * These functions are the methods for accessing the SCTP inqueue.
 *
 * An SCTP inqueue is a queue into which you push SCTP packets
 * (which might be bundles or fragments of chunks) and out of which you
 * pop SCTP whole chunks.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
@@ -152,8 +152,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
		chunk->data_accepted = 0;
	}

	chunk->chunk_hdr = ch;
	chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
	/* In the unlikely case of an IP reassembly, the skb could be
	 * non-linear. If so, update chunk_end so that it doesn't go past
	 * the skb->tail.
@@ -169,11 +169,11 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
		/* This is not a singleton */
		chunk->singleton = 0;
	} else if (chunk->chunk_end > chunk->skb->tail) {
		/* RFC 2960, Section 6.10 Bundling
		 *
		 * Partial chunks MUST NOT be placed in an SCTP packet.
		 * If the receiver detects a partial chunk, it MUST drop
		 * the chunk.
		 *
		 * Since the end of the chunk is past the end of our buffer
		 * (which contains the whole packet, we can freely discard
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ef36be073a13..01b27fb5dfc5 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -236,7 +236,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
	ipv6_addr_copy(&fl.fl6_dst, &daddr->v6.sin6_addr);
	if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
		fl.oif = daddr->v6.sin6_scope_id;


	SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ",
			  __FUNCTION__, NIP6(fl.fl6_dst));
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index 8ff588f0d76a..fcfb9d806de1 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -1,39 +1,39 @@
/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * Support for memory object debugging.  This allows one to monitor the
 * object allocations/deallocations for types instrumented for this
 * via the proc fs.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
@@ -121,7 +121,7 @@ done:
	if (len > length)
		len = length;

	return len;
}

/* Initialize the objcount in the proc filesystem. */
@@ -131,7 +131,7 @@ void sctp_dbg_objcnt_init(void)
	ent = create_proc_read_entry("sctp_dbg_objcnt", 0, proc_net_sctp,
				     sctp_dbg_objcnt_read, NULL);
	if (!ent)
		printk(KERN_WARNING
			"sctp_dbg_objcnt: Unable to create /proc entry.\n");
}

diff --git a/net/sctp/output.c b/net/sctp/output.c
index 3ef4351dd956..f875fc3ced54 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -85,8 +85,8 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
		chunk = sctp_get_ecne_prepend(packet->transport->asoc);

		/* If there a is a prepend chunk stick it on the list before
		 * any other chunks get appended.
		 */
		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}
@@ -110,8 +110,8 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		overhead = sizeof(struct ipv6hdr);
	}
@@ -442,7 +442,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
		 * acknowledged or have failed.
		 */
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}

	/* Perform final transformation on checksum. */
@@ -528,7 +528,7 @@ err:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	goto out;
nomem:
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index fba567a7cb64..5c2ddd10db06 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -376,7 +376,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
		}
	}
	if (!done)
		list_add_tail(new, head);
}

/* Mark all the eligible packets on a transport for retransmission. */
@@ -578,7 +578,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

@@ -590,7 +590,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

@@ -605,7 +605,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
		 */
		list_add_tail(lchunk, &transport->transmitted);

		/* Mark the chunk as ineligible for fast retransmit
		 * after it is retransmitted.
		 */
		if (chunk->fast_retransmit > 0)
@@ -703,11 +703,11 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
		 * inactive.
		 *
		 * 3.3.6 Heartbeat Acknowledgement:
		 * ...
		 * A HEARTBEAT ACK is always sent to the source IP
		 * address of the IP datagram containing the
		 * HEARTBEAT chunk to which this ack is responding.
		 * ...
		 */
		if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
		    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK)
@@ -914,7 +914,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
@@ -1266,7 +1266,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    !tchunk->resent &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
@@ -1275,7 +1275,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
							rtt);
				}
			}
			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
@@ -1590,7 +1590,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __FUNCTION__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}
@@ -1603,7 +1603,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
@@ -1657,7 +1657,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
@@ -1671,7 +1671,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing         local advancement
	 *                ...                           ...
@@ -1692,7 +1692,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)

	/* Remove any chunks in the abandoned queue that are acked by
	 * the ctsn.
	 */
	if (TSN_lte(tsn, ctsn)) {
		list_del_init(lchunk);
		if (!chunk->tsn_gap_acked) {
@@ -1743,7 +1743,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 3a7ebfcc1fdb..1b2976d34ac7 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -70,7 +70,7 @@ int sctp_primitive_ ## name(struct sctp_association *asoc, \
								\
	error = sctp_do_sm(event_type, subtype, state, ep, asoc,	\
			   arg, GFP_KERNEL);				\
	return error;							\
}

/* 10.1 ULP-to-SCTP
@@ -207,7 +207,7 @@ DECLARE_PRIMITIVE(REQUESTHEARTBEAT);

/* ADDIP
 * 3.1.1 Address Configuration Change Chunk (ASCONF)
 *
 * This chunk is used to communicate to the remote endpoint one of the
 * configuration change requests that MUST be acknowledged.  The
 * information carried in the ASCONF Chunk uses the form of a
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index b3493bdbcacb..e93fc1cc430a 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -77,7 +77,7 @@ static struct snmp_mib sctp_snmp_list[] = {

/* Return the current value of a particular entry in the mib by adding its
 * per cpu counters.
 */
static unsigned long
fold_field(void *mib[], int nr)
{
@@ -102,7 +102,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)

	for (i = 0; sctp_snmp_list[i].name != NULL; i++)
		seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
			   fold_field((void **)sctp_statistics,
				      sctp_snmp_list[i].entry));

	return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 0ef48126b117..e17a823ca90f 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -102,11 +102,11 @@ static __init int sctp_proc_init(void)
	}

	if (sctp_snmp_proc_init())
		goto out_nomem;
	if (sctp_eps_proc_init())
		goto out_nomem;
	if (sctp_assocs_proc_init())
		goto out_nomem;

	return 0;

@@ -114,7 +114,7 @@ out_nomem:
	return -ENOMEM;
}

/* Clean up the proc fs entry for the SCTP protocol.
 * Note: Do not make this __exit as it is used in the init error
 * path.
 */
@@ -286,7 +286,7 @@ static int sctp_v4_to_addr_param(const union sctp_addr *addr,

	param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
	param->v4.param_hdr.length = htons(length);
	param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;

	return length;
}
@@ -344,9 +344,9 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
	if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr))
		return 0;

	/* Is this a broadcast address? */
	if (skb && ((struct rtable *)skb->dst)->rt_flags & RTCF_BROADCAST)
		return 0;

	return 1;
}
@@ -494,7 +494,7 @@ out_unlock:
out:
	if (dst)
		SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
				  NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_src));
	else
		SCTP_DEBUG_PRINTK("NO ROUTE\n");

@@ -517,14 +517,14 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc,
	if (rt) {
		saddr->v4.sin_family = AF_INET;
		saddr->v4.sin_port = htons(asoc->base.bind_addr.port);
		saddr->v4.sin_addr.s_addr = rt->rt_src;
	}
}

/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
	return ((struct rtable *)skb->dst)->rt_iif;
}

/* Was this packet marked by Explicit Congestion Notification? */
@@ -569,7 +569,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
	newinet->dport = htons(asoc->peer.port);
	newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
	newinet->pmtudisc = inet->pmtudisc;
	newinet->id = asoc->next_tsn ^ jiffies;

	newinet->uc_ttl = -1;
	newinet->mc_loop = 1;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 783481860174..f7fb29d5a0c7 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -118,7 +118,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
	int padlen;
	__u16 len;

	/* Cause code constants are now defined in network order. */
	err.cause = cause_code;
	len = sizeof(sctp_errhdr_t) + paylen;
	padlen = len % 4;
@@ -295,11 +295,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
	 */
	chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;

	/* Tell peer that we'll do ECN only if peer advertised such cap. */
	if (asoc->peer.ecn_capable)
		chunksize += sizeof(ecap_param);

	/* Tell peer that we'll do PR-SCTP only if peer advertised. */
	if (asoc->peer.prsctp_capable)
		chunksize += sizeof(prsctp_param);

@@ -728,7 +728,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
	if (retval && chunk)
		retval->transport = chunk->transport;

	return retval;
}

/* Create an ABORT. Note that we set the T bit if we have no
@@ -844,7 +844,7 @@ err_chunk:
	return retval;
}

/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
struct sctp_chunk *sctp_make_abort_violation(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk,
@@ -1264,8 +1264,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
	/* Header size is static data prior to the actual cookie, including
	 * any padding.
	 */
	headersize = sizeof(sctp_paramhdr_t) +
		     (sizeof(struct sctp_signed_cookie) -
		      sizeof(struct sctp_cookie));
	bodysize = sizeof(struct sctp_cookie)
		   + ntohs(init_chunk->chunk_hdr->length) + addrs_len;
@@ -1314,7 +1314,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
	memcpy((__u8 *)&cookie->c.peer_init[0] +
	       ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);

	if (sctp_sk(ep->base.sk)->hmac) {
		struct hash_desc desc;

		/* Sign the message. */
@@ -1323,8 +1323,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
		sg.length = bodysize;
		keylen = SCTP_SECRET_SIZE;
		key = (char *)ep->secret_key[ep->current_key];
		desc.tfm = sctp_sk(ep->base.sk)->hmac;
		desc.flags = 0;

		if (crypto_hash_setkey(desc.tfm, key, keylen) ||
		    crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
@@ -1364,7 +1364,7 @@ struct sctp_association *sctp_unpack_cookie(
	 * any padding.
	 */
	headersize = sizeof(sctp_chunkhdr_t) +
		     (sizeof(struct sctp_signed_cookie) -
		      sizeof(struct sctp_cookie));
	bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
	fixed_size = headersize + sizeof(struct sctp_cookie);
@@ -1592,7 +1592,7 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
					struct sctp_chunk **errp)
{
	char error[] = "The following parameter had invalid length:";
	size_t payload_len = WORD_ROUND(sizeof(error)) +
			     sizeof(sctp_paramhdr_t);


@@ -1751,7 +1751,7 @@ static int sctp_verify_param(const struct sctp_association *asoc,
	case SCTP_PARAM_FWD_TSN_SUPPORT:
		if (sctp_prsctp_enable)
			break;
		/* Fall Through */
	default:
		SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
				  ntohs(param.p->type), cid);
@@ -1860,7 +1860,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
	sctp_walk_params(param, peer_init, init_hdr.params) {

		if (!sctp_process_param(asoc, param, peer_addr, gfp))
			goto clean_up;
	}

	/* Walk list of transports, removing transports in the UNKNOWN state. */
@@ -1936,7 +1936,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
	 */

	/* Allocate storage for the negotiated streams if it is not a temporary
	 * association.
	 */
	if (!asoc->temp) {
		int assoc_id;
@@ -2108,7 +2108,7 @@ static int sctp_process_param(struct sctp_association *asoc,
			asoc->peer.prsctp_capable = 1;
			break;
		}
		/* Fall Through */
	default:
		/* Any unrecognized parameters should have been caught
		 * and handled by sctp_verify_param() which should be
@@ -2167,7 +2167,7 @@ __u32 sctp_generate_tsn(const struct sctp_endpoint *ep)
 *     |                       ASCONF Parameter #N                     |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Address Parameter and other parameter will not be wrapped in this function
 */
static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
					   union sctp_addr *addr,
@@ -2289,7 +2289,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
 *     |                       Address Parameter                       |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Create an ASCONF chunk with Set Primary IP address parameter.
 */
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
					     union sctp_addr *addr)
@@ -2338,7 +2338,7 @@ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
 *     |                 ASCONF Parameter Response#N                   |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Create an ASCONF_ACK chunk with enough space for the parameter responses.
 */
static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc,
						__u32 serial, int vparam_len)
@@ -2380,7 +2380,7 @@ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id,
			ntohs(asconf_param->param_hdr.length);
	}

	/* Add Success Indication or Error Cause Indication parameter. */
	ack_param.param_hdr.type = response_type;
	ack_param.param_hdr.length = htons(sizeof(ack_param) +
					   err_param_len +
@@ -2423,11 +2423,11 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
	switch (asconf_param->param_hdr.type) {
	case SCTP_PARAM_ADD_IP:
		/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
		 * request and does not have the local resources to add this
		 * new address to the association, it MUST return an Error
		 * Cause TLV set to the new error code 'Operation Refused
		 * Due to Resource Shortage'.
		 */

		peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
		if (!peer)
@@ -2439,10 +2439,10 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
		break;
	case SCTP_PARAM_DEL_IP:
		/* ADDIP 4.3 D7) If a request is received to delete the
		 * last remaining IP address of a peer endpoint, the receiver
		 * MUST send an Error Cause TLV with the error cause set to the
		 * new error code 'Request to Delete Last Remaining IP Address'.
		 */
		pos = asoc->peer.transport_addr_list.next;
		if (pos->next == &asoc->peer.transport_addr_list)
			return SCTP_ERROR_DEL_LAST_IP;
@@ -2474,7 +2474,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
	return SCTP_ERROR_NO_ERROR;
}

/* Process an incoming ASCONF chunk with the next expected serial no. and
 * return an ASCONF_ACK chunk to be sent in response.
 */
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
@@ -2494,19 +2494,19 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
	hdr = (sctp_addiphdr_t *)asconf->skb->data;
	serial = ntohl(hdr->serial);

	/* Skip the addiphdr and store a pointer to address parameter. */
	length = sizeof(sctp_addiphdr_t);
	addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
	chunk_len -= length;

	/* Skip the address parameter and store a pointer to the first
	 * asconf paramter.
	 */
	length = ntohs(addr_param->v4.param_hdr.length);
	asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
	chunk_len -= length;

	/* create an ASCONF_ACK chunk.
	 * Based on the definitions of parameters, we know that the size of
	 * ASCONF_ACK parameters are less than or equal to the twice of ASCONF
	 * paramters.
@@ -2537,7 +2537,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
			/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
			 * an IP address sends an 'Out of Resource' in its response, it
			 * MUST also fail any subsequent add or delete requests bundled
			 * in the ASCONF.
			 */
			if (SCTP_ERROR_RSRC_LOW == err_code)
				goto done;
@@ -2548,12 +2548,12 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
					      length);
		chunk_len -= length;
	}

done:
	asoc->peer.addip_serial++;

	/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
	 * after freeing the reference to old asconf ack if any.
	 */
	if (asconf_ack) {
		if (asoc->addip_last_asconf_ack)
@@ -2621,7 +2621,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,

/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
 * for the given asconf parameter. If there is no response for this parameter,
 * return the error code based on the third argument 'no_err'.
 * ADDIP 4.1
 * A7) If an error response is received for a TLV parameter, all TLVs with no
 * response before the failed TLV are considered successful if not reported.
@@ -2645,7 +2645,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,

	/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
	 * the first asconf_ack parameter.
	 */
	length = sizeof(sctp_addiphdr_t);
	asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data +
						  length);
@@ -2696,14 +2696,14 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,

	/* Skip the chunkhdr and addiphdr from the last asconf sent and store
	 * a pointer to address parameter.
	 */
	length = sizeof(sctp_addip_chunk_t);
	addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
	asconf_len -= length;

2704 /* Skip the address parameter in the last asconf sent and store a 2704 /* Skip the address parameter in the last asconf sent and store a
2705 * pointer to the first asconf paramter. 2705 * pointer to the first asconf paramter.
2706 */ 2706 */
2707 length = ntohs(addr_param->v4.param_hdr.length); 2707 length = ntohs(addr_param->v4.param_hdr.length);
2708 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 2708 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
2709 asconf_len -= length; 2709 asconf_len -= length;
@@ -2740,7 +2740,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
2740 case SCTP_ERROR_INV_PARAM: 2740 case SCTP_ERROR_INV_PARAM:
2741 /* Disable sending this type of asconf parameter in 2741 /* Disable sending this type of asconf parameter in
2742 * future. 2742 * future.
2743 */ 2743 */
2744 asoc->peer.addip_disabled_mask |= 2744 asoc->peer.addip_disabled_mask |=
2745 asconf_param->param_hdr.type; 2745 asconf_param->param_hdr.type;
2746 break; 2746 break;
@@ -2754,7 +2754,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
2754 2754
2755 /* Skip the processed asconf parameter and move to the next 2755 /* Skip the processed asconf parameter and move to the next
2756 * one. 2756 * one.
2757 */ 2757 */
2758 length = ntohs(asconf_param->param_hdr.length); 2758 length = ntohs(asconf_param->param_hdr.length);
2759 asconf_param = (sctp_addip_param_t *)((void *)asconf_param + 2759 asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
2760 length); 2760 length);
@@ -2783,14 +2783,14 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
2783 return retval; 2783 return retval;
2784} 2784}
2785 2785
2786/* Make a FWD TSN chunk. */ 2786/* Make a FWD TSN chunk. */
2787struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, 2787struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
2788 __u32 new_cum_tsn, size_t nstreams, 2788 __u32 new_cum_tsn, size_t nstreams,
2789 struct sctp_fwdtsn_skip *skiplist) 2789 struct sctp_fwdtsn_skip *skiplist)
2790{ 2790{
2791 struct sctp_chunk *retval = NULL; 2791 struct sctp_chunk *retval = NULL;
2792 struct sctp_fwdtsn_chunk *ftsn_chunk; 2792 struct sctp_fwdtsn_chunk *ftsn_chunk;
2793 struct sctp_fwdtsn_hdr ftsn_hdr; 2793 struct sctp_fwdtsn_hdr ftsn_hdr;
2794 struct sctp_fwdtsn_skip skip; 2794 struct sctp_fwdtsn_skip skip;
2795 size_t hint; 2795 size_t hint;
2796 int i; 2796 int i;
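The ASCONF processing in the hunks above repeatedly skips a parameter by adding the length taken from that parameter's own header, which is the usual way of walking a packed list of TLV parameters. The following is a minimal, self-contained userspace sketch of that walking pattern; the structure layout, parameter types and buffer contents are invented for illustration and are not the kernel's sctp_addip_param_t handling.

/* Illustrative sketch only: a simplified TLV walk in the style of the
 * ASCONF parameter processing above. Struct layout and buffer are
 * assumptions for demonstration, not kernel structures. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t length;	/* length of the whole TLV, network byte order */
};

static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct tlv_hdr) <= buf_len) {
		struct tlv_hdr hdr;
		uint16_t len;

		memcpy(&hdr, buf + off, sizeof(hdr));
		len = ntohs(hdr.length);

		/* A malformed length would otherwise loop forever or overrun. */
		if (len < sizeof(struct tlv_hdr) || off + len > buf_len)
			break;

		printf("param type %u, length %u\n",
		       (unsigned)ntohs(hdr.type), (unsigned)len);
		off += len;	/* skip this parameter, move to the next one */
	}
}

int main(void)
{
	/* Two made-up parameters: 8 bytes and 12 bytes long. */
	uint8_t buf[20] = {0};
	struct tlv_hdr h1 = { htons(0xc001), htons(8) };
	struct tlv_hdr h2 = { htons(0xc002), htons(12) };

	memcpy(buf, &h1, sizeof(h1));
	memcpy(buf + 8, &h2, sizeof(h2));
	walk_tlvs(buf, sizeof(buf));
	return 0;
}

The bounds checks before advancing are what keep a bogus length field from running the cursor off the end of the chunk.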
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 6db77d1329f7..135567493119 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -61,7 +61,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
61 struct sctp_endpoint *ep,
62 struct sctp_association *asoc,
63 void *event_arg,
64 sctp_disposition_t status,
65 sctp_cmd_seq_t *commands,
66 gfp_t gfp);
67static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
@@ -78,7 +78,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
78 ********************************************************************/
79
80/* A helper function for delayed processing of INET ECN CE bit. */
81static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
82 __u32 lowest_tsn)
83{
84 /* Save the TSN away for comparison when we receive CWR */
@@ -160,7 +160,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
160 struct sctp_transport *trans = asoc->peer.last_data_from;
161 int error = 0;
162
163 if (force ||
164 (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
165 (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
166 asoc->peer.sack_needed = 1;
@@ -178,7 +178,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
178 * [This is actually not mentioned in Section 6, but we
179 * implement it here anyway. --piggy]
180 */
181 if (max_tsn_seen != ctsn)
182 asoc->peer.sack_needed = 1;
183
184 /* From 6.2 Acknowledgement on Reception of DATA Chunks:
@@ -199,10 +199,10 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
199 * for the association.
200 */
201 if (trans)
202 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
203 trans->sackdelay;
204 else
205 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
206 asoc->sackdelay;
207
208 /* Restart the SACK timer. */
@@ -338,8 +338,8 @@ static void sctp_generate_t4_rto_event(unsigned long data)
338
339static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
340{
341 struct sctp_association *asoc = (struct sctp_association *)data;
342 sctp_generate_timeout_event(asoc,
343 SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
344
345} /* sctp_generate_t5_shutdown_guard_event() */
@@ -380,7 +380,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
380 asoc->state, asoc->ep, asoc,
381 transport, GFP_ATOMIC);
382
383 if (error)
384 asoc->base.sk->sk_err = -error;
385
386out_unlock:
@@ -570,7 +570,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
570
571/* Helper function to stop any pending T3-RTX timers */
572static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
573 struct sctp_association *asoc)
574{
575 struct sctp_transport *t;
576 struct list_head *pos;
@@ -675,7 +675,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
675/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
676 * the transport for a shutdown chunk.
677 */
678static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
679 struct sctp_association *asoc,
680 struct sctp_chunk *chunk)
681{
@@ -688,7 +688,7 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
688}
689
690/* Helper function to change the state of an association. */
691static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
692 struct sctp_association *asoc,
693 sctp_state_t state)
694{
@@ -727,7 +727,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
727 sctp_state(asoc, SHUTDOWN_RECEIVED)) {
728 /* Wake up any processes waiting in the asoc's wait queue in
729 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
730 */
731 if (waitqueue_active(&asoc->wait))
732 wake_up_interruptible(&asoc->wait);
733
@@ -749,9 +749,9 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
749 struct sock *sk = asoc->base.sk;
750
751 /* If it is a non-temporary association belonging to a TCP-style
752 * listening socket that is not closed, do not free it so that accept()
753 * can pick it up later.
754 */
755 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
756 (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
757 return;
@@ -764,7 +764,7 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
764 * ADDIP Section 4.1 ASCONF Chunk Procedures
765 * A4) Start a T-4 RTO timer, using the RTO value of the selected
766 * destination address (we use active path instead of primary path just
767 * because primary path may be inactive.
768 */
769static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
770 struct sctp_association *asoc,
@@ -777,7 +777,7 @@ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
777 chunk->transport = t;
778}
779
780/* Process an incoming Operation Error Chunk. */
781static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
782 struct sctp_association *asoc,
783 struct sctp_chunk *chunk)
@@ -816,7 +816,7 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
816}
817
818/* Process variable FWDTSN chunk information. */
819static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
820 struct sctp_chunk *chunk)
821{
822 struct sctp_fwdtsn_skip *skip;
@@ -828,9 +828,9 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
828 return;
829}
830
831/* Helper function to remove the association non-primary peer
832 * transports.
833 */
834static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
835{
836 struct sctp_transport *t;
@@ -840,7 +840,7 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
840 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
841 t = list_entry(pos, struct sctp_transport, transports);
842 if (!sctp_cmp_addr_exact(&t->ipaddr,
843 &asoc->peer.primary_addr)) {
844 sctp_assoc_del_peer(asoc, &t->ipaddr);
845 }
846 }
@@ -915,7 +915,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
915 DEBUG_POST;
916
917 error = sctp_side_effects(event_type, subtype, state,
918 ep, asoc, event_arg, status,
919 &commands, gfp);
920 DEBUG_POST_SFX;
921
@@ -968,7 +968,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
968 error = -ENOMEM;
969 break;
970
971 case SCTP_DISPOSITION_DELETE_TCB:
972 /* This should now be a command. */
973 break;
974
@@ -1021,7 +1021,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1021 struct sctp_endpoint *ep,
1022 struct sctp_association *asoc,
1023 void *event_arg,
1024 sctp_disposition_t status,
1025 sctp_cmd_seq_t *commands,
1026 gfp_t gfp)
1027{
@@ -1057,7 +1057,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1057 case SCTP_CMD_NEW_ASOC:
1058 /* Register a new association. */
1059 if (local_cork) {
1060 sctp_outq_uncork(&asoc->outqueue);
1061 local_cork = 0;
1062 }
1063 asoc = cmd->obj.ptr;
@@ -1074,7 +1074,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1074 sctp_outq_teardown(&asoc->outqueue);
1075 break;
1076
1077 case SCTP_CMD_DELETE_TCB:
1078 if (local_cork) {
1079 sctp_outq_uncork(&asoc->outqueue);
1080 local_cork = 0;
@@ -1104,7 +1104,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1104
1105 case SCTP_CMD_PROCESS_FWDTSN:
1106 sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr);
1107 break;
1108
1109 case SCTP_CMD_GEN_SACK:
1110 /* Generate a Selective ACK.
@@ -1162,12 +1162,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1162 SCTP_CHUNK(cmd->obj.ptr));
1163
1164 /* FIXME - Eventually come up with a cleaner way to
1165 * enabling COOKIE-ECHO + DATA bundling during
1166 * multihoming stale cookie scenarios, the following
1167 * command plays with asoc->peer.retran_path to
1168 * avoid the problem of sending the COOKIE-ECHO and
1169 * DATA in different paths, which could result
1170 * in the association being ABORTed if the DATA chunk
1171 * is processed first by the server. Checking the
1172 * init error counter simply causes this command
1173 * to be executed only during failed attempts of
@@ -1177,7 +1177,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1177 asoc->peer.primary_path) &&
1178 (asoc->init_err_counter > 0)) {
1179 sctp_add_cmd_sf(commands,
1180 SCTP_CMD_FORCE_PRIM_RETRAN,
1181 SCTP_NULL());
1182 }
1183
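The sctp_gen_sack() hunk above picks the SACK delay from the transport the last DATA arrived on when one is known, and otherwise falls back to the association-wide value. A small sketch of that fallback, with invented types and plain millisecond values standing in for the kernel's jiffies-based fields:

/* Illustrative sketch only: the "per-transport value overrides the
 * association default" pattern used for the SACK delay above. */
#include <stdio.h>

struct fake_transport { unsigned long sackdelay; };
struct fake_assoc     { unsigned long sackdelay; };

static unsigned long pick_sack_timeout(const struct fake_assoc *asoc,
				       const struct fake_transport *trans)
{
	/* Use the transport's delay when the SACK is triggered by data
	 * from a known transport, otherwise fall back to the association. */
	return trans ? trans->sackdelay : asoc->sackdelay;
}

int main(void)
{
	struct fake_assoc a = { .sackdelay = 200 };
	struct fake_transport t = { .sackdelay = 50 };

	printf("with transport: %lu ms\n", pick_sack_timeout(&a, &t));
	printf("without transport: %lu ms\n", pick_sack_timeout(&a, NULL));
	return 0;
}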
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index fbbc9e6a3b78..b3cad8a03736 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -189,7 +189,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
189 0, 0, 0, GFP_ATOMIC);
190 if (ev)
191 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
192 SCTP_ULPEVENT(ev));
193
194 /* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
195 * will verify that it is in SHUTDOWN-ACK-SENT state, if it is
@@ -228,7 +228,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
228 * Verification Tag field to Tag_A, and also provide its own
229 * Verification Tag (Tag_Z) in the Initiate Tag field.
230 *
231 * Verification Tag: Must be 0.
232 *
233 * Inputs
234 * (endpoint, asoc, chunk)
@@ -256,7 +256,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
256 /* 6.10 Bundling
257 * An endpoint MUST NOT bundle INIT, INIT ACK or
258 * SHUTDOWN COMPLETE with any other chunks.
259 *
260 * IG Section 2.11.2
261 * Furthermore, we require that the receiver of an INIT chunk MUST
262 * enforce these rules by silently discarding an arriving packet
@@ -282,7 +282,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
282 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
283
284 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
285 * Tag.
286 */
287 if (chunk->sctp_hdr->vtag != 0)
288 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
@@ -326,7 +326,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
326 }
327 }
328
329 /* Grab the INIT header. */
330 chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
331
332 /* Tag the variable length parameters. */
@@ -594,7 +594,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
594 /* "Decode" the chunk. We have no optional parameters so we
595 * are in good shape.
596 */
597 chunk->subh.cookie_hdr =
598 (struct sctp_signed_cookie *)chunk->skb->data;
599 if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
600 sizeof(sctp_chunkhdr_t)))
@@ -665,7 +665,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
665 if (!ev)
666 goto nomem_ev;
667
668 /* Sockets API Draft Section 5.3.1.6
669 * When a peer sends a Adaptation Layer Indication parameter , SCTP
670 * delivers this notification to inform the application that of the
671 * peers requested adaptation layer.
@@ -891,7 +891,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
891 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
892 SCTP_TRANSPORT(transport));
893
894 return SCTP_DISPOSITION_CONSUME;
895}
896
897/*
@@ -1280,7 +1280,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1280 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
1281
1282 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
1283 * Tag.
1284 */
1285 if (chunk->sctp_hdr->vtag != 0)
1286 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
@@ -1548,7 +1548,7 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
1548 /* Per the above section, we'll discard the chunk if we have an
1549 * endpoint. If this is an OOTB INIT-ACK, treat it as such.
1550 */
1551 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
1552 return sctp_sf_ootb(ep, asoc, type, arg, commands);
1553 else
1554 return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
@@ -1760,9 +1760,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
1760
1761 /* Clarification from Implementor's Guide:
1762 * D) When both local and remote tags match the endpoint should
1763 * enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
1764 * It should stop any cookie timer that may be running and send
1765 * a COOKIE ACK.
1766 */
1767
1768 /* Don't accidentally move back into established state. */
@@ -1786,7 +1786,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
1786 SCTP_COMM_UP, 0,
1787 asoc->c.sinit_num_ostreams,
1788 asoc->c.sinit_max_instreams,
1789 GFP_ATOMIC);
1790 if (!ev)
1791 goto nomem;
1792
@@ -1870,7 +1870,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
1870 /* "Decode" the chunk. We have no optional parameters so we
1871 * are in good shape.
1872 */
1873 chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
1874 if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
1875 sizeof(sctp_chunkhdr_t)))
1876 goto nomem;
@@ -1936,7 +1936,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
1936 default: /* Discard packet for all others. */
1937 retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
1938 break;
1939 };
1940
1941 /* Delete the tempory new association. */
1942 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
@@ -2083,7 +2083,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
2083 */
2084 sctp_walk_errors(err, chunk->chunk_hdr) {
2085 if (SCTP_ERROR_STALE_COOKIE == err->cause)
2086 return sctp_sf_do_5_2_6_stale(ep, asoc, type,
2087 arg, commands);
2088 }
2089
@@ -2185,10 +2185,10 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
2185 */
2186 sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
2187
2188 /* If we've sent any data bundled with COOKIE-ECHO we will need to
2189 * resend
2190 */
2191 sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
2192 SCTP_TRANSPORT(asoc->peer.primary_path));
2193
2194 /* Cast away the const modifier, as we want to just
@@ -2274,7 +2274,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2274 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2275
2276 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2277 /* ASSOC_FAILED will DELETE_TCB. */
2278 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
2279 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
2280 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -2439,7 +2439,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
2439 ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
2440 if (!ev) {
2441 disposition = SCTP_DISPOSITION_NOMEM;
2442 goto out;
2443 }
2444 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
2445
@@ -2553,7 +2553,7 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
2553 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
2554 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
2555 commands);
2556
2557 cwr = (sctp_cwrhdr_t *) chunk->skb->data;
2558 skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t));
2559
@@ -2661,7 +2661,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
2661 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
2662 SCTP_NULL());
2663 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2664 }
2665
2666 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
2667 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
@@ -2743,7 +2743,7 @@ discard_noforce:
2743 return SCTP_DISPOSITION_DISCARD;
2744consume:
2745 return SCTP_DISPOSITION_CONSUME;
2746
2747}
2748
2749/*
@@ -2930,7 +2930,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
2930 /* Make an ABORT. The T bit will be set if the asoc
2931 * is NULL.
2932 */
2933 abort = sctp_make_abort(asoc, chunk, 0);
2934 if (!abort) {
2935 sctp_ootb_pkt_free(packet);
2936 return SCTP_DISPOSITION_NOMEM;
@@ -2994,7 +2994,7 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
2994 }
2995
2996 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
2997 SCTP_CHUNK(chunk));
2998 }
2999 return SCTP_DISPOSITION_CONSUME;
3000
@@ -3128,7 +3128,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3128 */
3129 if (SCTP_CID_ABORT == ch->type)
3130 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3131
3132 ch = (sctp_chunkhdr_t *) ch_end;
3133 } while (ch_end < skb->tail);
3134
@@ -3175,8 +3175,8 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
3175
3176 if (packet) {
3177 /* Make an SHUTDOWN_COMPLETE.
3178 * The T bit will be set if the asoc is NULL.
3179 */
3180 shut = sctp_make_shutdown_complete(asoc, chunk);
3181 if (!shut) {
3182 sctp_ootb_pkt_free(packet);
@@ -3261,10 +3261,10 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3261
3262 /* ADDIP 4.2 C1) Compare the value of the serial number to the value
3263 * the endpoint stored in a new association variable
3264 * 'Peer-Serial-Number'.
3265 */
3266 if (serial == asoc->peer.addip_serial + 1) {
3267 /* ADDIP 4.2 C2) If the value found in the serial number is
3268 * equal to the ('Peer-Serial-Number' + 1), the endpoint MUST
3269 * do V1-V5.
3270 */
@@ -3285,9 +3285,9 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3285 else
3286 return SCTP_DISPOSITION_DISCARD;
3287 } else {
3288 /* ADDIP 4.2 C4) Otherwise, the ASCONF Chunk is discarded since
3289 * it must be either a stale packet or from an attacker.
3290 */
3291 return SCTP_DISPOSITION_DISCARD;
3292 }
3293
@@ -3296,7 +3296,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3296 * being responded to.
3297 */
3298 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
3299
3300 return SCTP_DISPOSITION_CONSUME;
3301}
3302
@@ -3307,7 +3307,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3307 */
3308sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3309 const struct sctp_association *asoc,
3310 const sctp_subtype_t type, void *arg,
3311 sctp_cmd_seq_t *commands)
3312{
3313 struct sctp_chunk *asconf_ack = arg;
@@ -3359,7 +3359,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3359 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3360 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
3361 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3362 SCTP_ERROR(ECONNABORTED));
3363 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3364 SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
3365 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3387,7 +3387,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3387 */
3388 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
3389 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3390 SCTP_ERROR(ECONNABORTED));
3391 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3392 SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
3393 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3451,17 +3451,17 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
3451
3452 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
3453 if (len > sizeof(struct sctp_fwdtsn_hdr))
3454 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
3455 SCTP_CHUNK(chunk));
3456
3457 /* Count this as receiving DATA. */
3458 if (asoc->autoclose) {
3459 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
3460 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
3461 }
3462
3463 /* FIXME: For now send a SACK, but DATA processing may
3464 * send another.
3465 */
3466 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
3467
@@ -3511,9 +3511,9 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
3511
3512 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
3513 if (len > sizeof(struct sctp_fwdtsn_hdr))
3514 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
3515 SCTP_CHUNK(chunk));
3516
3517 /* Go a head and force a SACK, since we are shutting down. */
3518gen_shutdown:
3519 /* Implementor's Guide.
@@ -3527,7 +3527,7 @@ gen_shutdown:
3527 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
3528 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
3529
3530 return SCTP_DISPOSITION_CONSUME;
3531}
3532
3533/*
@@ -3706,7 +3706,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
3706 * if it's length is set to be smaller then the size of sctp_sack_chunk_t.
3707 *
3708 * We inform the other end by sending an ABORT with a Protocol Violation
3709 * error code.
3710 *
3711 * Section: Not specified
3712 * Verification Tag: Nothing to do
@@ -3747,7 +3747,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
3747 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3748 } else {
3749 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3750 SCTP_ERROR(ECONNABORTED));
3751 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3752 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3753 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -3756,7 +3756,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
3756 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
3757
3758 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
3759
3760 return SCTP_DISPOSITION_ABORT;
3761
3762nomem:
@@ -4437,7 +4437,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
4437 /* sctp-implguide 2.10 Issues with Heartbeating and failover
4438 *
4439 * HEARTBEAT ... is discontinued after sending either SHUTDOWN
4440 * or SHUTDOWN-ACK.
4441 */
4442 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
4443
@@ -4515,7 +4515,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
4515 /* sctp-implguide 2.10 Issues with Heartbeating and failover
4516 *
4517 * HEARTBEAT ... is discontinued after sending either SHUTDOWN
4518 * or SHUTDOWN-ACK.
4519 */
4520 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
4521
@@ -4874,7 +4874,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
4874 /* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
4875 * choose an alternate destination address (please refer to RFC2960
4876 * [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
4877 * chunk, it MUST be the same (including its serial number) as the last
4878 * ASCONF sent.
4879 */
4880 sctp_chunk_hold(asoc->addip_last_asconf);
@@ -4953,7 +4953,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
4953 /* sctpimpguide-05 Section 2.12.2
4954 * The sender of the SHUTDOWN MAY also start an overall guard timer
4955 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
4956 */
4957 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
4958 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
4959 disposition = SCTP_DISPOSITION_CONSUME;
@@ -5127,7 +5127,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
5127 vtag = ntohl(init->init_hdr.init_tag);
5128 break;
5129 }
5130 default:
5131 vtag = ntohl(chunk->sctp_hdr->vtag);
5132 break;
5133 }
@@ -5176,7 +5176,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
5176 /* Override the OOTB vtag from the cookie. */
5177 cookie = chunk->subh.cookie_hdr;
5178 packet->vtag = cookie->c.peer_vtag;
5179
5180 /* Set the skb to the belonging sock for accounting. */
5181 err_chunk->skb->sk = ep->base.sk;
5182 sctp_packet_append_chunk(packet, err_chunk);
@@ -5310,7 +5310,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5310 sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
5311 }
5312
5313 /* Spill over rwnd a little bit. Note: While allowed, this spill over
5314 * seems a bit troublesome in that frag_point varies based on
5315 * PMTU. In cases, such as loopback, this might be a rather
5316 * large spill over.
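Several of the state functions above reject a chunk via sctp_sf_violation_chunklen() unless sctp_chunk_length_valid() finds the declared chunk length plausible. A rough userspace sketch of that kind of check, using a simplified header and assumed semantics (the declared length must cover the structure about to be read and must not exceed the bytes actually received); it is not the kernel's implementation:

/* Illustrative sketch only: validate a declared length before trusting it. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct fake_chunkhdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network byte order, covers this header too */
};

static int chunk_length_valid(const struct fake_chunkhdr *ch,
			      size_t required, size_t bytes_on_wire)
{
	uint16_t declared = ntohs(ch->length);

	/* Reject lengths too small for the structure we want to read and
	 * lengths that claim more data than actually arrived. */
	return declared >= required && declared <= bytes_on_wire;
}

int main(void)
{
	struct fake_chunkhdr ch = { .type = 6, .flags = 0, .length = htons(8) };

	printf("complete chunk valid: %d\n",
	       chunk_length_valid(&ch, sizeof(ch), 8));
	printf("truncated chunk valid: %d\n",
	       chunk_length_valid(&ch, sizeof(ch), 4));
	return 0;
}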
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 5f6cc7aa661b..5e54b17377f4 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -954,7 +954,7 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
954 TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
955};
956
957static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
958 sctp_state_t state)
959{
960 if (state > SCTP_STATE_MAX)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 388d0fb1a377..536298c2eda2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -381,12 +381,12 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
381
382 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
383 *
384 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
385 * at any one time. If a sender, after sending an ASCONF chunk, decides
386 * it needs to transfer another ASCONF Chunk, it MUST wait until the
387 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
388 * subsequent ASCONF. Note this restriction binds each side, so at any
389 * time two ASCONF may be in-transit on any given association (one sent
390 * from each endpoint).
391 */
392static int sctp_send_asconf(struct sctp_association *asoc,
@@ -396,10 +396,10 @@ static int sctp_send_asconf(struct sctp_association *asoc,
396
397 /* If there is an outstanding ASCONF chunk, queue it for later
398 * transmission.
399 */
400 if (asoc->addip_last_asconf) {
401 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
402 goto out;
403 }
404
405 /* Hold the chunk until an ASCONF_ACK is received. */
@@ -449,7 +449,7 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
449 goto err_bindx_add;
450 }
451
452 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
453 af->sockaddr_len);
454
455 addr_buf += af->sockaddr_len;
@@ -470,13 +470,13 @@ err_bindx_add:
470 * associations that are part of the endpoint indicating that a list of local
471 * addresses are added to the endpoint.
472 *
473 * If any of the addresses is already in the bind address list of the
474 * association, we do not send the chunk for that association. But it will not
475 * affect other associations.
476 *
477 * Only sctp_setsockopt_bindx() is supposed to call this function.
478 */
479static int sctp_send_asconf_add_ip(struct sock *sk,
480 struct sockaddr *addrs,
481 int addrcnt)
482{
@@ -517,8 +517,8 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
517 continue;
518
519 /* Check if any address in the packed array of addresses is
520 * in the bind address list of the association. If so,
521 * do not send the asconf chunk to its peer, but continue with
522 * other associations.
523 */
524 addr_buf = addrs;
@@ -664,7 +664,7 @@ err_bindx_rem:
664 * the associations that are part of the endpoint indicating that a list of
665 * local addresses are removed from the endpoint.
666 *
667 * If any of the addresses is already in the bind address list of the
668 * association, we do not send the chunk for that association. But it will not
669 * affect other associations.
670 *
@@ -710,7 +710,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
710 continue;
711
712 /* Check if any address in the packed array of addresses is
713 * not present in the bind address list of the association.
714 * If so, do not send the asconf chunk to its peer, but
715 * continue with other associations.
716 */
@@ -898,7 +898,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
898 return -EFAULT;
899 }
900
901 /* Walk through the addrs buffer and count the number of addresses. */
902 addr_buf = kaddrs;
903 while (walk_size < addrs_size) {
904 sa_addr = (struct sockaddr *)addr_buf;
@@ -906,7 +906,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
906
907 /* If the address family is not supported or if this address
908 * causes the address buffer to overflow return EINVAL.
909 */
910 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
911 kfree(kaddrs);
912 return -EINVAL;
@@ -935,7 +935,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
935 default:
936 err = -EINVAL;
937 break;
938 };
939
940out:
941 kfree(kaddrs);
@@ -1035,10 +1035,10 @@ static int __sctp_connect(struct sock* sk,
1035 }
1036 } else {
1037 /*
1038 * If an unprivileged user inherits a 1-many
1039 * style socket with open associations on a
1040 * privileged port, it MAY be permitted to
1041 * accept new associations, but it SHOULD NOT
1042 * be permitted to open new associations.
1043 */
1044 if (ep->base.bind_addr.port < PROT_SOCK &&
@@ -1094,8 +1094,8 @@ static int __sctp_connect(struct sock* sk,
1094out_free:
1095
1096 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
1097 " kaddrs: %p err: %d\n",
1098 asoc, kaddrs, err);
1099 if (asoc)
1100 sctp_association_free(asoc);
1101 return err;
@@ -1435,7 +1435,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1435 * length messages when SCTP_EOF|SCTP_ABORT is not set.
1436 * If SCTP_ABORT is set, the message length could be non zero with
1437 * the msg_iov set to the user abort reason.
1438 */
1439 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
1440 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
1441 err = -EINVAL;
@@ -1599,7 +1599,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1599 = sinit->sinit_max_attempts;
1600 }
1601 if (sinit->sinit_max_init_timeo) {
1602 asoc->max_init_timeo =
1603 msecs_to_jiffies(sinit->sinit_max_init_timeo);
1604 }
1605 }
@@ -2298,7 +2298,7 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
2298 /* Get association, if assoc_id != 0 and the socket is a one
2299 * to many style socket, and an association was not found, then
2300 * the id was invalid.
2301 */
2302 asoc = sctp_id2assoc(sk, params.assoc_id);
2303 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
2304 return -EINVAL;
@@ -2307,22 +2307,22 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
2307 if (asoc) {
2308 asoc->sackdelay =
2309 msecs_to_jiffies(params.assoc_value);
2310 asoc->param_flags =
2311 (asoc->param_flags & ~SPP_SACKDELAY) |
2312 SPP_SACKDELAY_ENABLE;
2313 } else {
2314 sp->sackdelay = params.assoc_value;
2315 sp->param_flags =
2316 (sp->param_flags & ~SPP_SACKDELAY) |
2317 SPP_SACKDELAY_ENABLE;
2318 }
2319 } else {
2320 if (asoc) {
2321 asoc->param_flags =
2322 (asoc->param_flags & ~SPP_SACKDELAY) |
2323 SPP_SACKDELAY_DISABLE;
2324 } else {
2325 sp->param_flags =
2326 (sp->param_flags & ~SPP_SACKDELAY) |
2327 SPP_SACKDELAY_DISABLE;
2328 }
@@ -2338,17 +2338,17 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
2338 if (params.assoc_value) { 2338 if (params.assoc_value) {
2339 trans->sackdelay = 2339 trans->sackdelay =
2340 msecs_to_jiffies(params.assoc_value); 2340 msecs_to_jiffies(params.assoc_value);
2341 trans->param_flags = 2341 trans->param_flags =
2342 (trans->param_flags & ~SPP_SACKDELAY) | 2342 (trans->param_flags & ~SPP_SACKDELAY) |
2343 SPP_SACKDELAY_ENABLE; 2343 SPP_SACKDELAY_ENABLE;
2344 } else { 2344 } else {
2345 trans->param_flags = 2345 trans->param_flags =
2346 (trans->param_flags & ~SPP_SACKDELAY) | 2346 (trans->param_flags & ~SPP_SACKDELAY) |
2347 SPP_SACKDELAY_DISABLE; 2347 SPP_SACKDELAY_DISABLE;
2348 } 2348 }
2349 } 2349 }
2350 } 2350 }
2351 2351
2352 return 0; 2352 return 0;
2353} 2353}
2354 2354
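From userspace, the path through sctp_setsockopt_delayed_ack_time() is driven by a setsockopt() call along these lines; this is a minimal sketch assuming the SCTP_DELAYED_ACK_TIME option name and struct sctp_assoc_value layout of this era, and the 50 ms value is purely illustrative:

	#include <sys/socket.h>
	#include <netinet/sctp.h>

	/* Set the delayed-SACK timeout in milliseconds; assoc_id == 0 applies
	 * to the endpoint defaults, and assoc_value == 0 disables delayed
	 * SACKs entirely, matching the enable/disable branches above. */
	static int set_sack_delay(int fd, unsigned int ms)
	{
		struct sctp_assoc_value av = {
			.assoc_id    = 0,
			.assoc_value = ms,	/* e.g. 50; 0 disables */
		};
		return setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_ACK_TIME,
				  &av, sizeof(av));
	}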
@@ -2374,13 +2374,13 @@ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int opt
2374 		return -EFAULT;
2375
2376 	if (sinit.sinit_num_ostreams)
2377 		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
2378 	if (sinit.sinit_max_instreams)
2379 		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
2380 	if (sinit.sinit_max_attempts)
2381 		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
2382 	if (sinit.sinit_max_init_timeo)
2383 		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
2384
2385 	return 0;
2386}
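The corresponding userspace call uses the standard SCTP_INITMSG option with struct sctp_initmsg; a rough sketch follows, with the stream counts chosen arbitrarily for illustration (zeroed fields are left at the socket defaults, mirroring the "only non-zero fields are applied" logic above):

	#include <sys/socket.h>
	#include <netinet/sctp.h>
	#include <string.h>

	/* Ask for 10 outbound streams and accept up to 10 inbound streams
	 * on future associations of this socket. */
	static int set_initmsg(int fd)
	{
		struct sctp_initmsg im;

		memset(&im, 0, sizeof(im));
		im.sinit_num_ostreams  = 10;
		im.sinit_max_instreams = 10;
		return setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
	}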
@@ -2511,7 +2511,7 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int opt
2511
2512 	if (asoc) {
2513 		if (rtoinfo.srto_initial != 0)
2514 			asoc->rto_initial =
2515 				msecs_to_jiffies(rtoinfo.srto_initial);
2516 		if (rtoinfo.srto_max != 0)
2517 			asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
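Again for reference, the getter/setter pair this code serves is the SCTP_RTOINFO socket option; the sketch below assumes the usual struct sctp_rtoinfo field names, and the 200/2000/100 ms figures are illustrative values, not recommendations (zero fields leave the current setting untouched, as in the kernel code above):

	#include <sys/socket.h>
	#include <netinet/sctp.h>
	#include <string.h>

	/* Tighten the RTO bounds (milliseconds) for one association. */
	static int set_rto(int fd, sctp_assoc_t id)
	{
		struct sctp_rtoinfo rto;

		memset(&rto, 0, sizeof(rto));
		rto.srto_assoc_id = id;
		rto.srto_initial  = 200;
		rto.srto_max      = 2000;
		rto.srto_min      = 100;
		return setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
	}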
@@ -2665,7 +2665,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
2665 	/* Update the frag_point of the existing associations. */
2666 	list_for_each(pos, &(sp->ep->asocs)) {
2667 		asoc = list_entry(pos, struct sctp_association, asocs);
2668 		asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
2669 	}
2670
2671 	return 0;
@@ -2703,7 +2703,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
2703 		return -EFAULT;
2704
2705 	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
2706 	if (!asoc)
2707 		return -EINVAL;
2708
2709 	if (!asoc->peer.asconf_capable)
@@ -3015,7 +3015,7 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
3015
3016out:
3017 	sctp_release_sock(sk);
3018 	*err = error;
3019 	return newsk;
3020}
3021
@@ -3087,7 +3087,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3087 	sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
3088
3089 	/* Initialize default event subscriptions. By default, all the
3090 	 * options are off.
3091 	 */
3092 	memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
3093
@@ -3099,8 +3099,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3099 	sp->pathmtu = 0; // allow default discovery
3100 	sp->sackdelay = sctp_sack_timeout;
3101 	sp->param_flags = SPP_HB_ENABLE |
3102 			  SPP_PMTUD_ENABLE |
3103 			  SPP_SACKDELAY_ENABLE;
3104
3105 	/* If enabled no SCTP message fragmentation will be performed.
3106 	 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
@@ -3680,7 +3680,7 @@ static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len,
3680 	/* Get association, if assoc_id != 0 and the socket is a one
3681 	 * to many style socket, and an association was not found, then
3682 	 * the id was invalid.
3683 	 */
3684 	asoc = sctp_id2assoc(sk, params.assoc_id);
3685 	if (!asoc && params.assoc_id && sctp_style(sk, UDP))
3686 		return -EINVAL;
@@ -3756,7 +3756,7 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
3756 	return cnt;
3757}
3758
3759/*
3760 * Old API for getting list of peer addresses. Does not work for 32-bit
3761 * programs running on a 64-bit kernel
3762 */
@@ -3833,7 +3833,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
3833 		return -EINVAL;
3834
3835 	to = optval + offsetof(struct sctp_getaddrs,addrs);
3836 	space_left = len - sizeof(struct sctp_getaddrs) -
3837 			offsetof(struct sctp_getaddrs,addrs);
3838
3839 	list_for_each(pos, &asoc->peer.transport_addr_list) {
@@ -3907,7 +3907,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
3907 			addr = list_entry(pos,
3908 					  struct sctp_sockaddr_entry,
3909 					  list);
3910 			if ((PF_INET == sk->sk_family) &&
3911 			    (AF_INET6 == addr->a.sa.sa_family))
3912 				continue;
3913 			cnt++;
@@ -3941,7 +3941,7 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
3941
3942 	list_for_each_safe(pos, next, &sctp_local_addr_list) {
3943 		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
3944 		if ((PF_INET == sk->sk_family) &&
3945 		    (AF_INET6 == addr->a.sa.sa_family))
3946 			continue;
3947 		memcpy(&temp, &addr->a, sizeof(temp));
@@ -3970,7 +3970,7 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
3970
3971 	list_for_each_safe(pos, next, &sctp_local_addr_list) {
3972 		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
3973 		if ((PF_INET == sk->sk_family) &&
3974 		    (AF_INET6 == addr->a.sa.sa_family))
3975 			continue;
3976 		memcpy(&temp, &addr->a, sizeof(temp));
@@ -4051,7 +4051,7 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4051 				err = cnt;
4052 				goto unlock;
4053 			}
4054 			goto copy_getaddrs;
4055 		}
4056 	}
4057
@@ -4139,7 +4139,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4139 				err = cnt;
4140 				goto unlock;
4141 			}
4142 			goto copy_getaddrs;
4143 		}
4144 	}
4145
@@ -4196,7 +4196,7 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
4196
4197 	if (!asoc->peer.primary_path)
4198 		return -ENOTCONN;
4199
4200 	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
4201 	       asoc->peer.primary_path->af_specific->sockaddr_len);
4202
@@ -4864,7 +4864,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
4864 	if (!backlog) {
4865 		if (sctp_sstate(sk, CLOSED))
4866 			return 0;
4867
4868 		sctp_unhash_endpoint(ep);
4869 		sk->sk_state = SCTP_SS_CLOSED;
4870 	}
@@ -4872,7 +4872,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
4872 	/* Return if we are already listening. */
4873 	if (sctp_sstate(sk, LISTENING))
4874 		return 0;
4875
4876 	/*
4877 	 * If a bind() or sctp_bindx() is not called prior to a listen()
4878 	 * call that allows new associations to be accepted, the system
@@ -4907,7 +4907,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
4907 	if (!backlog) {
4908 		if (sctp_sstate(sk, CLOSED))
4909 			return 0;
4910
4911 		sctp_unhash_endpoint(ep);
4912 		sk->sk_state = SCTP_SS_CLOSED;
4913 	}
@@ -5010,7 +5010,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
5010 	 */
5011 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
5012 		return (!list_empty(&sp->ep->asocs)) ?
5013 			(POLLIN | POLLRDNORM) : 0;
5014
5015 	mask = 0;
5016
@@ -5430,7 +5430,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
5430 	DEFINE_WAIT(wait);
5431
5432 	SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n",
5433 			  asoc, (long)(*timeo_p), msg_len);
5434
5435 	/* Increment the association's refcnt. */
5436 	sctp_association_hold(asoc);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3e5936a5f671..a596f5308cb1 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -130,9 +130,9 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
130struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
131 					   gfp_t gfp)
132{
133 	struct sctp_transport *transport;
134
135 	transport = t_new(struct sctp_transport, gfp);
136 	if (!transport)
137 		goto fail;
138
@@ -185,7 +185,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
185 	if (transport->asoc)
186 		sctp_association_put(transport->asoc);
187
188 	sctp_packet_free(&transport->packet);
189
190 	dst_release(transport->dst);
191 	kfree(transport);
@@ -268,7 +268,7 @@ void sctp_transport_route(struct sctp_transport *transport,
268
269 	/* Initialize sk->sk_rcv_saddr, if the transport is the
270 	 * association's active path for getsockname().
271 	 */
272 	if (asoc && (transport == asoc->peer.active_path))
273 		opt->pf->af->to_sk_saddr(&transport->saddr,
274 					 asoc->base.sk);
@@ -459,8 +459,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
459 		 * destination address(es) to which the missing DATA chunks
460 		 * were last sent, according to the formula described in
461 		 * Section 7.2.3.
462 		 *
463 		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
464 		 * losses from SACK (see Section 7.2.4), An endpoint
465 		 * should do the following:
466 		 * ssthresh = max(cwnd/2, 4*MTU)
@@ -488,7 +488,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
488 		if ((jiffies - transport->last_time_ecne_reduced) >
489 		    transport->rtt) {
490 			transport->ssthresh = max(transport->cwnd/2,
491 						  4*transport->asoc->pathmtu);
492 			transport->cwnd = transport->ssthresh;
493 			transport->last_time_ecne_reduced = jiffies;
494 		}
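As a quick numeric check of the RFC 2960 7.2.3 reduction used in these hunks, here is a standalone sketch; the 1500-byte path MTU and 9000-byte cwnd are assumed values chosen only to make the arithmetic visible:

	#include <stdio.h>

	#define MAX(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		unsigned int pathmtu = 1500;	/* assumed path MTU */
		unsigned int cwnd = 9000;	/* congestion window before the loss */
		unsigned int ssthresh;

		/* RFC 2960 7.2.3: ssthresh = max(cwnd/2, 4*MTU), then cwnd = ssthresh */
		ssthresh = MAX(cwnd / 2, 4 * pathmtu);	/* max(4500, 6000) = 6000 */
		cwnd = ssthresh;
		printf("ssthresh=%u cwnd=%u\n", ssthresh, cwnd);
		return 0;
	}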
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 42d9498c64fa..d3192a1babcc 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -277,7 +277,7 @@ static void sctp_tsnmap_update(struct sctp_tsnmap *map)
277 		/* Now tsn_map must have been all '1's,
278 		 * so we swap the map and check the overflow table
279 		 */
280 		__u8 *tmp = map->tsn_map;
281 		memset(tmp, 0, map->len);
282 		map->tsn_map = map->overflow_map;
283 		map->overflow_map = tmp;
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 445e07a7ac4b..2e11bc8d5d35 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -749,7 +749,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
749 	 */
750 	pd->pdapi_length = sizeof(struct sctp_pdapi_event);
751
752 	/* pdapi_indication: 32 bits (unsigned integer)
753 	 *
754 	 * This field holds the indication being sent to the application.
755 	 */
@@ -790,13 +790,13 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
790 		return;
791
792 	/* Sockets API Extensions for SCTP
793 	 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
794 	 *
795 	 * sinfo_stream: 16 bits (unsigned integer)
796 	 *
797 	 * For recvmsg() the SCTP stack places the message's stream number in
798 	 * this value.
799 	 */
800 	sinfo.sinfo_stream = event->stream;
801 	/* sinfo_ssn: 16 bits (unsigned integer)
802 	 *
@@ -828,7 +828,7 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
828 	sinfo.sinfo_flags = event->flags;
829 	/* sinfo_tsn: 32 bit (unsigned integer)
830 	 *
831 	 * For the receiving side, this field holds a TSN that was
832 	 * assigned to one of the SCTP Data Chunks.
833 	 */
834 	sinfo.sinfo_tsn = event->tsn;
@@ -879,7 +879,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
879 	 * fragment of the real event.  However, we still need to do rwnd
880 	 * accounting.
881 	 * In general, the skb passed from IP can have only 1 level of
882 	 * fragments. But we allow multiple levels of fragments.
883 	 */
884 	for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
885 		sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
@@ -888,7 +888,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
888
889/* Do accounting for bytes just read by user and release the references to
890 * the association.
891 */
892static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
893{
894 	struct sk_buff *skb, *frag;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index e1d144275f97..f4759a9bdaee 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -191,7 +191,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
191 		queue = &sk->sk_receive_queue;
192 	} else if (ulpq->pd_mode) {
193 		if (event->msg_flags & MSG_NOTIFICATION)
194 			queue = &sctp_sk(sk)->pd_lobby;
195 		else {
196 			clear_pd = event->msg_flags & MSG_EOR;
197 			queue = &sk->sk_receive_queue;
@@ -298,32 +298,32 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
298 		 */
299 		if (last)
300 			last->next = pos;
301 		else {
302 			if (skb_cloned(f_frag)) {
303 				/* This is a cloned skb, we can't just modify
304 				 * the frag_list.  We need a new skb to do that.
305 				 * Instead of calling skb_unshare(), we'll do it
306 				 * ourselves since we need to delay the free.
307 				 */
308 				new = skb_copy(f_frag, GFP_ATOMIC);
309 				if (!new)
310 					return NULL;	/* try again later */
311
312 				sctp_skb_set_owner_r(new, f_frag->sk);
313
314 				skb_shinfo(new)->frag_list = pos;
315 			} else
316 				skb_shinfo(f_frag)->frag_list = pos;
317 		}
318
319 	/* Remove the first fragment from the reassembly queue.  */
320 	__skb_unlink(f_frag, queue);
321
322 	/* if we did unshare, then free the old skb and re-assign */
323 	if (new) {
324 		kfree_skb(f_frag);
325 		f_frag = new;
326 	}
327
328 	while (pos) {
329
@@ -335,7 +335,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
335
336 		/* Remove the fragment from the reassembly queue.  */
337 		__skb_unlink(pos, queue);
338
339 		/* Break if we have reached the last fragment.  */
340 		if (pos == l_frag)
341 			break;
@@ -624,7 +624,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
624
625 	sid = event->stream;
626 	ssn = event->ssn;
627
628 	cevent = (struct sctp_ulpevent *) pos->cb;
629 	csid = cevent->stream;
630 	cssn = cevent->ssn;
@@ -718,11 +718,11 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
718 		if (cssn != sctp_ssn_peek(in, csid))
719 			break;
720
721 		/* Found it, so mark in the ssnmap. */
722 		sctp_ssn_next(in, csid);
723
724 		__skb_unlink(pos, &ulpq->lobby);
725 		if (!event) {
726 			/* Create a temporary list to collect chunks on.  */
727 			event = sctp_skb2event(pos);
728 			__skb_queue_tail(&temp, sctp_event2skb(event));
@@ -755,7 +755,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
755 	sctp_ssn_skip(in, sid, ssn);
756
757 	/* Go find any other chunks that were waiting for
758 	 * ordering and deliver them if needed.
759 	 */
760 	sctp_ulpq_reap_ordered(ulpq);
761 	return;
@@ -849,7 +849,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
849 	if (chunk) {
850 		needed = ntohs(chunk->chunk_hdr->length);
851 		needed -= sizeof(sctp_data_chunk_t);
852 	} else
853 		needed = SCTP_DEFAULT_MAXWINDOW;
854
855 	freed = 0;
@@ -866,7 +866,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
866 		tsn = ntohl(chunk->subh.data_hdr->tsn);
867 		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
868 		sctp_ulpq_tail_data(ulpq, chunk, gfp);
869
870 		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
871 	}
872
diff --git a/net/socket.c b/net/socket.c
index 5f374e1ff526..a92f59580234 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2180,7 +2180,7 @@ done:
2180}
2181
2182int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
2183 		   int flags)
2184{
2185 	return sock->ops->connect(sock, addr, addrlen, flags);
2186}
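For in-kernel callers, this helper is normally paired with sock_create_kern(); the sketch below is illustrative only (the loopback address, port 80, and the surrounding function are assumptions, and error handling is abbreviated):

	#include <linux/net.h>
	#include <linux/in.h>
	#include <linux/socket.h>

	/* Connect an in-kernel TCP socket to 127.0.0.1:80 (illustrative). */
	static int connect_loopback(struct socket **res)
	{
		struct sockaddr_in daddr = {
			.sin_family = AF_INET,
			.sin_port   = htons(80),
			.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
		};
		int err;

		err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, res);
		if (err < 0)
			return err;

		err = kernel_connect(*res, (struct sockaddr *)&daddr, sizeof(daddr), 0);
		if (err < 0)
			sock_release(*res);
		return err;
	}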
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 993ff1a5d945..76f7eac4082d 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -213,7 +213,7 @@ retry:
213 		rpcauth_gc_credcache(auth, &free);
214 	hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
215 		struct rpc_cred *entry;
216 		entry = hlist_entry(pos, struct rpc_cred, cr_hash);
217 		if (entry->cr_ops->crmatch(acred, entry, flags)) {
218 			hlist_del(&entry->cr_hash);
219 			cred = entry;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e1a104abb782..718fb94ad0f7 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2,7 +2,7 @@
2 *  linux/net/sunrpc/auth_gss/auth_gss.c
3 *
4 *  RPCSEC_GSS client authentication.
5 *
6 *  Copyright (c) 2000 The Regents of the University of Michigan.
7 *  All rights reserved.
8 *
@@ -74,7 +74,7 @@ static struct rpc_credops gss_credops;
74* as it is passed to gssd to signal the use of
75* machine creds should be part of the shared rpc interface */
76
77#define CA_RUN_AS_MACHINE  0x00000200
78
79/* dump the buffer in `emacs-hexl' style */
80#define isprint(c)      ((c > 0x1f) && (c < 0x7f))
@@ -607,8 +607,8 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
607 	}
608}
609
610/*
611 * NOTE: we have the opportunity to use different
612 * parameters based on the input flavor (which must be a pseudoflavor)
613 */
614static struct rpc_auth *
@@ -869,7 +869,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
869
870 	flav = ntohl(*p++);
871 	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
872 		goto out_bad;
873 	if (flav != RPC_AUTH_GSS)
874 		goto out_bad;
875 	seq = htonl(task->tk_rqstp->rq_seqno);
@@ -925,7 +925,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
925 	*integ_len = htonl(integ_buf.len);
926
927 	/* guess whether we're in the head or the tail: */
928 	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
929 		iov = snd_buf->tail;
930 	else
931 		iov = snd_buf->head;
@@ -1030,7 +1030,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1030 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1031 	/* RPC_SLACK_SPACE should prevent this ever happening: */
1032 	BUG_ON(snd_buf->len > snd_buf->buflen);
1033 	status = -EIO;
1034 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1035 	 * done anyway, so it's safe to put the request on the wire: */
1036 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
@@ -1079,7 +1079,7 @@ gss_wrap_req(struct rpc_task *task,
1079 			status = gss_wrap_req_integ(cred, ctx, encode,
1080 								rqstp, p, obj);
1081 			break;
1082 		case RPC_GSS_SVC_PRIVACY:
1083 			status = gss_wrap_req_priv(cred, ctx, encode,
1084 					rqstp, p, obj);
1085 			break;
@@ -1179,7 +1179,7 @@ gss_unwrap_resp(struct rpc_task *task,
1179 			if (status)
1180 				goto out;
1181 			break;
1182 		case RPC_GSS_SVC_PRIVACY:
1183 			status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
1184 			if (status)
1185 				goto out;
@@ -1196,7 +1196,7 @@ out:
1196 			status);
1197 	return status;
1198}
1199
1200static struct rpc_authops authgss_ops = {
1201 	.owner		= THIS_MODULE,
1202 	.au_flavor	= RPC_AUTH_GSS,
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index 826df44e7fca..ea8c92ecdae5 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -11,7 +11,7 @@
11
12/*
13 * Copyright 1993 by OpenVision Technologies, Inc.
14 *
15 * Permission to use, copy, modify, distribute, and sell this software
16 * and its documentation for any purpose is hereby granted without fee,
17 * provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
21 * without specific, written prior permission. OpenVision makes no
22 * representations about the suitability of this software for any
23 * purpose.  It is provided "as is" without express or implied warranty.
24 *
25 * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
26 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
27 * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -201,7 +201,7 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
201 		return(G_BAD_TOK_HEADER);
202 	if (*buf++ != 0x06)
203 		return(G_BAD_TOK_HEADER);
204
205 	if ((toksize-=1) < 0)
206 		return(G_BAD_TOK_HEADER);
207 	toid.len = *buf++;
@@ -211,9 +211,9 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
211 	toid.data = buf;
212 	buf+=toid.len;
213
214 	if (! g_OID_equal(&toid, mech))
215 		ret = G_WRONG_MECH;
216
217 	/* G_WRONG_MECH is not returned immediately because it's more important
218 	   to return G_BAD_TOK_HEADER if the token header is in fact bad */
219
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d926cda88623..0a9948de0992 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -58,7 +58,7 @@ krb5_encrypt(
58 	int length)
59{
60 	u32 ret = -EINVAL;
61 	struct scatterlist sg[1];
62 	u8 local_iv[16] = {0};
63 	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
64
@@ -67,7 +67,7 @@ krb5_encrypt(
67
68 	if (crypto_blkcipher_ivsize(tfm) > 16) {
69 		dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
70 			crypto_blkcipher_ivsize(tfm));
71 		goto out;
72 	}
73
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c604baf3a5f6..3e315a68efaa 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -11,7 +11,7 @@
11
12/*
13 * Copyright 1993 by OpenVision Technologies, Inc.
14 *
15 * Permission to use, copy, modify, distribute, and sell this software
16 * and its documentation for any purpose is hereby granted without fee,
17 * provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
21 * without specific, written prior permission. OpenVision makes no
22 * representations about the suitability of this software for any
23 * purpose.  It is provided "as is" without express or implied warranty.
24 *
25 * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
26 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
27 * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 3db745379d06..3423890e4a30 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -6,14 +6,14 @@
6 *
7 *  J. Bruce Fields   <bfields@umich.edu>
8 *
9 *  Redistribution and use in source and binary forms, with or without
10 *  modification, are permitted provided that the following conditions
11 *  are met:
12 *
13 *  1. Redistributions of source code must retain the above copyright
14 *     notice, this list of conditions and the following disclaimer.
15 *  2. Redistributions in binary form must reproduce the above copyright
16 *     notice, this list of conditions and the following disclaimer in the
17 *     documentation and/or other materials provided with the distribution.
18 *  3. Neither the name of the University nor the names of its
19 *     contributors may be used to endorse or promote products derived
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 35188b6ea8f7..8400b621971e 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -59,7 +59,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
59
60 	/* count trailing 0's */
61 	for(i = in->len; i > 0; i--) {
62 		if (*ptr == 0) {
63 			ptr--;
64 			elen--;
65 		} else
@@ -82,7 +82,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
82
83/*
84 * decode_asn1_bitstring()
85 *
86 * decode a bitstring into a buffer of the expected length.
87 * enclen = bit string length
88 * explen = expected length (define in rfc)
@@ -97,9 +97,9 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
97 	return 1;
98}
99
100/*
101 * SPKMInnerContextToken choice SPKM_MIC asn1 token layout
102 *
103 * contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
104 *
105 * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
@@ -107,21 +107,21 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
107 * pos  value
108 * ----------
109 * [0]	a4  SPKM-MIC tag
110 * [1]	??  innertoken length  (max 44)
111 *
112 *
113 * tok_hdr piece of checksum data starts here
114 *
115 * the maximum mic-header len = 9 + 17 = 26
116 *	mic-header
117 *	----------
118 * [2]	30      SEQUENCE tag
119 * [3]	??	mic-header length: (max 23) = TokenID + ContextID
120 *
121 *		TokenID  - all fields constant and can be hardcoded
122 *		-------
123 * [4]	  02	Type 2
124 * [5]	  02	Length 2
125 * [6][7] 01 01	TokenID (SPKM_MIC_TOK)
126 *
127 *		ContextID  - encoded length not constant, calculated
@@ -131,17 +131,17 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
131 * [10]	??	ctxzbit
132 * [11]	 	contextid
133 *
134 * mic_header piece of checksum data ends here.
135 *
136 *	int-cksum - encoded length not constant, calculated
137 *	---------
138 * [??]	03	Type 3
139 * [??]	??	encoded length
140 * [??]	??	md5zbit
141 * [??]	 	int-cksum (NID_md5 = 16)
142 *
143 * maximum SPKM-MIC innercontext token length =
144 *	10 + encoded contextid_size(17 max) + 2 + encoded
145 *	cksum_size (17 maxfor NID_md5) = 46
146 */
147
@@ -178,8 +178,8 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
178/*
179 * spkm3_mic_innercontext_token()
180 *
181 * *tokp points to the beginning of the SPKM_MIC token  described
182 * in rfc 2025, section 3.2.1:
183 *
184 * toklen is the inner token length
185 */
@@ -209,7 +209,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
209
210 	/* spkm3 innercontext token preamble */
211 	if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
212 		dprintk("RPC: BAD SPKM ictoken preamble\n");
213 		goto out;
214 	}
215
@@ -245,9 +245,9 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
245 		goto out;
246
247 	/*
248 	 * in the current implementation: the optional int-alg is not present
249 	 * so the default int-alg (md5) is used the optional snd-seq field is
250 	 * also not present
251 	 */
252
253 	if (*mic_hdrlen != 6 + ctxelen) {
@@ -255,7 +255,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
255 		goto out;
256 	}
257 	/* checksum */
258 	*cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
259
260 	ret = GSS_S_COMPLETE;
261out:
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index e54581ca7570..35a1b34c4a1d 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -45,7 +45,7 @@
45
46/*
47 * spkm3_read_token()
48 *
49 * only SPKM_MIC_TOK with md5 intg-alg is supported
50 */
51u32
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 066c64a97fd8..8fde38ecaf21 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -172,8 +172,8 @@ static struct cache_head *rsi_alloc(void)
172}
173
174static void rsi_request(struct cache_detail *cd,
175 			struct cache_head *h,
176 			char **bpp, int *blen)
177{
178 	struct rsi *rsii = container_of(h, struct rsi, h);
179
@@ -184,7 +184,7 @@ static void rsi_request(struct cache_detail *cd,
184
185
186static int rsi_parse(struct cache_detail *cd,
187 		     char *mesg, int mlen)
188{
189 	/* context token expiry major minor context token */
190 	char *buf = mesg;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 14274490f92e..c80df455802d 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -274,7 +274,7 @@ int cache_check(struct cache_detail *detail,
274 *
275 * A table is then only scanned if the current time is at least
276 * the nextcheck time.
277 *
278 */
279
280static LIST_HEAD(cache_list);
@@ -296,16 +296,16 @@ void cache_register(struct cache_detail *cd)
296 		struct proc_dir_entry *p;
297 		cd->proc_ent->owner = cd->owner;
298 		cd->channel_ent = cd->content_ent = NULL;
299
300 		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
301 				      cd->proc_ent);
302 		cd->flush_ent = p;
303 		if (p) {
304 			p->proc_fops = &cache_flush_operations;
305 			p->owner = cd->owner;
306 			p->data = cd;
307 		}
308
309 		if (cd->cache_request || cd->cache_parse) {
310 			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
311 					      cd->proc_ent);
@@ -316,16 +316,16 @@ void cache_register(struct cache_detail *cd)
316 				p->data = cd;
317 			}
318 		}
319 		if (cd->cache_show) {
320 			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
321 					      cd->proc_ent);
322 			cd->content_ent = p;
323 			if (p) {
324 				p->proc_fops = &content_file_operations;
325 				p->owner = cd->owner;
326 				p->data = cd;
327 			}
328 		}
329 	}
330 	rwlock_init(&cd->hash_lock);
331 	INIT_LIST_HEAD(&cd->queue);
@@ -417,15 +417,15 @@ static int cache_clean(void)
417 			current_index++;
418
419 	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
420
421 	if (current_detail && current_index < current_detail->hash_size) {
422 		struct cache_head *ch, **cp;
423 		struct cache_detail *d;
424
425 		write_lock(&current_detail->hash_lock);
426
427 		/* Ok, now to clean this strand */
428
429 		cp = & current_detail->hash_table[current_index];
430 		ch = *cp;
431 		for (; ch; cp= & ch->next, ch= *cp) {
@@ -477,9 +477,9 @@ static void do_cache_clean(struct work_struct *work)
477}
478
479
480/*
481 * Clean all caches promptly.  This just calls cache_clean
482 * repeatedly until we are sure that every cache has had a chance to
483 * be fully cleaned
484 */
485void cache_flush(void)
@@ -508,7 +508,7 @@ void cache_purge(struct cache_detail *detail)
508 * All deferred requests are stored in a hash table,
509 * indexed by "struct cache_head *".
510 * As it may be wasteful to store a whole request
511 * structure, we allow the request to provide a
512 * deferred form, which must contain a
513 * 'struct cache_deferred_req'
514 * This cache_deferred_req contains a method to allow
@@ -584,7 +584,7 @@ static void cache_revisit_request(struct cache_head *item)
584
585 	INIT_LIST_HEAD(&pending);
586 	spin_lock(&cache_defer_lock);
587
588 	lp = cache_defer_hash[hash].next;
589 	if (lp) {
590 		while (lp != &cache_defer_hash[hash]) {
@@ -614,7 +614,7 @@ void cache_clean_deferred(void *owner)
614
615 	INIT_LIST_HEAD(&pending);
616 	spin_lock(&cache_defer_lock);
617
618 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
619 		if (dreq->owner == owner) {
620 			list_del(&dreq->hash);
@@ -639,7 +639,7 @@ void cache_clean_deferred(void *owner)
639 * On write, an update request is processed
640 * Poll works if anything to read, and always allows write
641 *
642 * Implemented by linked list of requests.  Each open file has
643 * a ->private that also exists in this list.  New request are added
644 * to the end and may wakeup and preceding readers.
645 * New readers are added to the head.  If, on read, an item is found with
@@ -1059,10 +1059,10 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
1059 * Messages are, like requests, separated into fields by
1060 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
1061 *
1062 * Message is
1063 *   reply cachename expiry key ... content....
1064 *
1065 * key and content are both parsed by cache
1066 */
1067
1068#define isodigit(c) (isdigit(c) && c <= '7')
@@ -1132,7 +1132,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
1132 	unsigned hash, entry;
1133 	struct cache_head *ch;
1134 	struct cache_detail *cd = ((struct handle*)m->private)->cd;
1135
1136
1137 	read_lock(&cd->hash_lock);
1138 	if (!n--)
@@ -1147,7 +1147,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
1147 		do {
1148 			hash++;
1149 			n += 1LL<<32;
1150 		} while(hash < cd->hash_size &&
1151 			cd->hash_table[hash]==NULL);
1152 	if (hash >= cd->hash_size)
1153 		return NULL;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 16c9fbc1db69..c95a61736d1c 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -410,7 +410,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
410 rpc_shutdown_client(clnt); 410 rpc_shutdown_client(clnt);
411 clnt = ERR_PTR(err); 411 clnt = ERR_PTR(err);
412 } 412 }
413out: 413out:
414 return clnt; 414 return clnt;
415} 415}
416 416
@@ -431,7 +431,7 @@ static const struct rpc_call_ops rpc_default_ops = {
431 * sleeps on RPC calls 431 * sleeps on RPC calls
432 */ 432 */
433#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM)) 433#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
434 434
435static void rpc_save_sigmask(sigset_t *oldset, int intr) 435static void rpc_save_sigmask(sigset_t *oldset, int intr)
436{ 436{
437 unsigned long sigallow = sigmask(SIGKILL); 437 unsigned long sigallow = sigmask(SIGKILL);
@@ -474,7 +474,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
474 int status; 474 int status;
475 475
476 /* If this client is slain all further I/O fails */ 476 /* If this client is slain all further I/O fails */
477 if (clnt->cl_dead) 477 if (clnt->cl_dead)
478 return -EIO; 478 return -EIO;
479 479
480 BUG_ON(flags & RPC_TASK_ASYNC); 480 BUG_ON(flags & RPC_TASK_ASYNC);
@@ -515,7 +515,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
515 515
516 /* If this client is slain all further I/O fails */ 516 /* If this client is slain all further I/O fails */
517 status = -EIO; 517 status = -EIO;
518 if (clnt->cl_dead) 518 if (clnt->cl_dead)
519 goto out_release; 519 goto out_release;
520 520
521 flags |= RPC_TASK_ASYNC; 521 flags |= RPC_TASK_ASYNC;
@@ -526,7 +526,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
526 goto out_release; 526 goto out_release;
527 527
528 /* Mask signals on GSS_AUTH upcalls */ 528 /* Mask signals on GSS_AUTH upcalls */
529 rpc_task_sigmask(task, &oldset); 529 rpc_task_sigmask(task, &oldset);
530 530
531 rpc_call_setup(task, msg, 0); 531 rpc_call_setup(task, msg, 0);
532 532
@@ -537,7 +537,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
537 else 537 else
538 rpc_put_task(task); 538 rpc_put_task(task);
539 539
540 rpc_restore_sigmask(&oldset); 540 rpc_restore_sigmask(&oldset);
541 return status; 541 return status;
542out_release: 542out_release:
543 rpc_release_calldata(tk_ops, data); 543 rpc_release_calldata(tk_ops, data);
@@ -749,7 +749,7 @@ call_allocate(struct rpc_task *task)
749 struct rpc_xprt *xprt = task->tk_xprt; 749 struct rpc_xprt *xprt = task->tk_xprt;
750 unsigned int bufsiz; 750 unsigned int bufsiz;
751 751
752 dprintk("RPC: %4d call_allocate (status %d)\n", 752 dprintk("RPC: %4d call_allocate (status %d)\n",
753 task->tk_pid, task->tk_status); 753 task->tk_pid, task->tk_status);
754 task->tk_action = call_bind; 754 task->tk_action = call_bind;
755 if (req->rq_buffer) 755 if (req->rq_buffer)
@@ -761,7 +761,7 @@ call_allocate(struct rpc_task *task)
761 761
762 if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL) 762 if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
763 return; 763 return;
764 printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); 764 printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
765 765
766 if (RPC_IS_ASYNC(task) || !signalled()) { 766 if (RPC_IS_ASYNC(task) || !signalled()) {
767 xprt_release(task); 767 xprt_release(task);
@@ -798,7 +798,7 @@ call_encode(struct rpc_task *task)
798 kxdrproc_t encode; 798 kxdrproc_t encode;
799 __be32 *p; 799 __be32 *p;
800 800
801 dprintk("RPC: %4d call_encode (status %d)\n", 801 dprintk("RPC: %4d call_encode (status %d)\n",
802 task->tk_pid, task->tk_status); 802 task->tk_pid, task->tk_status);
803 803
804 /* Default buffer setup */ 804 /* Default buffer setup */
@@ -933,7 +933,7 @@ call_connect_status(struct rpc_task *task)
933 struct rpc_clnt *clnt = task->tk_client; 933 struct rpc_clnt *clnt = task->tk_client;
934 int status = task->tk_status; 934 int status = task->tk_status;
935 935
936 dprintk("RPC: %5u call_connect_status (status %d)\n", 936 dprintk("RPC: %5u call_connect_status (status %d)\n",
937 task->tk_pid, task->tk_status); 937 task->tk_pid, task->tk_status);
938 938
939 task->tk_status = 0; 939 task->tk_status = 0;
@@ -966,7 +966,7 @@ call_connect_status(struct rpc_task *task)
966static void 966static void
967call_transmit(struct rpc_task *task) 967call_transmit(struct rpc_task *task)
968{ 968{
969 dprintk("RPC: %4d call_transmit (status %d)\n", 969 dprintk("RPC: %4d call_transmit (status %d)\n",
970 task->tk_pid, task->tk_status); 970 task->tk_pid, task->tk_status);
971 971
972 task->tk_action = call_status; 972 task->tk_action = call_status;
@@ -1028,7 +1028,7 @@ call_status(struct rpc_task *task)
1028 if (req->rq_received > 0 && !req->rq_bytes_sent) 1028 if (req->rq_received > 0 && !req->rq_bytes_sent)
1029 task->tk_status = req->rq_received; 1029 task->tk_status = req->rq_received;
1030 1030
1031 dprintk("RPC: %4d call_status (status %d)\n", 1031 dprintk("RPC: %4d call_status (status %d)\n",
1032 task->tk_pid, task->tk_status); 1032 task->tk_pid, task->tk_status);
1033 1033
1034 status = task->tk_status; 1034 status = task->tk_status;
@@ -1118,7 +1118,7 @@ call_decode(struct rpc_task *task)
1118 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; 1118 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
1119 __be32 *p; 1119 __be32 *p;
1120 1120
1121 dprintk("RPC: %4d call_decode (status %d)\n", 1121 dprintk("RPC: %4d call_decode (status %d)\n",
1122 task->tk_pid, task->tk_status); 1122 task->tk_pid, task->tk_status);
1123 1123
1124 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 1124 if (task->tk_flags & RPC_CALL_MAJORSEEN) {
@@ -1196,7 +1196,7 @@ static void
1196call_refreshresult(struct rpc_task *task) 1196call_refreshresult(struct rpc_task *task)
1197{ 1197{
1198 int status = task->tk_status; 1198 int status = task->tk_status;
1199 dprintk("RPC: %4d call_refreshresult (status %d)\n", 1199 dprintk("RPC: %4d call_refreshresult (status %d)\n",
1200 task->tk_pid, task->tk_status); 1200 task->tk_pid, task->tk_status);
1201 1201
1202 task->tk_status = 0; 1202 task->tk_status = 0;
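Reading the hunks above together, each call_* handler advances the client state machine by storing the next handler in task->tk_action before returning. A rough sketch of the forward path, as suggested by the functions touched here (not an exhaustive list):

	/* call_allocate sets... */
	task->tk_action = call_bind;
	/* ...and the remaining steps chain the same way:
	 * call_bind -> call_connect (-> call_connect_status)
	 *           -> call_transmit -> call_status -> call_decode,
	 * with call_refresh / call_refreshresult re-entering the chain
	 * when credentials need renewing. */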
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 3946ec3eb517..f4e1357bc186 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -329,7 +329,7 @@ static int xdr_decode_bool(struct rpc_rqst *req, __be32 *p, unsigned int *boolp)
329static struct rpc_procinfo pmap_procedures[] = { 329static struct rpc_procinfo pmap_procedures[] = {
330[PMAP_SET] = { 330[PMAP_SET] = {
331 .p_proc = PMAP_SET, 331 .p_proc = PMAP_SET,
332 .p_encode = (kxdrproc_t) xdr_encode_mapping, 332 .p_encode = (kxdrproc_t) xdr_encode_mapping,
333 .p_decode = (kxdrproc_t) xdr_decode_bool, 333 .p_decode = (kxdrproc_t) xdr_decode_bool,
334 .p_bufsiz = 4, 334 .p_bufsiz = 4,
335 .p_count = 1, 335 .p_count = 1,
@@ -338,7 +338,7 @@ static struct rpc_procinfo pmap_procedures[] = {
338 }, 338 },
339[PMAP_UNSET] = { 339[PMAP_UNSET] = {
340 .p_proc = PMAP_UNSET, 340 .p_proc = PMAP_UNSET,
341 .p_encode = (kxdrproc_t) xdr_encode_mapping, 341 .p_encode = (kxdrproc_t) xdr_encode_mapping,
342 .p_decode = (kxdrproc_t) xdr_decode_bool, 342 .p_decode = (kxdrproc_t) xdr_decode_bool,
343 .p_bufsiz = 4, 343 .p_bufsiz = 4,
344 .p_count = 1, 344 .p_count = 1,
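A hedged sketch of the same table pattern for one more portmapper procedure; PMAP_GETPORT exists in this protocol, but the decoder name below is assumed for illustration:

	[PMAP_GETPORT] = {
		.p_proc   = PMAP_GETPORT,
		.p_encode = (kxdrproc_t) xdr_encode_mapping,
		.p_decode = (kxdrproc_t) xdr_decode_port,	/* assumed decoder name */
		.p_bufsiz = 4,
		.p_count  = 1,
	},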
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fc083f0b3544..54a6b92525ea 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -4,7 +4,7 @@
4 * Scheduling for synchronous and asynchronous RPC requests. 4 * Scheduling for synchronous and asynchronous RPC requests.
5 * 5 *
6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> 6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
7 * 7 *
8 * TCP NFS related read + write fixes 8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> 9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10 */ 10 */
@@ -307,7 +307,7 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task);
307/* 307/*
308 * Make an RPC task runnable. 308 * Make an RPC task runnable.
309 * 309 *
310 * Note: If the task is ASYNC, this must be called with 310 * Note: If the task is ASYNC, this must be called with
311 * the spinlock held to protect the wait queue operation. 311 * the spinlock held to protect the wait queue operation.
312 */ 312 */
313static void rpc_make_runnable(struct rpc_task *task) 313static void rpc_make_runnable(struct rpc_task *task)
@@ -646,8 +646,8 @@ static int __rpc_execute(struct rpc_task *task)
646 if (RPC_DO_CALLBACK(task)) { 646 if (RPC_DO_CALLBACK(task)) {
647 /* Define a callback save pointer */ 647 /* Define a callback save pointer */
648 void (*save_callback)(struct rpc_task *); 648 void (*save_callback)(struct rpc_task *);
649 649
650 /* 650 /*
651 * If a callback exists, save it, reset it, 651 * If a callback exists, save it, reset it,
652 * call it. 652 * call it.
653 * The save is needed to stop from resetting 653 * The save is needed to stop from resetting
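A minimal sketch of the save-and-clear pattern that comment describes, assuming the callback lives in task->tk_callback: copying it aside and clearing it before the call means a callback that re-arms itself is not wiped out when it returns.

	void (*save_callback)(struct rpc_task *);

	if (task->tk_callback) {
		save_callback = task->tk_callback;	/* save  */
		task->tk_callback = NULL;		/* reset */
		save_callback(task);			/* call  */
	}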
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c1f878131ac6..b00511d39b65 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -397,7 +397,7 @@ svc_destroy(struct svc_serv *serv)
397 sk_list); 397 sk_list);
398 svc_close_socket(svsk); 398 svc_close_socket(svsk);
399 } 399 }
400 400
401 cache_clean_deferred(serv); 401 cache_clean_deferred(serv);
402 402
403 /* Unregister service with the portmapper */ 403 /* Unregister service with the portmapper */
@@ -415,7 +415,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
415{ 415{
416 int pages; 416 int pages;
417 int arghi; 417 int arghi;
418 418
419 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 419 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
420 * We assume one is at most one page 420 * We assume one is at most one page
421 */ 421 */
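A worked example of the sizing comment above, with illustrative values (PAGE_SIZE = 4096, size = 32768): the extra page covers the reply on the assumption that a reply fits in one page.

	pages = size / PAGE_SIZE + 1
	      = 32768 / 4096 + 1
	      = 9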
@@ -514,7 +514,7 @@ choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
514 if (pool != NULL) 514 if (pool != NULL)
515 return pool; 515 return pool;
516 516
517 return &serv->sv_pools[(*state)++ % serv->sv_nrpools]; 517 return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
518} 518}
519 519
520/* 520/*
@@ -530,13 +530,13 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
530 spin_lock_bh(&pool->sp_lock); 530 spin_lock_bh(&pool->sp_lock);
531 } else { 531 } else {
532 /* choose a pool in round-robin fashion */ 532 /* choose a pool in round-robin fashion */
533 for (i = 0; i < serv->sv_nrpools; i++) { 533 for (i = 0; i < serv->sv_nrpools; i++) {
534 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; 534 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
535 spin_lock_bh(&pool->sp_lock); 535 spin_lock_bh(&pool->sp_lock);
536 if (!list_empty(&pool->sp_all_threads)) 536 if (!list_empty(&pool->sp_all_threads))
537 goto found_pool; 537 goto found_pool;
538 spin_unlock_bh(&pool->sp_lock); 538 spin_unlock_bh(&pool->sp_lock);
539 } 539 }
540 return NULL; 540 return NULL;
541 } 541 }
542 542
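The pool-selection expression shown above is the whole round-robin trick; as a self-contained sketch, a per-caller counter indexed modulo the pool count visits every pool in turn:

	static struct svc_pool *pick_pool_rr(struct svc_serv *serv, unsigned int *state)
	{
		/* each caller keeps its own counter; the modulo walks the pools in turn */
		return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
	}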
@@ -551,7 +551,7 @@ found_pool:
551 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all); 551 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
552 list_del_init(&rqstp->rq_all); 552 list_del_init(&rqstp->rq_all);
553 task = rqstp->rq_task; 553 task = rqstp->rq_task;
554 } 554 }
555 spin_unlock_bh(&pool->sp_lock); 555 spin_unlock_bh(&pool->sp_lock);
556 556
557 return task; 557 return task;
@@ -636,7 +636,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
636 636
637/* 637/*
638 * Register an RPC service with the local portmapper. 638 * Register an RPC service with the local portmapper.
639 * To unregister a service, call this routine with 639 * To unregister a service, call this routine with
640 * proto and port == 0. 640 * proto and port == 0.
641 */ 641 */
642int 642int
@@ -709,7 +709,7 @@ svc_process(struct svc_rqst *rqstp)
709 goto err_short_len; 709 goto err_short_len;
710 710
711 /* setup response xdr_buf. 711 /* setup response xdr_buf.
712 * Initially it has just one page 712 * Initially it has just one page
713 */ 713 */
714 rqstp->rq_resused = 1; 714 rqstp->rq_resused = 1;
715 resv->iov_base = page_address(rqstp->rq_respages[0]); 715 resv->iov_base = page_address(rqstp->rq_respages[0]);
@@ -811,7 +811,7 @@ svc_process(struct svc_rqst *rqstp)
811 memset(rqstp->rq_argp, 0, procp->pc_argsize); 811 memset(rqstp->rq_argp, 0, procp->pc_argsize);
812 memset(rqstp->rq_resp, 0, procp->pc_ressize); 812 memset(rqstp->rq_resp, 0, procp->pc_ressize);
813 813
814 /* un-reserve some of the out-queue now that we have a 814 /* un-reserve some of the out-queue now that we have a
815 * better idea of reply size 815 * better idea of reply size
816 */ 816 */
817 if (procp->pc_xdrressize) 817 if (procp->pc_xdrressize)
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index c7bb5f7f21a5..811a24c83262 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -2,7 +2,7 @@
2 * linux/net/sunrpc/svcauth.c 2 * linux/net/sunrpc/svcauth.c
3 * 3 *
4 * The generic interface for RPC authentication on the server side. 4 * The generic interface for RPC authentication on the server side.
5 * 5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 * 7 *
8 * CHANGES 8 * CHANGES
@@ -74,7 +74,7 @@ int svc_authorise(struct svc_rqst *rqstp)
74 int rv = 0; 74 int rv = 0;
75 75
76 rqstp->rq_authop = NULL; 76 rqstp->rq_authop = NULL;
77 77
78 if (aops) { 78 if (aops) {
79 rv = aops->release(rqstp); 79 rv = aops->release(rqstp);
80 module_put(aops->owner); 80 module_put(aops->owner);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 0d1e8fb83b93..987244f95909 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -151,7 +151,7 @@ static void ip_map_request(struct cache_detail *cd,
151 char text_addr[20]; 151 char text_addr[20];
152 struct ip_map *im = container_of(h, struct ip_map, h); 152 struct ip_map *im = container_of(h, struct ip_map, h);
153 __be32 addr = im->m_addr.s_addr; 153 __be32 addr = im->m_addr.s_addr;
154 154
155 snprintf(text_addr, 20, "%u.%u.%u.%u", 155 snprintf(text_addr, 20, "%u.%u.%u.%u",
156 ntohl(addr) >> 24 & 0xff, 156 ntohl(addr) >> 24 & 0xff,
157 ntohl(addr) >> 16 & 0xff, 157 ntohl(addr) >> 16 & 0xff,
@@ -198,7 +198,7 @@ static int ip_map_parse(struct cache_detail *cd,
198 198
199 if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4) 199 if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
200 return -EINVAL; 200 return -EINVAL;
201 201
202 expiry = get_expiry(&mesg); 202 expiry = get_expiry(&mesg);
203 if (expiry ==0) 203 if (expiry ==0)
204 return -EINVAL; 204 return -EINVAL;
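Putting the two hunks above side by side, a hedged sketch of the round trip: format a __be32 as a dotted quad, then parse it back. The trailing %c together with the != 4 test rejects trailing garbage, because any extra character would make sscanf() return 5.

	__be32 addr = im->m_addr.s_addr;
	char text[20];
	unsigned int b1, b2, b3, b4;
	char c;

	snprintf(text, sizeof(text), "%u.%u.%u.%u",
		 ntohl(addr) >> 24 & 0xff, ntohl(addr) >> 16 & 0xff,
		 ntohl(addr) >>  8 & 0xff, ntohl(addr)       & 0xff);

	if (sscanf(text, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
		return -EINVAL;		/* malformed or trailing junk */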
@@ -248,7 +248,7 @@ static int ip_map_show(struct seq_file *m,
248 /* class addr domain */ 248 /* class addr domain */
249 addr = im->m_addr; 249 addr = im->m_addr;
250 250
251 if (test_bit(CACHE_VALID, &h->flags) && 251 if (test_bit(CACHE_VALID, &h->flags) &&
252 !test_bit(CACHE_NEGATIVE, &h->flags)) 252 !test_bit(CACHE_NEGATIVE, &h->flags))
253 dom = im->m_client->h.name; 253 dom = im->m_client->h.name;
254 254
@@ -262,7 +262,7 @@ static int ip_map_show(struct seq_file *m,
262 ); 262 );
263 return 0; 263 return 0;
264} 264}
265 265
266 266
267struct cache_detail ip_map_cache = { 267struct cache_detail ip_map_cache = {
268 .owner = THIS_MODULE, 268 .owner = THIS_MODULE,
@@ -343,7 +343,7 @@ int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
343int auth_unix_forget_old(struct auth_domain *dom) 343int auth_unix_forget_old(struct auth_domain *dom)
344{ 344{
345 struct unix_domain *udom; 345 struct unix_domain *udom;
346 346
347 if (dom->flavour != &svcauth_unix) 347 if (dom->flavour != &svcauth_unix)
348 return -EINVAL; 348 return -EINVAL;
349 udom = container_of(dom, struct unix_domain, h); 349 udom = container_of(dom, struct unix_domain, h);
@@ -465,7 +465,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
465 if (argv->iov_len < 3*4) 465 if (argv->iov_len < 3*4)
466 return SVC_GARBAGE; 466 return SVC_GARBAGE;
467 467
468 if (svc_getu32(argv) != 0) { 468 if (svc_getu32(argv) != 0) {
469 dprintk("svc: bad null cred\n"); 469 dprintk("svc: bad null cred\n");
470 *authp = rpc_autherr_badcred; 470 *authp = rpc_autherr_badcred;
471 return SVC_DENIED; 471 return SVC_DENIED;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cf93cd1d857b..2fd0ba2b20df 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -58,7 +58,7 @@
58 * providing that certain rules are followed: 58 * providing that certain rules are followed:
59 * 59 *
60 * SK_CONN, SK_DATA, can be set or cleared at any time. 60 * SK_CONN, SK_DATA, can be set or cleared at any time.
61 * after a set, svc_sock_enqueue must be called. 61 * after a set, svc_sock_enqueue must be called.
62 * after a clear, the socket must be read/accepted 62 * after a clear, the socket must be read/accepted
63 * if this succeeds, it must be set again. 63 * if this succeeds, it must be set again.
 64 * SK_CLOSE can be set at any time. It is never cleared. 64 * SK_CLOSE can be set at any time. It is never cleared.
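A hedged sketch of the rules just stated, with an invented availability check (data_available() is not a real helper here): set a flag, then enqueue; clear it before consuming, and set it again only if the consume step succeeded.

	/* producer side: after a set, always enqueue */
	set_bit(SK_DATA, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	/* consumer side: clear before reading, re-set on success */
	clear_bit(SK_DATA, &svsk->sk_flags);
	if (data_available(svsk))		/* hypothetical check */
		set_bit(SK_DATA, &svsk->sk_flags);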
@@ -252,7 +252,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
252 svsk->sk_sk, rqstp); 252 svsk->sk_sk, rqstp);
253 svc_thread_dequeue(pool, rqstp); 253 svc_thread_dequeue(pool, rqstp);
254 if (rqstp->rq_sock) 254 if (rqstp->rq_sock)
255 printk(KERN_ERR 255 printk(KERN_ERR
256 "svc_sock_enqueue: server %p, rq_sock=%p!\n", 256 "svc_sock_enqueue: server %p, rq_sock=%p!\n",
257 rqstp, rqstp->rq_sock); 257 rqstp, rqstp->rq_sock);
258 rqstp->rq_sock = svsk; 258 rqstp->rq_sock = svsk;
@@ -484,7 +484,7 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
484 if (xdr->tail[0].iov_len) { 484 if (xdr->tail[0].iov_len) {
485 result = kernel_sendpage(sock, rqstp->rq_respages[0], 485 result = kernel_sendpage(sock, rqstp->rq_respages[0],
486 ((unsigned long)xdr->tail[0].iov_base) 486 ((unsigned long)xdr->tail[0].iov_base)
487 & (PAGE_SIZE-1), 487 & (PAGE_SIZE-1),
488 xdr->tail[0].iov_len, 0); 488 xdr->tail[0].iov_len, 0);
489 489
490 if (result > 0) 490 if (result > 0)
@@ -711,7 +711,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
711 tv.tv_sec = xtime.tv_sec; 711 tv.tv_sec = xtime.tv_sec;
712 tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; 712 tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
713 skb_set_timestamp(skb, &tv); 713 skb_set_timestamp(skb, &tv);
714 /* Don't enable netstamp, sunrpc doesn't 714 /* Don't enable netstamp, sunrpc doesn't
715 need that much accuracy */ 715 need that much accuracy */
716 } 716 }
717 skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp); 717 skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
@@ -743,7 +743,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
743 return 0; 743 return 0;
744 } 744 }
745 local_bh_enable(); 745 local_bh_enable();
746 skb_free_datagram(svsk->sk_sk, skb); 746 skb_free_datagram(svsk->sk_sk, skb);
747 } else { 747 } else {
748 /* we can use it in-place */ 748 /* we can use it in-place */
749 rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); 749 rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
@@ -794,7 +794,7 @@ svc_udp_init(struct svc_sock *svsk)
794 svsk->sk_sendto = svc_udp_sendto; 794 svsk->sk_sendto = svc_udp_sendto;
795 795
796 /* initialise setting must have enough space to 796 /* initialise setting must have enough space to
797 * receive and respond to one request. 797 * receive and respond to one request.
798 * svc_udp_recvfrom will re-adjust if necessary 798 * svc_udp_recvfrom will re-adjust if necessary
799 */ 799 */
800 svc_sock_setbufsize(svsk->sk_sock, 800 svc_sock_setbufsize(svsk->sk_sock,
@@ -923,7 +923,7 @@ svc_tcp_accept(struct svc_sock *svsk)
923 if (ntohs(sin.sin_port) >= 1024) { 923 if (ntohs(sin.sin_port) >= 1024) {
924 dprintk(KERN_WARNING 924 dprintk(KERN_WARNING
925 "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n", 925 "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
926 serv->sv_name, 926 serv->sv_name,
927 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); 927 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
928 } 928 }
929 929
@@ -1038,7 +1038,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
1038 * on the number of threads which will access the socket. 1038 * on the number of threads which will access the socket.
1039 * 1039 *
1040 * rcvbuf just needs to be able to hold a few requests. 1040 * rcvbuf just needs to be able to hold a few requests.
1041 * Normally they will be removed from the queue 1041 * Normally they will be removed from the queue
1042 * as soon as a complete request arrives. 1042 * as soon as a complete request arrives.
1043 */ 1043 */
1044 svc_sock_setbufsize(svsk->sk_sock, 1044 svc_sock_setbufsize(svsk->sk_sock,
@@ -1063,7 +1063,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
1063 1063
1064 if (len < want) { 1064 if (len < want) {
1065 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", 1065 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
1066 len, want); 1066 len, want);
1067 svc_sock_received(svsk); 1067 svc_sock_received(svsk);
1068 return -EAGAIN; /* record header not complete */ 1068 return -EAGAIN; /* record header not complete */
1069 } 1069 }
@@ -1221,7 +1221,7 @@ svc_tcp_init(struct svc_sock *svsk)
1221 tp->nonagle = 1; /* disable Nagle's algorithm */ 1221 tp->nonagle = 1; /* disable Nagle's algorithm */
1222 1222
1223 /* initialise setting must have enough space to 1223 /* initialise setting must have enough space to
1224 * receive and respond to one request. 1224 * receive and respond to one request.
1225 * svc_tcp_recvfrom will re-adjust if necessary 1225 * svc_tcp_recvfrom will re-adjust if necessary
1226 */ 1226 */
1227 svc_sock_setbufsize(svsk->sk_sock, 1227 svc_sock_setbufsize(svsk->sk_sock,
@@ -1230,7 +1230,7 @@ svc_tcp_init(struct svc_sock *svsk)
1230 1230
1231 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1231 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1232 set_bit(SK_DATA, &svsk->sk_flags); 1232 set_bit(SK_DATA, &svsk->sk_flags);
1233 if (sk->sk_state != TCP_ESTABLISHED) 1233 if (sk->sk_state != TCP_ESTABLISHED)
1234 set_bit(SK_CLOSE, &svsk->sk_flags); 1234 set_bit(SK_CLOSE, &svsk->sk_flags);
1235 } 1235 }
1236} 1236}
@@ -1246,7 +1246,7 @@ svc_sock_update_bufs(struct svc_serv *serv)
1246 1246
1247 spin_lock_bh(&serv->sv_lock); 1247 spin_lock_bh(&serv->sv_lock);
1248 list_for_each(le, &serv->sv_permsocks) { 1248 list_for_each(le, &serv->sv_permsocks) {
1249 struct svc_sock *svsk = 1249 struct svc_sock *svsk =
1250 list_entry(le, struct svc_sock, sk_list); 1250 list_entry(le, struct svc_sock, sk_list);
1251 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1251 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1252 } 1252 }
@@ -1278,11 +1278,11 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1278 rqstp, timeout); 1278 rqstp, timeout);
1279 1279
1280 if (rqstp->rq_sock) 1280 if (rqstp->rq_sock)
1281 printk(KERN_ERR 1281 printk(KERN_ERR
1282 "svc_recv: service %p, socket not NULL!\n", 1282 "svc_recv: service %p, socket not NULL!\n",
1283 rqstp); 1283 rqstp);
1284 if (waitqueue_active(&rqstp->rq_wait)) 1284 if (waitqueue_active(&rqstp->rq_wait))
1285 printk(KERN_ERR 1285 printk(KERN_ERR
1286 "svc_recv: service %p, wait queue active!\n", 1286 "svc_recv: service %p, wait queue active!\n",
1287 rqstp); 1287 rqstp);
1288 1288
@@ -1371,7 +1371,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1371 return len; 1371 return len;
1372} 1372}
1373 1373
1374/* 1374/*
1375 * Drop request 1375 * Drop request
1376 */ 1376 */
1377void 1377void
@@ -1651,7 +1651,7 @@ svc_delete_socket(struct svc_sock *svsk)
1651 1651
1652 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags)) 1652 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
1653 list_del_init(&svsk->sk_list); 1653 list_del_init(&svsk->sk_list);
1654 /* 1654 /*
1655 * We used to delete the svc_sock from whichever list 1655 * We used to delete the svc_sock from whichever list
1656 * its sk_ready node was on, but we don't actually 1656 * its sk_ready node was on, but we don't actually
1657 * need to. This is because the only time we're called 1657 * need to. This is because the only time we're called
@@ -1697,7 +1697,7 @@ svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
1697} 1697}
1698 1698
1699/* 1699/*
1700 * Handle defer and revisit of requests 1700 * Handle defer and revisit of requests
1701 */ 1701 */
1702 1702
1703static void svc_revisit(struct cache_deferred_req *dreq, int too_many) 1703static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
@@ -1776,7 +1776,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1776static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) 1776static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1777{ 1777{
1778 struct svc_deferred_req *dr = NULL; 1778 struct svc_deferred_req *dr = NULL;
1779 1779
1780 if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) 1780 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
1781 return NULL; 1781 return NULL;
1782 spin_lock_bh(&svsk->sk_defer_lock); 1782 spin_lock_bh(&svsk->sk_defer_lock);
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 82b27528d0c4..47d8df2b5eb2 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -42,7 +42,7 @@ rpc_register_sysctl(void)
42 sunrpc_table[0].de->owner = THIS_MODULE; 42 sunrpc_table[0].de->owner = THIS_MODULE;
43#endif 43#endif
44 } 44 }
45 45
46} 46}
47 47
48void 48void
@@ -126,7 +126,7 @@ static ctl_table debug_table[] = {
126 .maxlen = sizeof(int), 126 .maxlen = sizeof(int),
127 .mode = 0644, 127 .mode = 0644,
128 .proc_handler = &proc_dodebug 128 .proc_handler = &proc_dodebug
129 }, 129 },
130 { 130 {
131 .ctl_name = CTL_NFSDEBUG, 131 .ctl_name = CTL_NFSDEBUG,
132 .procname = "nfs_debug", 132 .procname = "nfs_debug",
@@ -134,7 +134,7 @@ static ctl_table debug_table[] = {
134 .maxlen = sizeof(int), 134 .maxlen = sizeof(int),
135 .mode = 0644, 135 .mode = 0644,
136 .proc_handler = &proc_dodebug 136 .proc_handler = &proc_dodebug
137 }, 137 },
138 { 138 {
139 .ctl_name = CTL_NFSDDEBUG, 139 .ctl_name = CTL_NFSDDEBUG,
140 .procname = "nfsd_debug", 140 .procname = "nfsd_debug",
@@ -142,7 +142,7 @@ static ctl_table debug_table[] = {
142 .maxlen = sizeof(int), 142 .maxlen = sizeof(int),
143 .mode = 0644, 143 .mode = 0644,
144 .proc_handler = &proc_dodebug 144 .proc_handler = &proc_dodebug
145 }, 145 },
146 { 146 {
147 .ctl_name = CTL_NLMDEBUG, 147 .ctl_name = CTL_NLMDEBUG,
148 .procname = "nlm_debug", 148 .procname = "nlm_debug",
@@ -150,7 +150,7 @@ static ctl_table debug_table[] = {
150 .maxlen = sizeof(int), 150 .maxlen = sizeof(int),
151 .mode = 0644, 151 .mode = 0644,
152 .proc_handler = &proc_dodebug 152 .proc_handler = &proc_dodebug
153 }, 153 },
154 { .ctl_name = 0 } 154 { .ctl_name = 0 }
155}; 155};
156 156
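The table's remaining entries all follow the pattern shown in these hunks; as a hedged reconstruction (assuming the conventional rpc_debug entry whose header fields fall outside the context lines), the first entry has the same shape:

	{
		.ctl_name     = CTL_RPCDEBUG,
		.procname     = "rpc_debug",
		.data         = &rpc_debug,
		.maxlen       = sizeof(int),
		.mode         = 0644,
		.proc_handler = &proc_dodebug
	},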
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index a0af250ca319..6a59180e1667 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -302,7 +302,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
302 * @buf: xdr_buf 302 * @buf: xdr_buf
303 * @len: bytes to remove from buf->head[0] 303 * @len: bytes to remove from buf->head[0]
304 * 304 *
305 * Shrinks XDR buffer's header kvec buf->head[0] by 305 * Shrinks XDR buffer's header kvec buf->head[0] by
306 * 'len' bytes. The extra data is not lost, but is instead 306 * 'len' bytes. The extra data is not lost, but is instead
307 * moved into the inlined pages and/or the tail. 307 * moved into the inlined pages and/or the tail.
308 */ 308 */
@@ -375,7 +375,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
375 * @buf: xdr_buf 375 * @buf: xdr_buf
376 * @len: bytes to remove from buf->pages 376 * @len: bytes to remove from buf->pages
377 * 377 *
378 * Shrinks XDR buffer's page array buf->pages by 378 * Shrinks XDR buffer's page array buf->pages by
379 * 'len' bytes. The extra data is not lost, but is instead 379 * 'len' bytes. The extra data is not lost, but is instead
380 * moved into the tail. 380 * moved into the tail.
381 */ 381 */
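For orientation, a hedged sketch of the xdr_buf the two shrink helpers above shuffle data between: a head kvec for the fixed header, an array of pages for bulk data, and a tail kvec for padding and trailing words (all values below are illustrative).

	struct xdr_buf buf = {
		.head[0]  = { .iov_base = header,  .iov_len = header_len  },
		.pages    = page_array,		/* bulk payload pages */
		.page_len = payload_len,
		.tail[0]  = { .iov_base = trailer, .iov_len = trailer_len },
	};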
@@ -1024,7 +1024,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1024 1024
1025int 1025int
1026xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, 1026xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1027 int (*actor)(struct scatterlist *, void *), void *data) 1027 int (*actor)(struct scatterlist *, void *), void *data)
1028{ 1028{
1029 int i, ret = 0; 1029 int i, ret = 0;
1030 unsigned page_len, thislen, page_offset; 1030 unsigned page_len, thislen, page_offset;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 7a3999f0a4a2..e7c71a1ea3d4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -410,7 +410,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
410/* 410/*
411 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout 411 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
412 * @task: task whose timeout is to be set 412 * @task: task whose timeout is to be set
413 * 413 *
414 * Set a request's retransmit timeout using the RTT estimator. 414 * Set a request's retransmit timeout using the RTT estimator.
415 */ 415 */
416void xprt_set_retrans_timeout_rtt(struct rpc_task *task) 416void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
@@ -873,7 +873,7 @@ void xprt_release(struct rpc_task *task)
873 */ 873 */
874void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) 874void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
875{ 875{
876 to->to_initval = 876 to->to_initval =
877 to->to_increment = incr; 877 to->to_increment = incr;
878 to->to_maxval = to->to_initval + (incr * retr); 878 to->to_maxval = to->to_initval + (incr * retr);
879 to->to_retries = retr; 879 to->to_retries = retr;
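A worked example of the fixed-timeout setup above, with assumed inputs incr = 5 * HZ and retr = 3:

	to->to_initval  = to->to_increment = 5 * HZ;	/* first timeout, and the step   */
	to->to_maxval   = 5 * HZ + (5 * HZ * 3);	/* = 20 seconds upper bound      */
	to->to_retries  = 3;
	/* successive per-attempt timeouts grow linearly (5s, 10s, 15s, ...)
	 * and are capped at the 20-second maximum. */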
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 0be25e175b93..e5207a11edf6 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/addr.c: TIPC address utility routines 2 * net/tipc/addr.c: TIPC address utility routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -48,10 +48,10 @@ u32 tipc_get_addr(void)
48 48
49/** 49/**
50 * tipc_addr_domain_valid - validates a network domain address 50 * tipc_addr_domain_valid - validates a network domain address
51 * 51 *
52 * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>, 52 * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
53 * where Z, C, and N are non-zero and do not exceed the configured limits. 53 * where Z, C, and N are non-zero and do not exceed the configured limits.
54 * 54 *
55 * Returns 1 if domain address is valid, otherwise 0 55 * Returns 1 if domain address is valid, otherwise 0
56 */ 56 */
57 57
@@ -80,10 +80,10 @@ int tipc_addr_domain_valid(u32 addr)
80 80
81/** 81/**
82 * tipc_addr_node_valid - validates a proposed network address for this node 82 * tipc_addr_node_valid - validates a proposed network address for this node
83 * 83 *
84 * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed 84 * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
85 * the configured limits. 85 * the configured limits.
86 * 86 *
87 * Returns 1 if address can be used, otherwise 0 87 * Returns 1 if address can be used, otherwise 0
88 */ 88 */
89 89
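A hedged sketch of the <Z.C.N> encoding these validators operate on, assuming the usual TIPC packing of an 8-bit zone, 12-bit cluster and 12-bit node into one u32:

	static inline u32 make_tipc_addr(unsigned int z, unsigned int c, unsigned int n)
	{
		return (z << 24) | (c << 12) | n;
	}
	/* e.g. <1.1.10> encodes to 0x0100100a */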
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index bcfebb3cbbf3..e4bd5335e48d 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/addr.h: Include file for TIPC address utility routines 2 * net/tipc/addr.h: Include file for TIPC address utility routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -100,8 +100,8 @@ static inline int addr_scope(u32 domain)
100 100
101/** 101/**
102 * addr_domain - convert 2-bit scope value to equivalent message lookup domain 102 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
103 * 103 *
104 * Needed when address of a named message must be looked up a second time 104 * Needed when address of a named message must be looked up a second time
105 * after a network hop. 105 * after a network hop.
106 */ 106 */
107 107
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 730c5c47ed8d..e7880172ef19 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/bcast.c: TIPC broadcast code 2 * net/tipc/bcast.c: TIPC broadcast code
3 * 3 *
4 * Copyright (c) 2004-2006, Ericsson AB 4 * Copyright (c) 2004-2006, Ericsson AB
5 * Copyright (c) 2004, Intel Corporation. 5 * Copyright (c) 2004, Intel Corporation.
6 * Copyright (c) 2005, Wind River Systems 6 * Copyright (c) 2005, Wind River Systems
@@ -59,15 +59,15 @@
59 * Loss rate for incoming broadcast frames; used to test retransmission code. 59 * Loss rate for incoming broadcast frames; used to test retransmission code.
60 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any. 60 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
61 */ 61 */
62 62
63#define TIPC_BCAST_LOSS_RATE 0 63#define TIPC_BCAST_LOSS_RATE 0
64 64
65/** 65/**
66 * struct bcbearer_pair - a pair of bearers used by broadcast link 66 * struct bcbearer_pair - a pair of bearers used by broadcast link
67 * @primary: pointer to primary bearer 67 * @primary: pointer to primary bearer
68 * @secondary: pointer to secondary bearer 68 * @secondary: pointer to secondary bearer
69 * 69 *
70 * Bearers must have same priority and same set of reachable destinations 70 * Bearers must have same priority and same set of reachable destinations
71 * to be paired. 71 * to be paired.
72 */ 72 */
73 73
@@ -84,7 +84,7 @@ struct bcbearer_pair {
84 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort() 84 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
85 * @remains: temporary node map used by tipc_bcbearer_send() 85 * @remains: temporary node map used by tipc_bcbearer_send()
86 * @remains_new: temporary node map used tipc_bcbearer_send() 86 * @remains_new: temporary node map used tipc_bcbearer_send()
87 * 87 *
88 * Note: The fields labelled "temporary" are incorporated into the bearer 88 * Note: The fields labelled "temporary" are incorporated into the bearer
89 * to avoid consuming potentially limited stack space through the use of 89 * to avoid consuming potentially limited stack space through the use of
90 * large local variables within multicast routines. Concurrent access is 90 * large local variables within multicast routines. Concurrent access is
@@ -104,7 +104,7 @@ struct bcbearer {
104 * struct bclink - link used for broadcast messages 104 * struct bclink - link used for broadcast messages
105 * @link: (non-standard) broadcast link structure 105 * @link: (non-standard) broadcast link structure
106 * @node: (non-standard) node structure representing b'cast link's peer node 106 * @node: (non-standard) node structure representing b'cast link's peer node
107 * 107 *
108 * Handles sequence numbering, fragmentation, bundling, etc. 108 * Handles sequence numbering, fragmentation, bundling, etc.
109 */ 109 */
110 110
@@ -125,7 +125,7 @@ char tipc_bclink_name[] = "multicast-link";
125static u32 buf_seqno(struct sk_buff *buf) 125static u32 buf_seqno(struct sk_buff *buf)
126{ 126{
127 return msg_seqno(buf_msg(buf)); 127 return msg_seqno(buf_msg(buf));
128} 128}
129 129
130static u32 bcbuf_acks(struct sk_buff *buf) 130static u32 bcbuf_acks(struct sk_buff *buf)
131{ 131{
@@ -143,9 +143,9 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
143} 143}
144 144
145 145
146/** 146/**
147 * bclink_set_gap - set gap according to contents of current deferred pkt queue 147 * bclink_set_gap - set gap according to contents of current deferred pkt queue
148 * 148 *
149 * Called with 'node' locked, bc_lock unlocked 149 * Called with 'node' locked, bc_lock unlocked
150 */ 150 */
151 151
@@ -159,14 +159,14 @@ static void bclink_set_gap(struct node *n_ptr)
159 n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1); 159 n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
160} 160}
161 161
162/** 162/**
163 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment 163 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
164 * 164 *
165 * This mechanism endeavours to prevent all nodes in network from trying 165 * This mechanism endeavours to prevent all nodes in network from trying
166 * to ACK or NACK at the same time. 166 * to ACK or NACK at the same time.
167 * 167 *
168 * Note: TIPC uses a different trigger to distribute ACKs than it does to 168 * Note: TIPC uses a different trigger to distribute ACKs than it does to
169 * distribute NACKs, but tries to use the same spacing (divide by 16). 169 * distribute NACKs, but tries to use the same spacing (divide by 16).
170 */ 170 */
171 171
172static int bclink_ack_allowed(u32 n) 172static int bclink_ack_allowed(u32 n)
@@ -175,11 +175,11 @@ static int bclink_ack_allowed(u32 n)
175} 175}
176 176
177 177
178/** 178/**
179 * bclink_retransmit_pkt - retransmit broadcast packets 179 * bclink_retransmit_pkt - retransmit broadcast packets
180 * @after: sequence number of last packet to *not* retransmit 180 * @after: sequence number of last packet to *not* retransmit
181 * @to: sequence number of last packet to retransmit 181 * @to: sequence number of last packet to retransmit
182 * 182 *
183 * Called with bc_lock locked 183 * Called with bc_lock locked
184 */ 184 */
185 185
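A hedged sketch of the "divide by 16" spacing described above (the constant and the comparison are assumptions, not copied from this file): a node reacts only when the running count hashes to its own tag, so roughly one node in sixteen responds to any given event.

	static int ack_allowed_sketch(u32 n)
	{
		return (n % 16) == tipc_own_tag;	/* 16: assumed window/spacing */
	}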
@@ -189,16 +189,16 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
189 189
190 buf = bcl->first_out; 190 buf = bcl->first_out;
191 while (buf && less_eq(buf_seqno(buf), after)) { 191 while (buf && less_eq(buf_seqno(buf), after)) {
192 buf = buf->next; 192 buf = buf->next;
193 } 193 }
194 tipc_link_retransmit(bcl, buf, mod(to - after)); 194 tipc_link_retransmit(bcl, buf, mod(to - after));
195} 195}
196 196
197/** 197/**
198 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets 198 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
199 * @n_ptr: node that sent acknowledgement info 199 * @n_ptr: node that sent acknowledgement info
200 * @acked: broadcast sequence # that has been acknowledged 200 * @acked: broadcast sequence # that has been acknowledged
201 * 201 *
202 * Node is locked, bc_lock unlocked. 202 * Node is locked, bc_lock unlocked.
203 */ 203 */
204 204
@@ -244,9 +244,9 @@ void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
244 spin_unlock_bh(&bc_lock); 244 spin_unlock_bh(&bc_lock);
245} 245}
246 246
247/** 247/**
248 * bclink_send_ack - unicast an ACK msg 248 * bclink_send_ack - unicast an ACK msg
249 * 249 *
250 * tipc_net_lock and node lock set 250 * tipc_net_lock and node lock set
251 */ 251 */
252 252
@@ -258,9 +258,9 @@ static void bclink_send_ack(struct node *n_ptr)
258 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 258 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
259} 259}
260 260
261/** 261/**
262 * bclink_send_nack- broadcast a NACK msg 262 * bclink_send_nack- broadcast a NACK msg
263 * 263 *
264 * tipc_net_lock and node lock set 264 * tipc_net_lock and node lock set
265 */ 265 */
266 266
@@ -278,7 +278,7 @@ static void bclink_send_nack(struct node *n_ptr)
278 msg_init(msg, BCAST_PROTOCOL, STATE_MSG, 278 msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
279 TIPC_OK, INT_H_SIZE, n_ptr->addr); 279 TIPC_OK, INT_H_SIZE, n_ptr->addr);
280 msg_set_mc_netid(msg, tipc_net_id); 280 msg_set_mc_netid(msg, tipc_net_id);
281 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 281 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
282 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); 282 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
283 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); 283 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
284 msg_set_bcast_tag(msg, tipc_own_tag); 284 msg_set_bcast_tag(msg, tipc_own_tag);
@@ -292,17 +292,17 @@ static void bclink_send_nack(struct node *n_ptr)
292 bcl->stats.bearer_congs++; 292 bcl->stats.bearer_congs++;
293 } 293 }
294 294
295 /* 295 /*
 296 * Ensure we don't send another NACK msg to the node 296 * Ensure we don't send another NACK msg to the node
297 * until 16 more deferred messages arrive from it 297 * until 16 more deferred messages arrive from it
298 * (i.e. helps prevent all nodes from NACK'ing at same time) 298 * (i.e. helps prevent all nodes from NACK'ing at same time)
299 */ 299 */
300 300
301 n_ptr->bclink.nack_sync = tipc_own_tag; 301 n_ptr->bclink.nack_sync = tipc_own_tag;
302 } 302 }
303} 303}
304 304
305/** 305/**
306 * tipc_bclink_check_gap - send a NACK if a sequence gap exists 306 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
307 * 307 *
308 * tipc_net_lock and node lock set 308 * tipc_net_lock and node lock set
@@ -320,9 +320,9 @@ void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
320 bclink_send_nack(n_ptr); 320 bclink_send_nack(n_ptr);
321} 321}
322 322
323/** 323/**
324 * tipc_bclink_peek_nack - process a NACK msg meant for another node 324 * tipc_bclink_peek_nack - process a NACK msg meant for another node
325 * 325 *
326 * Only tipc_net_lock set. 326 * Only tipc_net_lock set.
327 */ 327 */
328 328
@@ -349,7 +349,7 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
349 if (less_eq(my_to, gap_to)) 349 if (less_eq(my_to, gap_to))
350 n_ptr->bclink.gap_to = gap_after; 350 n_ptr->bclink.gap_to = gap_after;
351 } else { 351 } else {
352 /* 352 /*
353 * Expand gap if missing bufs not in deferred queue: 353 * Expand gap if missing bufs not in deferred queue:
354 */ 354 */
355 struct sk_buff *buf = n_ptr->bclink.deferred_head; 355 struct sk_buff *buf = n_ptr->bclink.deferred_head;
@@ -371,7 +371,7 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
371 } 371 }
372 /* 372 /*
373 * Some nodes may send a complementary NACK now: 373 * Some nodes may send a complementary NACK now:
374 */ 374 */
375 if (bclink_ack_allowed(sender_tag + 1)) { 375 if (bclink_ack_allowed(sender_tag + 1)) {
376 if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) { 376 if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
377 bclink_send_nack(n_ptr); 377 bclink_send_nack(n_ptr);
@@ -408,7 +408,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
408 408
409/** 409/**
410 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards 410 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
411 * 411 *
412 * tipc_net_lock is read_locked, no other locks set 412 * tipc_net_lock is read_locked, no other locks set
413 */ 413 */
414 414
@@ -425,7 +425,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
425 425
426 msg_dbg(msg, "<BC<<<"); 426 msg_dbg(msg, "<BC<<<");
427 427
428 if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported || 428 if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
429 (msg_mc_netid(msg) != tipc_net_id))) { 429 (msg_mc_netid(msg) != tipc_net_id))) {
430 buf_discard(buf); 430 buf_discard(buf);
431 return; 431 return;
@@ -443,7 +443,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
443 bclink_retransmit_pkt(msg_bcgap_after(msg), 443 bclink_retransmit_pkt(msg_bcgap_after(msg),
444 msg_bcgap_to(msg)); 444 msg_bcgap_to(msg));
445 bcl->owner->next = NULL; 445 bcl->owner->next = NULL;
446 spin_unlock_bh(&bc_lock); 446 spin_unlock_bh(&bc_lock);
447 } else { 447 } else {
448 tipc_bclink_peek_nack(msg_destnode(msg), 448 tipc_bclink_peek_nack(msg_destnode(msg),
449 msg_bcast_tag(msg), 449 msg_bcast_tag(msg),
@@ -547,10 +547,10 @@ u32 tipc_bclink_acks_missing(struct node *n_ptr)
547 547
548/** 548/**
549 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer 549 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
550 * 550 *
551 * Send through as many bearers as necessary to reach all nodes 551 * Send through as many bearers as necessary to reach all nodes
552 * that support TIPC multicasting. 552 * that support TIPC multicasting.
553 * 553 *
554 * Returns 0 if packet sent successfully, non-zero if not 554 * Returns 0 if packet sent successfully, non-zero if not
555 */ 555 */
556 556
@@ -581,7 +581,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
581 send_count = 0; 581 send_count = 0;
582 582
583 /* Send buffer over bearers until all targets reached */ 583 /* Send buffer over bearers until all targets reached */
584 584
585 bcbearer->remains = tipc_cltr_bcast_nodes; 585 bcbearer->remains = tipc_cltr_bcast_nodes;
586 586
587 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { 587 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
@@ -615,7 +615,7 @@ update:
615 615
616 bcbearer->remains = bcbearer->remains_new; 616 bcbearer->remains = bcbearer->remains_new;
617 } 617 }
618 618
619 /* Unable to reach all targets */ 619 /* Unable to reach all targets */
620 620
621 bcbearer->bearer.publ.blocked = 1; 621 bcbearer->bearer.publ.blocked = 1;
@@ -682,7 +682,7 @@ void tipc_bcbearer_sort(void)
682 682
683/** 683/**
684 * tipc_bcbearer_push - resolve bearer congestion 684 * tipc_bcbearer_push - resolve bearer congestion
685 * 685 *
686 * Forces bclink to push out any unsent packets, until all packets are gone 686 * Forces bclink to push out any unsent packets, until all packets are gone
687 * or congestion reoccurs. 687 * or congestion reoccurs.
688 * No locks set when function called 688 * No locks set when function called
@@ -714,27 +714,27 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
714 spin_lock_bh(&bc_lock); 714 spin_lock_bh(&bc_lock);
715 715
716 tipc_printf(&pb, "Link <%s>\n" 716 tipc_printf(&pb, "Link <%s>\n"
717 " Window:%u packets\n", 717 " Window:%u packets\n",
718 bcl->name, bcl->queue_limit[0]); 718 bcl->name, bcl->queue_limit[0]);
719 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 719 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
720 bcl->stats.recv_info, 720 bcl->stats.recv_info,
721 bcl->stats.recv_fragments, 721 bcl->stats.recv_fragments,
722 bcl->stats.recv_fragmented, 722 bcl->stats.recv_fragmented,
723 bcl->stats.recv_bundles, 723 bcl->stats.recv_bundles,
724 bcl->stats.recv_bundled); 724 bcl->stats.recv_bundled);
725 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 725 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
726 bcl->stats.sent_info, 726 bcl->stats.sent_info,
727 bcl->stats.sent_fragments, 727 bcl->stats.sent_fragments,
728 bcl->stats.sent_fragmented, 728 bcl->stats.sent_fragmented,
729 bcl->stats.sent_bundles, 729 bcl->stats.sent_bundles,
730 bcl->stats.sent_bundled); 730 bcl->stats.sent_bundled);
731 tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n", 731 tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
732 bcl->stats.recv_nacks, 732 bcl->stats.recv_nacks,
733 bcl->stats.deferred_recv, 733 bcl->stats.deferred_recv,
734 bcl->stats.duplicates); 734 bcl->stats.duplicates);
735 tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n", 735 tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
736 bcl->stats.sent_nacks, 736 bcl->stats.sent_nacks,
737 bcl->stats.sent_acks, 737 bcl->stats.sent_acks,
738 bcl->stats.retransmitted); 738 bcl->stats.retransmitted);
739 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", 739 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
740 bcl->stats.bearer_congs, 740 bcl->stats.bearer_congs,
@@ -778,7 +778,7 @@ int tipc_bclink_init(void)
778 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC); 778 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
779 if (!bcbearer || !bclink) { 779 if (!bcbearer || !bclink) {
780 nomem: 780 nomem:
781 warn("Multicast link creation failed, no memory\n"); 781 warn("Multicast link creation failed, no memory\n");
782 kfree(bcbearer); 782 kfree(bcbearer);
783 bcbearer = NULL; 783 bcbearer = NULL;
784 kfree(bclink); 784 kfree(bclink);
@@ -796,7 +796,7 @@ int tipc_bclink_init(void)
796 bcl->next_out_no = 1; 796 bcl->next_out_no = 1;
797 spin_lock_init(&bclink->node.lock); 797 spin_lock_init(&bclink->node.lock);
798 bcl->owner = &bclink->node; 798 bcl->owner = &bclink->node;
799 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 799 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
800 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 800 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
801 bcl->b_ptr = &bcbearer->bearer; 801 bcl->b_ptr = &bcbearer->bearer;
802 bcl->state = WORKING_WORKING; 802 bcl->state = WORKING_WORKING;
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index b243d9d495f0..f910ed29d055 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/bcast.h: Include file for TIPC broadcast code 2 * net/tipc/bcast.h: Include file for TIPC broadcast code
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -102,7 +102,7 @@ static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node)
102 } 102 }
103} 103}
104 104
105/** 105/**
106 * nmap_remove - remove a node from a node map 106 * nmap_remove - remove a node from a node map
107 */ 107 */
108 108
@@ -190,7 +190,7 @@ static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
190 190
191/** 191/**
192 * port_list_free - free dynamically created entries in port_list chain 192 * port_list_free - free dynamically created entries in port_list chain
193 * 193 *
194 * Note: First item is on stack, so it doesn't need to be released 194 * Note: First item is on stack, so it doesn't need to be released
195 */ 195 */
196 196
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 39744a33bd36..271a375b49b7 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/bearer.c: TIPC bearer code 2 * net/tipc/bearer.c: TIPC bearer code
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -52,7 +52,7 @@ struct bearer *tipc_bearers = NULL;
52 52
53/** 53/**
54 * media_name_valid - validate media name 54 * media_name_valid - validate media name
55 * 55 *
56 * Returns 1 if media name is valid, otherwise 0. 56 * Returns 1 if media name is valid, otherwise 0.
57 */ 57 */
58 58
@@ -84,17 +84,17 @@ static struct media *media_find(const char *name)
84 84
85/** 85/**
86 * tipc_register_media - register a media type 86 * tipc_register_media - register a media type
87 * 87 *
88 * Bearers for this media type must be activated separately at a later stage. 88 * Bearers for this media type must be activated separately at a later stage.
89 */ 89 */
90 90
91int tipc_register_media(u32 media_type, 91int tipc_register_media(u32 media_type,
92 char *name, 92 char *name,
93 int (*enable)(struct tipc_bearer *), 93 int (*enable)(struct tipc_bearer *),
94 void (*disable)(struct tipc_bearer *), 94 void (*disable)(struct tipc_bearer *),
95 int (*send_msg)(struct sk_buff *, 95 int (*send_msg)(struct sk_buff *,
96 struct tipc_bearer *, 96 struct tipc_bearer *,
97 struct tipc_media_addr *), 97 struct tipc_media_addr *),
98 char *(*addr2str)(struct tipc_media_addr *a, 98 char *(*addr2str)(struct tipc_media_addr *a,
99 char *str_buf, int str_size), 99 char *str_buf, int str_size),
100 struct tipc_media_addr *bcast_addr, 100 struct tipc_media_addr *bcast_addr,
@@ -121,11 +121,11 @@ int tipc_register_media(u32 media_type,
121 } 121 }
122 if ((bearer_priority < TIPC_MIN_LINK_PRI) && 122 if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
123 (bearer_priority > TIPC_MAX_LINK_PRI)) { 123 (bearer_priority > TIPC_MAX_LINK_PRI)) {
124 warn("Media <%s> rejected, illegal priority (%u)\n", name, 124 warn("Media <%s> rejected, illegal priority (%u)\n", name,
125 bearer_priority); 125 bearer_priority);
126 goto exit; 126 goto exit;
127 } 127 }
128 if ((link_tolerance < TIPC_MIN_LINK_TOL) || 128 if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
129 (link_tolerance > TIPC_MAX_LINK_TOL)) { 129 (link_tolerance > TIPC_MAX_LINK_TOL)) {
130 warn("Media <%s> rejected, illegal tolerance (%u)\n", name, 130 warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
131 link_tolerance); 131 link_tolerance);
@@ -219,7 +219,7 @@ struct sk_buff *tipc_media_get_names(void)
219 219
220 read_lock_bh(&tipc_net_lock); 220 read_lock_bh(&tipc_net_lock);
221 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) { 221 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
222 tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, 222 tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
223 strlen(m_ptr->name) + 1); 223 strlen(m_ptr->name) + 1);
224 } 224 }
225 read_unlock_bh(&tipc_net_lock); 225 read_unlock_bh(&tipc_net_lock);
@@ -230,11 +230,11 @@ struct sk_buff *tipc_media_get_names(void)
230 * bearer_name_validate - validate & (optionally) deconstruct bearer name 230 * bearer_name_validate - validate & (optionally) deconstruct bearer name
231 * @name - ptr to bearer name string 231 * @name - ptr to bearer name string
232 * @name_parts - ptr to area for bearer name components (or NULL if not needed) 232 * @name_parts - ptr to area for bearer name components (or NULL if not needed)
233 * 233 *
234 * Returns 1 if bearer name is valid, otherwise 0. 234 * Returns 1 if bearer name is valid, otherwise 0.
235 */ 235 */
236 236
237static int bearer_name_validate(const char *name, 237static int bearer_name_validate(const char *name,
238 struct bearer_name *name_parts) 238 struct bearer_name *name_parts)
239{ 239{
240 char name_copy[TIPC_MAX_BEARER_NAME]; 240 char name_copy[TIPC_MAX_BEARER_NAME];
@@ -262,8 +262,8 @@ static int bearer_name_validate(const char *name,
262 262
263 /* validate component parts of bearer name */ 263 /* validate component parts of bearer name */
264 264
265 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || 265 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
266 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || 266 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
267 (strspn(media_name, tipc_alphabet) != (media_len - 1)) || 267 (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
268 (strspn(if_name, tipc_alphabet) != (if_len - 1))) 268 (strspn(if_name, tipc_alphabet) != (if_len - 1)))
269 return 0; 269 return 0;
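A hedged example of the name form being validated: "<media>:<interface>", e.g. "eth:eth0", split at the first ':' into its two components before the length and character-set checks above are applied.

	char name_copy[TIPC_MAX_BEARER_NAME];
	char *media_name, *if_name;

	strcpy(name_copy, "eth:eth0");		/* illustrative bearer name */
	media_name = name_copy;
	if_name = strchr(name_copy, ':');
	if (if_name == NULL)
		return 0;			/* no separator => invalid */
	*if_name++ = 0;				/* split into "eth" / "eth0" */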
@@ -336,8 +336,8 @@ struct sk_buff *tipc_bearer_get_names(void)
336 for (j = 0; j < MAX_BEARERS; j++) { 336 for (j = 0; j < MAX_BEARERS; j++) {
337 b_ptr = &tipc_bearers[j]; 337 b_ptr = &tipc_bearers[j];
338 if (b_ptr->active && (b_ptr->media == m_ptr)) { 338 if (b_ptr->active && (b_ptr->media == m_ptr)) {
339 tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, 339 tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
340 b_ptr->publ.name, 340 b_ptr->publ.name,
341 strlen(b_ptr->publ.name) + 1); 341 strlen(b_ptr->publ.name) + 1);
342 } 342 }
343 } 343 }
@@ -401,8 +401,8 @@ void tipc_bearer_lock_push(struct bearer *b_ptr)
401 401
402 402
403/* 403/*
404 * Interrupt enabling new requests after bearer congestion or blocking: 404 * Interrupt enabling new requests after bearer congestion or blocking:
405 * See bearer_send(). 405 * See bearer_send().
406 */ 406 */
407void tipc_continue(struct tipc_bearer *tb_ptr) 407void tipc_continue(struct tipc_bearer *tb_ptr)
408{ 408{
@@ -417,9 +417,9 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
417} 417}
418 418
419/* 419/*
420 * Schedule link for sending of messages after the bearer 420 * Schedule link for sending of messages after the bearer
421 * has been deblocked by 'continue()'. This method is called 421 * has been deblocked by 'continue()'. This method is called
422 * when somebody tries to send a message via this link while 422 * when somebody tries to send a message via this link while
423 * the bearer is congested. 'tipc_net_lock' is in read_lock here 423 * the bearer is congested. 'tipc_net_lock' is in read_lock here
424 * bearer.lock is busy 424 * bearer.lock is busy
425 */ 425 */
@@ -430,9 +430,9 @@ static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_p
430} 430}
431 431
432/* 432/*
433 * Schedule link for sending of messages after the bearer 433 * Schedule link for sending of messages after the bearer
434 * has been deblocked by 'continue()'. This method is called 434 * has been deblocked by 'continue()'. This method is called
435 * when somebody tries to send a message via this link while 435 * when somebody tries to send a message via this link while
436 * the bearer is congested. 'tipc_net_lock' is in read_lock here, 436 * the bearer is congested. 'tipc_net_lock' is in read_lock here,
437 * bearer.lock is free 437 * bearer.lock is free
438 */ 438 */
@@ -468,7 +468,7 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
468 468
469/** 469/**
470 * tipc_enable_bearer - enable bearer with the given name 470 * tipc_enable_bearer - enable bearer with the given name
471 */ 471 */
472 472
473int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority) 473int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
474{ 474{
@@ -490,7 +490,7 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
490 warn("Bearer <%s> rejected, illegal name\n", name); 490 warn("Bearer <%s> rejected, illegal name\n", name);
491 return -EINVAL; 491 return -EINVAL;
492 } 492 }
493 if (!tipc_addr_domain_valid(bcast_scope) || 493 if (!tipc_addr_domain_valid(bcast_scope) ||
494 !in_scope(bcast_scope, tipc_own_addr)) { 494 !in_scope(bcast_scope, tipc_own_addr)) {
495 warn("Bearer <%s> rejected, illegal broadcast scope\n", name); 495 warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
496 return -EINVAL; 496 return -EINVAL;
@@ -539,7 +539,7 @@ restart:
539 } 539 }
540 } 540 }
541 if (bearer_id >= MAX_BEARERS) { 541 if (bearer_id >= MAX_BEARERS) {
542 warn("Bearer <%s> rejected, bearer limit reached (%u)\n", 542 warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
543 name, MAX_BEARERS); 543 name, MAX_BEARERS);
544 goto failed; 544 goto failed;
545 } 545 }
@@ -612,7 +612,7 @@ int tipc_block_bearer(const char *name)
612 612
613/** 613/**
614 * bearer_disable - 614 * bearer_disable -
615 * 615 *
616 * Note: This routine assumes caller holds tipc_net_lock. 616 * Note: This routine assumes caller holds tipc_net_lock.
617 */ 617 */
618 618
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index c4e7c1c3655b..6a36b6600e6c 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/bearer.h: Include file for TIPC bearer code 2 * net/tipc/bearer.h: Include file for TIPC bearer code
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -58,14 +58,14 @@
58 * @type_id: TIPC media identifier [defined in tipc_bearer.h] 58 * @type_id: TIPC media identifier [defined in tipc_bearer.h]
59 * @name: media name 59 * @name: media name
60 */ 60 */
61 61
62struct media { 62struct media {
63 int (*send_msg)(struct sk_buff *buf, 63 int (*send_msg)(struct sk_buff *buf,
64 struct tipc_bearer *b_ptr, 64 struct tipc_bearer *b_ptr,
65 struct tipc_media_addr *dest); 65 struct tipc_media_addr *dest);
66 int (*enable_bearer)(struct tipc_bearer *b_ptr); 66 int (*enable_bearer)(struct tipc_bearer *b_ptr);
67 void (*disable_bearer)(struct tipc_bearer *b_ptr); 67 void (*disable_bearer)(struct tipc_bearer *b_ptr);
68 char *(*addr2str)(struct tipc_media_addr *a, 68 char *(*addr2str)(struct tipc_media_addr *a,
69 char *str_buf, int str_size); 69 char *str_buf, int str_size);
70 struct tipc_media_addr bcast_addr; 70 struct tipc_media_addr bcast_addr;
71 int bcast; 71 int bcast;
@@ -91,7 +91,7 @@ struct media {
91 * @net_plane: network plane ('A' through 'H') currently associated with bearer 91 * @net_plane: network plane ('A' through 'H') currently associated with bearer
92 * @nodes: indicates which nodes in cluster can be reached through bearer 92 * @nodes: indicates which nodes in cluster can be reached through bearer
93 */ 93 */
94 94
95struct bearer { 95struct bearer {
96 struct tipc_bearer publ; 96 struct tipc_bearer publ;
97 struct media *media; 97 struct media *media;
@@ -131,21 +131,21 @@ void tipc_bearer_lock_push(struct bearer *b_ptr);
131 131
132 132
133/** 133/**
134 * tipc_bearer_send- sends buffer to destination over bearer 134 * tipc_bearer_send- sends buffer to destination over bearer
135 * 135 *
136 * Returns true (1) if successful, or false (0) if unable to send 136 * Returns true (1) if successful, or false (0) if unable to send
137 * 137 *
138 * IMPORTANT: 138 * IMPORTANT:
139 * The media send routine must not alter the buffer being passed in 139 * The media send routine must not alter the buffer being passed in
140 * as it may be needed for later retransmission! 140 * as it may be needed for later retransmission!
141 * 141 *
142 * If the media send routine returns a non-zero value (indicating that 142 * If the media send routine returns a non-zero value (indicating that
143 * it was unable to send the buffer), it must: 143 * it was unable to send the buffer), it must:
144 * 1) mark the bearer as blocked, 144 * 1) mark the bearer as blocked,
145 * 2) call tipc_continue() once the bearer is able to send again. 145 * 2) call tipc_continue() once the bearer is able to send again.
146 * Media types that are unable to meet these two critera must ensure their 146 * Media types that are unable to meet these two critera must ensure their
147 * send routine always returns success -- even if the buffer was not sent -- 147 * send routine always returns success -- even if the buffer was not sent --
148 * and let TIPC's link code deal with the undelivered message. 148 * and let TIPC's link code deal with the undelivered message.
149 */ 149 */
150 150
151static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf, 151static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
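For illustration, a minimal sketch of a media send routine that follows the contract documented in the comment above: it never modifies the buffer passed in (it may be needed for retransmission), and on failure it either marks the bearer blocked and calls tipc_continue() later, or reports success and lets TIPC's link code handle the undelivered message. The my_hw_* helpers and the use of the 'blocked' field are assumptions for the sketch, not part of this patch.

static int example_send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
			    struct tipc_media_addr *dest)
{
	/* Clone so the original buffer stays intact for possible retransmission */
	struct sk_buff *clone = skb_clone(buf, GFP_ATOMIC);

	if (!clone)
		return 0;	/* report success; TIPC's link code deals with the loss */

	if (my_hw_queue_full(tb_ptr->usr_handle)) {	/* hypothetical driver state */
		kfree_skb(clone);
		tb_ptr->blocked = 1;	/* assumption: mark bearer blocked ... */
		return 1;		/* ... and call tipc_continue() once it can send again */
	}
	my_hw_transmit(tb_ptr->usr_handle, clone, dest);	/* hypothetical transmit */
	return 0;
}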
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index b46b5188a9fd..95b373913aa0 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/cluster.c: TIPC cluster management routines 2 * net/tipc/cluster.c: TIPC cluster management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -56,7 +56,7 @@ struct cluster *tipc_cltr_create(u32 addr)
56{ 56{
57 struct _zone *z_ptr; 57 struct _zone *z_ptr;
58 struct cluster *c_ptr; 58 struct cluster *c_ptr;
59 int max_nodes; 59 int max_nodes;
60 60
61 c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC); 61 c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
62 if (c_ptr == NULL) { 62 if (c_ptr == NULL) {
@@ -81,7 +81,7 @@ struct cluster *tipc_cltr_create(u32 addr)
81 tipc_local_nodes = c_ptr->nodes; 81 tipc_local_nodes = c_ptr->nodes;
82 c_ptr->highest_slave = LOWEST_SLAVE - 1; 82 c_ptr->highest_slave = LOWEST_SLAVE - 1;
83 c_ptr->highest_node = 0; 83 c_ptr->highest_node = 0;
84 84
85 z_ptr = tipc_zone_find(tipc_zone(addr)); 85 z_ptr = tipc_zone_find(tipc_zone(addr));
86 if (!z_ptr) { 86 if (!z_ptr) {
87 z_ptr = tipc_zone_create(addr); 87 z_ptr = tipc_zone_create(addr);
@@ -150,7 +150,7 @@ void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr)
150 150
151/** 151/**
152 * tipc_cltr_select_router - select router to a cluster 152 * tipc_cltr_select_router - select router to a cluster
153 * 153 *
154 * Uses deterministic and fair algorithm. 154 * Uses deterministic and fair algorithm.
155 */ 155 */
156 156
@@ -192,7 +192,7 @@ u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
192 192
193/** 193/**
194 * tipc_cltr_select_node - select destination node within a remote cluster 194 * tipc_cltr_select_node - select destination node within a remote cluster
195 * 195 *
196 * Uses deterministic and fair algorithm. 196 * Uses deterministic and fair algorithm.
197 */ 197 */
198 198
@@ -295,7 +295,7 @@ void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
295 msg_set_remote_node(msg, c_ptr->addr); 295 msg_set_remote_node(msg, c_ptr->addr);
296 msg_set_type(msg, SLAVE_ROUTING_TABLE); 296 msg_set_type(msg, SLAVE_ROUTING_TABLE);
297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) { 297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
298 if (c_ptr->nodes[n_num] && 298 if (c_ptr->nodes[n_num] &&
299 tipc_node_has_active_links(c_ptr->nodes[n_num])) { 299 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
300 send = 1; 300 send = 1;
301 msg_set_dataoctet(msg, n_num); 301 msg_set_dataoctet(msg, n_num);
@@ -329,7 +329,7 @@ void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
329 msg_set_remote_node(msg, c_ptr->addr); 329 msg_set_remote_node(msg, c_ptr->addr);
330 msg_set_type(msg, EXT_ROUTING_TABLE); 330 msg_set_type(msg, EXT_ROUTING_TABLE);
331 for (n_num = 1; n_num <= highest; n_num++) { 331 for (n_num = 1; n_num <= highest; n_num++) {
332 if (c_ptr->nodes[n_num] && 332 if (c_ptr->nodes[n_num] &&
333 tipc_node_has_active_links(c_ptr->nodes[n_num])) { 333 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
334 send = 1; 334 send = 1;
335 msg_set_dataoctet(msg, n_num); 335 msg_set_dataoctet(msg, n_num);
@@ -360,7 +360,7 @@ void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
360 msg_set_remote_node(msg, c_ptr->addr); 360 msg_set_remote_node(msg, c_ptr->addr);
361 msg_set_type(msg, LOCAL_ROUTING_TABLE); 361 msg_set_type(msg, LOCAL_ROUTING_TABLE);
362 for (n_num = 1; n_num <= highest; n_num++) { 362 for (n_num = 1; n_num <= highest; n_num++) {
363 if (c_ptr->nodes[n_num] && 363 if (c_ptr->nodes[n_num] &&
364 tipc_node_has_active_links(c_ptr->nodes[n_num])) { 364 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
365 send = 1; 365 send = 1;
366 msg_set_dataoctet(msg, n_num); 366 msg_set_dataoctet(msg, n_num);
@@ -492,7 +492,7 @@ void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
492} 492}
493 493
494/** 494/**
495 * tipc_cltr_multicast - multicast message to local nodes 495 * tipc_cltr_multicast - multicast message to local nodes
496 */ 496 */
497 497
498static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf, 498static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
@@ -554,9 +554,9 @@ void tipc_cltr_broadcast(struct sk_buff *buf)
554 buf_copy = skb_copy(buf, GFP_ATOMIC); 554 buf_copy = skb_copy(buf, GFP_ATOMIC);
555 if (buf_copy == NULL) 555 if (buf_copy == NULL)
556 goto exit; 556 goto exit;
557 msg_set_destnode(buf_msg(buf_copy), 557 msg_set_destnode(buf_msg(buf_copy),
558 n_ptr->addr); 558 n_ptr->addr);
559 tipc_link_send(buf_copy, n_ptr->addr, 559 tipc_link_send(buf_copy, n_ptr->addr,
560 n_ptr->addr); 560 n_ptr->addr);
561 } 561 }
562 } 562 }
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
index 1b4cd309495a..62df074afaec 100644
--- a/net/tipc/cluster.h
+++ b/net/tipc/cluster.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/cluster.h: Include file for TIPC cluster management routines 2 * net/tipc/cluster.h: Include file for TIPC cluster management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -50,7 +50,7 @@
50 * @highest_node: id of highest numbered node within cluster 50 * @highest_node: id of highest numbered node within cluster
51 * @highest_slave: (used for secondary node support) 51 * @highest_slave: (used for secondary node support)
52 */ 52 */
53 53
54struct cluster { 54struct cluster {
55 u32 addr; 55 u32 addr;
56 struct _zone *owner; 56 struct _zone *owner;
diff --git a/net/tipc/config.c b/net/tipc/config.c
index baf55c459c8b..14789a82de53 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/config.c: TIPC configuration management code 2 * net/tipc/config.c: TIPC configuration management code
3 * 3 *
4 * Copyright (c) 2002-2006, Ericsson AB 4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -86,7 +86,7 @@ struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
86 return buf; 86 return buf;
87} 87}
88 88
89int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, 89int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
90 void *tlv_data, int tlv_data_size) 90 void *tlv_data, int tlv_data_size)
91{ 91{
92 struct tlv_desc *tlv = (struct tlv_desc *)buf->tail; 92 struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
@@ -112,7 +112,7 @@ struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
112 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value))); 112 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
113 if (buf) { 113 if (buf) {
114 value_net = htonl(value); 114 value_net = htonl(value);
115 tipc_cfg_append_tlv(buf, tlv_type, &value_net, 115 tipc_cfg_append_tlv(buf, tlv_type, &value_net,
116 sizeof(value_net)); 116 sizeof(value_net));
117 } 117 }
118 return buf; 118 return buf;
@@ -182,7 +182,7 @@ int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
182 182
183static void cfg_cmd_event(struct tipc_cmd_msg *msg, 183static void cfg_cmd_event(struct tipc_cmd_msg *msg,
184 char *data, 184 char *data,
185 u32 sz, 185 u32 sz,
186 struct tipc_portid const *orig) 186 struct tipc_portid const *orig)
187{ 187{
188 int rv = -EINVAL; 188 int rv = -EINVAL;
@@ -192,7 +192,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
192 192
193 msg->cmd = ntohl(msg->cmd); 193 msg->cmd = ntohl(msg->cmd);
194 194
195 cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect, 195 cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect,
196 data, 0); 196 data, 0);
197 if (ntohl(msg->magic) != TIPC_MAGIC) 197 if (ntohl(msg->magic) != TIPC_MAGIC)
198 goto exit; 198 goto exit;
@@ -295,7 +295,7 @@ static struct sk_buff *cfg_set_own_addr(void)
295 " (cannot change node address once assigned)"); 295 " (cannot change node address once assigned)");
296 tipc_own_addr = addr; 296 tipc_own_addr = addr;
297 297
298 /* 298 /*
299 * Must release all spinlocks before calling start_net() because 299 * Must release all spinlocks before calling start_net() because
300 * Linux version of TIPC calls eth_media_start() which calls 300 * Linux version of TIPC calls eth_media_start() which calls
301 * register_netdevice_notifier() which may block! 301 * register_netdevice_notifier() which may block!
@@ -619,7 +619,7 @@ static void cfg_named_msg_event(void *userdata,
619 struct sk_buff **buf, 619 struct sk_buff **buf,
620 const unchar *msg, 620 const unchar *msg,
621 u32 size, 621 u32 size,
622 u32 importance, 622 u32 importance,
623 struct tipc_portid const *orig, 623 struct tipc_portid const *orig,
624 struct tipc_name_seq const *dest) 624 struct tipc_name_seq const *dest)
625{ 625{
@@ -640,7 +640,7 @@ static void cfg_named_msg_event(void *userdata,
640 /* Generate reply for request (if can't, return request) */ 640 /* Generate reply for request (if can't, return request) */
641 641
642 rep_buf = tipc_cfg_do_cmd(orig->node, 642 rep_buf = tipc_cfg_do_cmd(orig->node,
643 ntohs(req_hdr->tcm_type), 643 ntohs(req_hdr->tcm_type),
644 msg + sizeof(*req_hdr), 644 msg + sizeof(*req_hdr),
645 size - sizeof(*req_hdr), 645 size - sizeof(*req_hdr),
646 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr)); 646 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 7a728f954d84..5cd7cc56c54d 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/config.h: Include file for TIPC configuration service code 2 * net/tipc/config.h: Include file for TIPC configuration service code
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -43,7 +43,7 @@
43#include "link.h" 43#include "link.h"
44 44
45struct sk_buff *tipc_cfg_reply_alloc(int payload_size); 45struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
46int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, 46int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
47 void *tlv_data, int tlv_data_size); 47 void *tlv_data, int tlv_data_size);
48struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value); 48struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value);
49struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string); 49struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
@@ -68,8 +68,8 @@ static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
68 return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string); 68 return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
69} 69}
70 70
71struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, 71struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
72 const void *req_tlv_area, int req_tlv_space, 72 const void *req_tlv_area, int req_tlv_space,
73 int headroom); 73 int headroom);
74 74
75void tipc_cfg_link_event(u32 addr, char *name, int up); 75void tipc_cfg_link_event(u32 addr, char *name, int up);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 6f5b7ee31180..d2d7d32c02c7 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -89,7 +89,7 @@ int tipc_mode = TIPC_NOT_RUNNING;
89int tipc_random; 89int tipc_random;
90atomic_t tipc_user_count = ATOMIC_INIT(0); 90atomic_t tipc_user_count = ATOMIC_INIT(0);
91 91
92const char tipc_alphabet[] = 92const char tipc_alphabet[] =
93 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_."; 93 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
94 94
95/* configurable TIPC parameters */ 95/* configurable TIPC parameters */
@@ -171,13 +171,13 @@ int tipc_core_start(void)
171 get_random_bytes(&tipc_random, sizeof(tipc_random)); 171 get_random_bytes(&tipc_random, sizeof(tipc_random));
172 tipc_mode = TIPC_NODE_MODE; 172 tipc_mode = TIPC_NODE_MODE;
173 173
174 if ((res = tipc_handler_start()) || 174 if ((res = tipc_handler_start()) ||
175 (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions, 175 (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions,
176 tipc_random)) || 176 tipc_random)) ||
177 (res = tipc_reg_start()) || 177 (res = tipc_reg_start()) ||
178 (res = tipc_nametbl_init()) || 178 (res = tipc_nametbl_init()) ||
179 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) || 179 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
180 (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) || 180 (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
181 (res = tipc_netlink_start()) || 181 (res = tipc_netlink_start()) ||
182 (res = tipc_socket_init())) { 182 (res = tipc_socket_init())) {
183 tipc_core_stop(); 183 tipc_core_stop();
@@ -191,7 +191,7 @@ static int __init tipc_init(void)
191 int res; 191 int res;
192 192
193 tipc_log_reinit(CONFIG_TIPC_LOG); 193 tipc_log_reinit(CONFIG_TIPC_LOG);
194 info("Activated (version " TIPC_MOD_VER 194 info("Activated (version " TIPC_MOD_VER
195 " compiled " __DATE__ " " __TIME__ ")\n"); 195 " compiled " __DATE__ " " __TIME__ ")\n");
196 196
197 tipc_own_addr = 0; 197 tipc_own_addr = 0;
@@ -207,9 +207,9 @@ static int __init tipc_init(void)
207 207
208 if ((res = tipc_core_start())) 208 if ((res = tipc_core_start()))
209 err("Unable to start in single node mode\n"); 209 err("Unable to start in single node mode\n");
210 else 210 else
211 info("Started in single node mode\n"); 211 info("Started in single node mode\n");
212 return res; 212 return res;
213} 213}
214 214
215static void __exit tipc_exit(void) 215static void __exit tipc_exit(void)
@@ -268,11 +268,11 @@ EXPORT_SYMBOL(tipc_available_nodes);
268/* TIPC API for external bearers (see tipc_bearer.h) */ 268/* TIPC API for external bearers (see tipc_bearer.h) */
269 269
270EXPORT_SYMBOL(tipc_block_bearer); 270EXPORT_SYMBOL(tipc_block_bearer);
271EXPORT_SYMBOL(tipc_continue); 271EXPORT_SYMBOL(tipc_continue);
272EXPORT_SYMBOL(tipc_disable_bearer); 272EXPORT_SYMBOL(tipc_disable_bearer);
273EXPORT_SYMBOL(tipc_enable_bearer); 273EXPORT_SYMBOL(tipc_enable_bearer);
274EXPORT_SYMBOL(tipc_recv_msg); 274EXPORT_SYMBOL(tipc_recv_msg);
275EXPORT_SYMBOL(tipc_register_media); 275EXPORT_SYMBOL(tipc_register_media);
276 276
277/* TIPC API for external APIs (see tipc_port.h) */ 277/* TIPC API for external APIs (see tipc_port.h) */
278 278
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 4638947c2326..e40ada964d6e 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/core.h: Include file for TIPC global declarations 2 * net/tipc/core.h: Include file for TIPC global declarations
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -54,7 +54,7 @@
54#include <asm/atomic.h> 54#include <asm/atomic.h>
55#include <asm/hardirq.h> 55#include <asm/hardirq.h>
56#include <linux/netdevice.h> 56#include <linux/netdevice.h>
57#include <linux/in.h> 57#include <linux/in.h>
58#include <linux/list.h> 58#include <linux/list.h>
59#include <linux/vmalloc.h> 59#include <linux/vmalloc.h>
60 60
@@ -88,7 +88,7 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
88#define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0) 88#define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
89 89
90 90
91/* 91/*
92 * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer, 92 * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
93 * while DBG_OUTPUT is the null print buffer. These defaults can be changed 93 * while DBG_OUTPUT is the null print buffer. These defaults can be changed
94 * here, or on a per .c file basis, by redefining these symbols. The following 94 * here, or on a per .c file basis, by redefining these symbols. The following
@@ -126,9 +126,9 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
126#define dump(fmt,arg...) do {} while (0) 126#define dump(fmt,arg...) do {} while (0)
127 127
128 128
129/* 129/*
130 * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is 130 * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is
131 * the null print buffer. Thes ensures that any system or debug messages 131 * the null print buffer. Thes ensures that any system or debug messages
132 * that are generated without using the above macros are handled correctly. 132 * that are generated without using the above macros are handled correctly.
133 */ 133 */
134 134
@@ -138,10 +138,10 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
138#undef DBG_OUTPUT 138#undef DBG_OUTPUT
139#define DBG_OUTPUT TIPC_NULL 139#define DBG_OUTPUT TIPC_NULL
140 140
141#endif 141#endif
142 142
143 143
144/* 144/*
145 * TIPC-specific error codes 145 * TIPC-specific error codes
146 */ 146 */
147 147
@@ -204,11 +204,11 @@ u32 tipc_k_signal(Handler routine, unsigned long argument);
204 * @timer: pointer to timer structure 204 * @timer: pointer to timer structure
205 * @routine: pointer to routine to invoke when timer expires 205 * @routine: pointer to routine to invoke when timer expires
206 * @argument: value to pass to routine when timer expires 206 * @argument: value to pass to routine when timer expires
207 * 207 *
208 * Timer must be initialized before use (and terminated when no longer needed). 208 * Timer must be initialized before use (and terminated when no longer needed).
209 */ 209 */
210 210
211static inline void k_init_timer(struct timer_list *timer, Handler routine, 211static inline void k_init_timer(struct timer_list *timer, Handler routine,
212 unsigned long argument) 212 unsigned long argument)
213{ 213{
214 dbg("initializing timer %p\n", timer); 214 dbg("initializing timer %p\n", timer);
@@ -221,13 +221,13 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine,
221 * k_start_timer - start a timer 221 * k_start_timer - start a timer
222 * @timer: pointer to timer structure 222 * @timer: pointer to timer structure
223 * @msec: time to delay (in ms) 223 * @msec: time to delay (in ms)
224 * 224 *
225 * Schedules a previously initialized timer for later execution. 225 * Schedules a previously initialized timer for later execution.
226 * If timer is already running, the new timeout overrides the previous request. 226 * If timer is already running, the new timeout overrides the previous request.
227 * 227 *
228 * To ensure the timer doesn't expire before the specified delay elapses, 228 * To ensure the timer doesn't expire before the specified delay elapses,
229 * the amount of delay is rounded up when converting to the jiffies 229 * the amount of delay is rounded up when converting to the jiffies
230 * then an additional jiffy is added to account for the fact that 230 * then an additional jiffy is added to account for the fact that
231 * the starting time may be in the middle of the current jiffy. 231 * the starting time may be in the middle of the current jiffy.
232 */ 232 */
233 233
@@ -240,10 +240,10 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
240/** 240/**
241 * k_cancel_timer - cancel a timer 241 * k_cancel_timer - cancel a timer
242 * @timer: pointer to timer structure 242 * @timer: pointer to timer structure
243 * 243 *
244 * Cancels a previously initialized timer. 244 * Cancels a previously initialized timer.
245 * Can be called safely even if the timer is already inactive. 245 * Can be called safely even if the timer is already inactive.
246 * 246 *
247 * WARNING: Must not be called when holding locks required by the timer's 247 * WARNING: Must not be called when holding locks required by the timer's
248 * timeout routine, otherwise deadlock can occur on SMP systems! 248 * timeout routine, otherwise deadlock can occur on SMP systems!
249 */ 249 */
@@ -257,11 +257,11 @@ static inline void k_cancel_timer(struct timer_list *timer)
257/** 257/**
258 * k_term_timer - terminate a timer 258 * k_term_timer - terminate a timer
259 * @timer: pointer to timer structure 259 * @timer: pointer to timer structure
260 * 260 *
261 * Prevents further use of a previously initialized timer. 261 * Prevents further use of a previously initialized timer.
262 * 262 *
263 * WARNING: Caller must ensure timer isn't currently running. 263 * WARNING: Caller must ensure timer isn't currently running.
264 * 264 *
265 * (Do not "enhance" this routine to automatically cancel an active timer, 265 * (Do not "enhance" this routine to automatically cancel an active timer,
266 * otherwise deadlock can arise when a timeout routine calls k_term_timer.) 266 * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
267 */ 267 */
@@ -302,7 +302,7 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
302 * @size: message size (including TIPC header) 302 * @size: message size (including TIPC header)
303 * 303 *
304 * Returns a new buffer with data pointers set to the specified size. 304 * Returns a new buffer with data pointers set to the specified size.
305 * 305 *
306 * NOTE: Headroom is reserved to allow prepending of a data link header. 306 * NOTE: Headroom is reserved to allow prepending of a data link header.
307 * There may also be unrequested tailroom present at the buffer's end. 307 * There may also be unrequested tailroom present at the buffer's end.
308 */ 308 */
@@ -334,4 +334,4 @@ static inline void buf_discard(struct sk_buff *skb)
334 kfree_skb(skb); 334 kfree_skb(skb);
335} 335}
336 336
337#endif 337#endif
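As a side note on the k_start_timer() comment above, the delay conversion it describes can be expressed in one line. This is only a sketch of the idea using the standard jiffies helpers, not the function's actual body.

static inline void start_timer_sketch(struct timer_list *timer, unsigned long msec)
{
	/* msecs_to_jiffies() rounds the delay up; the extra jiffy covers
	 * starting part-way through the current tick, as described above. */
	mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
}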
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 627f99b7afdf..e809d2a2ce06 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/dbg.c: TIPC print buffer routines for debugging 2 * net/tipc/dbg.c: TIPC print buffer routines for debugging
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -123,34 +123,34 @@ int tipc_printbuf_empty(struct print_buf *pb)
123/** 123/**
124 * tipc_printbuf_validate - check for print buffer overflow 124 * tipc_printbuf_validate - check for print buffer overflow
125 * @pb: pointer to print buffer structure 125 * @pb: pointer to print buffer structure
126 * 126 *
127 * Verifies that a print buffer has captured all data written to it. 127 * Verifies that a print buffer has captured all data written to it.
128 * If data has been lost, linearize buffer and prepend an error message 128 * If data has been lost, linearize buffer and prepend an error message
129 * 129 *
130 * Returns length of print buffer data string (including trailing NUL) 130 * Returns length of print buffer data string (including trailing NUL)
131 */ 131 */
132 132
133int tipc_printbuf_validate(struct print_buf *pb) 133int tipc_printbuf_validate(struct print_buf *pb)
134{ 134{
135 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n"; 135 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
136 char *cp_buf; 136 char *cp_buf;
137 struct print_buf cb; 137 struct print_buf cb;
138 138
139 if (!pb->buf) 139 if (!pb->buf)
140 return 0; 140 return 0;
141 141
142 if (pb->buf[pb->size - 1] == 0) { 142 if (pb->buf[pb->size - 1] == 0) {
143 cp_buf = kmalloc(pb->size, GFP_ATOMIC); 143 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
144 if (cp_buf != NULL){ 144 if (cp_buf != NULL){
145 tipc_printbuf_init(&cb, cp_buf, pb->size); 145 tipc_printbuf_init(&cb, cp_buf, pb->size);
146 tipc_printbuf_move(&cb, pb); 146 tipc_printbuf_move(&cb, pb);
147 tipc_printbuf_move(pb, &cb); 147 tipc_printbuf_move(pb, &cb);
148 kfree(cp_buf); 148 kfree(cp_buf);
149 memcpy(pb->buf, err, strlen(err)); 149 memcpy(pb->buf, err, strlen(err));
150 } else { 150 } else {
151 tipc_printbuf_reset(pb); 151 tipc_printbuf_reset(pb);
152 tipc_printf(pb, err); 152 tipc_printf(pb, err);
153 } 153 }
154 } 154 }
155 return (pb->crs - pb->buf + 1); 155 return (pb->crs - pb->buf + 1);
156} 156}
@@ -159,7 +159,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
159 * tipc_printbuf_move - move print buffer contents to another print buffer 159 * tipc_printbuf_move - move print buffer contents to another print buffer
160 * @pb_to: pointer to destination print buffer structure 160 * @pb_to: pointer to destination print buffer structure
161 * @pb_from: pointer to source print buffer structure 161 * @pb_from: pointer to source print buffer structure
162 * 162 *
163 * Current contents of destination print buffer (if any) are discarded. 163 * Current contents of destination print buffer (if any) are discarded.
164 * Source print buffer becomes empty if a successful move occurs. 164 * Source print buffer becomes empty if a successful move occurs.
165 */ 165 */
@@ -234,13 +234,13 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
234 pb->crs = pb->buf + pb->size - 1; 234 pb->crs = pb->buf + pb->size - 1;
235 } else { 235 } else {
236 strcpy(pb->buf, print_string + chars_left); 236 strcpy(pb->buf, print_string + chars_left);
237 save_char = print_string[chars_left]; 237 save_char = print_string[chars_left];
238 print_string[chars_left] = 0; 238 print_string[chars_left] = 0;
239 strcpy(pb->crs, print_string); 239 strcpy(pb->crs, print_string);
240 print_string[chars_left] = save_char; 240 print_string[chars_left] = save_char;
241 pb->crs = pb->buf + chars_to_add - chars_left; 241 pb->crs = pb->buf + chars_to_add - chars_left;
242 } 242 }
243 } 243 }
244 pb_next = pb->next; 244 pb_next = pb->next;
245 pb->next = NULL; 245 pb->next = NULL;
246 pb = pb_next; 246 pb = pb_next;
@@ -249,7 +249,7 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
249} 249}
250 250
251/** 251/**
252 * TIPC_TEE - perform next output operation on both print buffers 252 * TIPC_TEE - perform next output operation on both print buffers
253 * @b0: pointer to chain of print buffers (may be NULL) 253 * @b0: pointer to chain of print buffers (may be NULL)
254 * @b1: pointer to print buffer to add to chain 254 * @b1: pointer to print buffer to add to chain
255 * 255 *
@@ -350,7 +350,7 @@ void tipc_dump(struct print_buf *pb, const char *fmt, ...)
350} 350}
351 351
352/** 352/**
353 * tipc_log_stop - free up TIPC log print buffer 353 * tipc_log_stop - free up TIPC log print buffer
354 */ 354 */
355 355
356void tipc_log_stop(void) 356void tipc_log_stop(void)
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
index 467c0bc78a79..c01b085000e0 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/dbg.h: Include file for TIPC print buffer routines 2 * net/tipc/dbg.h: Include file for TIPC print buffer routines
3 * 3 *
4 * Copyright (c) 1997-2006, Ericsson AB 4 * Copyright (c) 1997-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 3b0cd12f37da..5d643e5721eb 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/discover.c 2 * net/tipc/discover.c
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -56,10 +56,10 @@
56#define CHECK_LINK_COUNT 306 56#define CHECK_LINK_COUNT 306
57#endif 57#endif
58 58
59/* 59/*
60 * TODO: Most of the inter-cluster setup stuff should be 60 * TODO: Most of the inter-cluster setup stuff should be
61 * rewritten, and be made conformant with specification. 61 * rewritten, and be made conformant with specification.
62 */ 62 */
63 63
64 64
65/** 65/**
@@ -80,10 +80,10 @@ struct link_req {
80 80
81 81
82#if 0 82#if 0
83int disc_create_link(const struct tipc_link_create *argv) 83int disc_create_link(const struct tipc_link_create *argv)
84{ 84{
85 /* 85 /*
86 * Code for inter cluster link setup here 86 * Code for inter cluster link setup here
87 */ 87 */
88 return TIPC_OK; 88 return TIPC_OK;
89} 89}
@@ -93,16 +93,16 @@ int disc_create_link(const struct tipc_link_create *argv)
93 * disc_lost_link(): A link has lost contact 93 * disc_lost_link(): A link has lost contact
94 */ 94 */
95 95
96void tipc_disc_link_event(u32 addr, char *name, int up) 96void tipc_disc_link_event(u32 addr, char *name, int up)
97{ 97{
98 if (in_own_cluster(addr)) 98 if (in_own_cluster(addr))
99 return; 99 return;
100 /* 100 /*
101 * Code for inter cluster link setup here 101 * Code for inter cluster link setup here
102 */ 102 */
103} 103}
104 104
105/** 105/**
106 * tipc_disc_init_msg - initialize a link setup message 106 * tipc_disc_init_msg - initialize a link setup message
107 * @type: message type (request or response) 107 * @type: message type (request or response)
108 * @req_links: number of links associated with message 108 * @req_links: number of links associated with message
@@ -210,7 +210,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
210 dbg("creating link\n"); 210 dbg("creating link\n");
211 link = tipc_link_create(b_ptr, orig, &media_addr); 211 link = tipc_link_create(b_ptr, orig, &media_addr);
212 if (!link) { 212 if (!link) {
213 spin_unlock_bh(&n_ptr->lock); 213 spin_unlock_bh(&n_ptr->lock);
214 return; 214 return;
215 } 215 }
216 } 216 }
@@ -224,10 +224,10 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
224 warn("Resetting link <%s>, peer interface address changed\n", 224 warn("Resetting link <%s>, peer interface address changed\n",
225 link->name); 225 link->name);
226 memcpy(addr, &media_addr, sizeof(*addr)); 226 memcpy(addr, &media_addr, sizeof(*addr));
227 tipc_link_reset(link); 227 tipc_link_reset(link);
228 } 228 }
229 link_fully_up = (link->state == WORKING_WORKING); 229 link_fully_up = (link->state == WORKING_WORKING);
230 spin_unlock_bh(&n_ptr->lock); 230 spin_unlock_bh(&n_ptr->lock);
231 if ((type == DSC_RESP_MSG) || link_fully_up) 231 if ((type == DSC_RESP_MSG) || link_fully_up)
232 return; 232 return;
233 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr); 233 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
@@ -244,23 +244,23 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
244 * @req: ptr to link request structure 244 * @req: ptr to link request structure
245 */ 245 */
246 246
247void tipc_disc_stop_link_req(struct link_req *req) 247void tipc_disc_stop_link_req(struct link_req *req)
248{ 248{
249 if (!req) 249 if (!req)
250 return; 250 return;
251 251
252 k_cancel_timer(&req->timer); 252 k_cancel_timer(&req->timer);
253 k_term_timer(&req->timer); 253 k_term_timer(&req->timer);
254 buf_discard(req->buf); 254 buf_discard(req->buf);
255 kfree(req); 255 kfree(req);
256} 256}
257 257
258/** 258/**
259 * tipc_disc_update_link_req - update frequency of periodic link setup requests 259 * tipc_disc_update_link_req - update frequency of periodic link setup requests
260 * @req: ptr to link request structure 260 * @req: ptr to link request structure
261 */ 261 */
262 262
263void tipc_disc_update_link_req(struct link_req *req) 263void tipc_disc_update_link_req(struct link_req *req)
264{ 264{
265 if (!req) 265 if (!req)
266 return; 266 return;
@@ -278,16 +278,16 @@ void tipc_disc_update_link_req(struct link_req *req)
278 } else { 278 } else {
279 /* leave timer "as is" if haven't yet reached a "normal" rate */ 279 /* leave timer "as is" if haven't yet reached a "normal" rate */
280 } 280 }
281} 281}
282 282
283/** 283/**
284 * disc_timeout - send a periodic link setup request 284 * disc_timeout - send a periodic link setup request
285 * @req: ptr to link request structure 285 * @req: ptr to link request structure
286 * 286 *
287 * Called whenever a link setup request timer associated with a bearer expires. 287 * Called whenever a link setup request timer associated with a bearer expires.
288 */ 288 */
289 289
290static void disc_timeout(struct link_req *req) 290static void disc_timeout(struct link_req *req)
291{ 291{
292 spin_lock_bh(&req->bearer->publ.lock); 292 spin_lock_bh(&req->bearer->publ.lock);
293 293
@@ -300,7 +300,7 @@ static void disc_timeout(struct link_req *req)
300 req->timer_intv *= 2; 300 req->timer_intv *= 2;
301 if (req->timer_intv > TIPC_LINK_REQ_FAST) 301 if (req->timer_intv > TIPC_LINK_REQ_FAST)
302 req->timer_intv = TIPC_LINK_REQ_FAST; 302 req->timer_intv = TIPC_LINK_REQ_FAST;
303 if ((req->timer_intv == TIPC_LINK_REQ_FAST) && 303 if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
304 (req->bearer->nodes.count)) 304 (req->bearer->nodes.count))
305 req->timer_intv = TIPC_LINK_REQ_SLOW; 305 req->timer_intv = TIPC_LINK_REQ_SLOW;
306 } 306 }
@@ -315,14 +315,14 @@ static void disc_timeout(struct link_req *req)
315 * @dest: destination address for request messages 315 * @dest: destination address for request messages
316 * @dest_domain: network domain of node(s) which should respond to message 316 * @dest_domain: network domain of node(s) which should respond to message
317 * @req_links: max number of desired links 317 * @req_links: max number of desired links
318 * 318 *
319 * Returns pointer to link request structure, or NULL if unable to create. 319 * Returns pointer to link request structure, or NULL if unable to create.
320 */ 320 */
321 321
322struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, 322struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
323 const struct tipc_media_addr *dest, 323 const struct tipc_media_addr *dest,
324 u32 dest_domain, 324 u32 dest_domain,
325 u32 req_links) 325 u32 req_links)
326{ 326{
327 struct link_req *req; 327 struct link_req *req;
328 328
@@ -342,5 +342,5 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
342 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); 342 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
343 k_start_timer(&req->timer, req->timer_intv); 343 k_start_timer(&req->timer, req->timer_intv);
344 return req; 344 return req;
345} 345}
346 346
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 0454fd1ae7f3..9fd7587b143a 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -41,7 +41,7 @@
41 41
42struct link_req; 42struct link_req;
43 43
44struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, 44struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
45 const struct tipc_media_addr *dest, 45 const struct tipc_media_addr *dest,
46 u32 dest_domain, 46 u32 dest_domain,
47 u32 req_links); 47 u32 req_links);
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 682da4a28041..9be4839e32c5 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2006, Ericsson AB 4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -50,7 +50,7 @@
50 * @dev: ptr to associated Ethernet network device 50 * @dev: ptr to associated Ethernet network device
51 * @tipc_packet_type: used in binding TIPC to Ethernet driver 51 * @tipc_packet_type: used in binding TIPC to Ethernet driver
52 */ 52 */
53 53
54struct eth_bearer { 54struct eth_bearer {
55 struct tipc_bearer *bearer; 55 struct tipc_bearer *bearer;
56 struct net_device *dev; 56 struct net_device *dev;
@@ -62,10 +62,10 @@ static int eth_started = 0;
62static struct notifier_block notifier; 62static struct notifier_block notifier;
63 63
64/** 64/**
65 * send_msg - send a TIPC message out over an Ethernet interface 65 * send_msg - send a TIPC message out over an Ethernet interface
66 */ 66 */
67 67
68static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, 68static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
69 struct tipc_media_addr *dest) 69 struct tipc_media_addr *dest)
70{ 70{
71 struct sk_buff *clone; 71 struct sk_buff *clone;
@@ -76,7 +76,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
76 clone->nh.raw = clone->data; 76 clone->nh.raw = clone->data;
77 dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev; 77 dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
78 clone->dev = dev; 78 clone->dev = dev;
79 dev->hard_header(clone, dev, ETH_P_TIPC, 79 dev->hard_header(clone, dev, ETH_P_TIPC,
80 &dest->dev_addr.eth_addr, 80 &dest->dev_addr.eth_addr,
81 dev->dev_addr, clone->len); 81 dev->dev_addr, clone->len);
82 dev_queue_xmit(clone); 82 dev_queue_xmit(clone);
@@ -86,12 +86,12 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
86 86
87/** 87/**
88 * recv_msg - handle incoming TIPC message from an Ethernet interface 88 * recv_msg - handle incoming TIPC message from an Ethernet interface
89 * 89 *
90 * Routine truncates any Ethernet padding/CRC appended to the message, 90 * Routine truncates any Ethernet padding/CRC appended to the message,
91 * and ensures message size matches actual length 91 * and ensures message size matches actual length
92 */ 92 */
93 93
94static int recv_msg(struct sk_buff *buf, struct net_device *dev, 94static int recv_msg(struct sk_buff *buf, struct net_device *dev,
95 struct packet_type *pt, struct net_device *orig_dev) 95 struct packet_type *pt, struct net_device *orig_dev)
96{ 96{
97 struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv; 97 struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
@@ -99,14 +99,14 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
99 99
100 if (likely(eb_ptr->bearer)) { 100 if (likely(eb_ptr->bearer)) {
101 if (likely(!dev->promiscuity) || 101 if (likely(!dev->promiscuity) ||
102 !memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) || 102 !memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
103 !memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) { 103 !memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
104 size = msg_size((struct tipc_msg *)buf->data); 104 size = msg_size((struct tipc_msg *)buf->data);
105 skb_trim(buf, size); 105 skb_trim(buf, size);
106 if (likely(buf->len == size)) { 106 if (likely(buf->len == size)) {
107 buf->next = NULL; 107 buf->next = NULL;
108 tipc_recv_msg(buf, eb_ptr->bearer); 108 tipc_recv_msg(buf, eb_ptr->bearer);
109 return TIPC_OK; 109 return TIPC_OK;
110 } 110 }
111 } 111 }
112 } 112 }
@@ -115,7 +115,7 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
115} 115}
116 116
117/** 117/**
118 * enable_bearer - attach TIPC bearer to an Ethernet interface 118 * enable_bearer - attach TIPC bearer to an Ethernet interface
119 */ 119 */
120 120
121static int enable_bearer(struct tipc_bearer *tb_ptr) 121static int enable_bearer(struct tipc_bearer *tb_ptr)
@@ -127,7 +127,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
127 127
128 /* Find device with specified name */ 128 /* Find device with specified name */
129 129
130 while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) { 130 while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
131 dev = dev->next; 131 dev = dev->next;
132 } 132 }
133 if (!dev) 133 if (!dev)
@@ -154,14 +154,14 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
154 eb_ptr->bearer = tb_ptr; 154 eb_ptr->bearer = tb_ptr;
155 tb_ptr->usr_handle = (void *)eb_ptr; 155 tb_ptr->usr_handle = (void *)eb_ptr;
156 tb_ptr->mtu = dev->mtu; 156 tb_ptr->mtu = dev->mtu;
157 tb_ptr->blocked = 0; 157 tb_ptr->blocked = 0;
158 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH); 158 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
159 memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN); 159 memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
160 return 0; 160 return 0;
161} 161}
162 162
163/** 163/**
164 * disable_bearer - detach TIPC bearer from an Ethernet interface 164 * disable_bearer - detach TIPC bearer from an Ethernet interface
165 * 165 *
166 * We really should do dev_remove_pack() here, but this function can not be 166 * We really should do dev_remove_pack() here, but this function can not be
167 * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away 167 * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away
@@ -176,11 +176,11 @@ static void disable_bearer(struct tipc_bearer *tb_ptr)
176/** 176/**
177 * recv_notification - handle device updates from OS 177 * recv_notification - handle device updates from OS
178 * 178 *
179 * Change the state of the Ethernet bearer (if any) associated with the 179 * Change the state of the Ethernet bearer (if any) associated with the
180 * specified device. 180 * specified device.
181 */ 181 */
182 182
183static int recv_notification(struct notifier_block *nb, unsigned long evt, 183static int recv_notification(struct notifier_block *nb, unsigned long evt,
184 void *dv) 184 void *dv)
185{ 185{
186 struct net_device *dev = (struct net_device *)dv; 186 struct net_device *dev = (struct net_device *)dv;
@@ -194,7 +194,7 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
194 if (!eb_ptr->bearer) 194 if (!eb_ptr->bearer)
195 return NOTIFY_DONE; /* bearer had been disabled */ 195 return NOTIFY_DONE; /* bearer had been disabled */
196 196
197 eb_ptr->bearer->mtu = dev->mtu; 197 eb_ptr->bearer->mtu = dev->mtu;
198 198
199 switch (evt) { 199 switch (evt) {
200 case NETDEV_CHANGE: 200 case NETDEV_CHANGE:
@@ -210,12 +210,12 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
210 tipc_block_bearer(eb_ptr->bearer->name); 210 tipc_block_bearer(eb_ptr->bearer->name);
211 break; 211 break;
212 case NETDEV_CHANGEMTU: 212 case NETDEV_CHANGEMTU:
213 case NETDEV_CHANGEADDR: 213 case NETDEV_CHANGEADDR:
214 tipc_block_bearer(eb_ptr->bearer->name); 214 tipc_block_bearer(eb_ptr->bearer->name);
215 tipc_continue(eb_ptr->bearer); 215 tipc_continue(eb_ptr->bearer);
216 break; 216 break;
217 case NETDEV_UNREGISTER: 217 case NETDEV_UNREGISTER:
218 case NETDEV_CHANGENAME: 218 case NETDEV_CHANGENAME:
219 tipc_disable_bearer(eb_ptr->bearer->name); 219 tipc_disable_bearer(eb_ptr->bearer->name);
220 break; 220 break;
221 } 221 }
@@ -227,7 +227,7 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
227 */ 227 */
228 228
229static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) 229static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
230{ 230{
231 unchar *addr = (unchar *)&a->dev_addr; 231 unchar *addr = (unchar *)&a->dev_addr;
232 232
233 if (str_size < 18) 233 if (str_size < 18)
@@ -246,7 +246,7 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
246 */ 246 */
247 247
248int tipc_eth_media_start(void) 248int tipc_eth_media_start(void)
249{ 249{
250 struct tipc_media_addr bcast_addr; 250 struct tipc_media_addr bcast_addr;
251 int res; 251 int res;
252 252
@@ -259,8 +259,8 @@ int tipc_eth_media_start(void)
259 memset(eth_bearers, 0, sizeof(eth_bearers)); 259 memset(eth_bearers, 0, sizeof(eth_bearers));
260 260
261 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth", 261 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
262 enable_bearer, disable_bearer, send_msg, 262 enable_bearer, disable_bearer, send_msg,
263 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY, 263 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
264 ETH_LINK_TOLERANCE, ETH_LINK_WINDOW); 264 ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
265 if (res) 265 if (res)
266 return res; 266 return res;
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index eb80778d6d9c..e1dcf663f8a6 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/handler.c: TIPC signal handling 2 * net/tipc/handler.c: TIPC signal handling
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -95,7 +95,7 @@ static void process_signal_queue(unsigned long dummy)
95 95
96int tipc_handler_start(void) 96int tipc_handler_start(void)
97{ 97{
98 tipc_queue_item_cache = 98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item), 99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
100 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 100 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
101 if (!tipc_queue_item_cache) 101 if (!tipc_queue_item_cache)
@@ -110,7 +110,7 @@ int tipc_handler_start(void)
110void tipc_handler_stop(void) 110void tipc_handler_stop(void)
111{ 111{
112 struct list_head *l, *n; 112 struct list_head *l, *n;
113 struct queue_item *item; 113 struct queue_item *item;
114 114
115 if (!handler_enabled) 115 if (!handler_enabled)
116 return; 116 return;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1bb983c8130b..71c2f2fd405c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/link.c: TIPC link code 2 * net/tipc/link.c: TIPC link code
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -50,29 +50,29 @@
50#include "bcast.h" 50#include "bcast.h"
51 51
52 52
53/* 53/*
54 * Limit for deferred reception queue: 54 * Limit for deferred reception queue:
55 */ 55 */
56 56
57#define DEF_QUEUE_LIMIT 256u 57#define DEF_QUEUE_LIMIT 256u
58 58
59/* 59/*
60 * Link state events: 60 * Link state events:
61 */ 61 */
62 62
63#define STARTING_EVT 856384768 /* link processing trigger */ 63#define STARTING_EVT 856384768 /* link processing trigger */
64#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ 64#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
65#define TIMEOUT_EVT 560817u /* link timer expired */ 65#define TIMEOUT_EVT 560817u /* link timer expired */
66 66
67/* 67/*
68 * The following two 'message types' is really just implementation 68 * The following two 'message types' is really just implementation
69 * data conveniently stored in the message header. 69 * data conveniently stored in the message header.
70 * They must not be considered part of the protocol 70 * They must not be considered part of the protocol
71 */ 71 */
72#define OPEN_MSG 0 72#define OPEN_MSG 0
73#define CLOSED_MSG 1 73#define CLOSED_MSG 1
74 74
75/* 75/*
76 * State value stored in 'exp_msg_count' 76 * State value stored in 'exp_msg_count'
77 */ 77 */
78 78
@@ -97,7 +97,7 @@ struct link_name {
97 97
98/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */ 98/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
99 99
100/** 100/**
101 * struct link_event - link up/down event notification 101 * struct link_event - link up/down event notification
102 */ 102 */
103 103
@@ -121,7 +121,7 @@ static int link_send_sections_long(struct port *sender,
121static void link_check_defragm_bufs(struct link *l_ptr); 121static void link_check_defragm_bufs(struct link *l_ptr);
122static void link_state_event(struct link *l_ptr, u32 event); 122static void link_state_event(struct link *l_ptr, u32 event);
123static void link_reset_statistics(struct link *l_ptr); 123static void link_reset_statistics(struct link *l_ptr);
124static void link_print(struct link *l_ptr, struct print_buf *buf, 124static void link_print(struct link *l_ptr, struct print_buf *buf,
125 const char *str); 125 const char *str);
126 126
127/* 127/*
@@ -136,13 +136,13 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
136 * 136 *
137 * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size 137 * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
138 * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0, 138 * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
139 * the dbg_link_XXX() routines simply send their output to the standard 139 * the dbg_link_XXX() routines simply send their output to the standard
140 * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful 140 * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
141 * when there is only a single link in the system being debugged. 141 * when there is only a single link in the system being debugged.
142 * 142 *
143 * Notes: 143 * Notes:
144 * - When enabled, LINK_LOG_BUF_SIZE should be set to at least TIPC_PB_MIN_SIZE 144 * - When enabled, LINK_LOG_BUF_SIZE should be set to at least TIPC_PB_MIN_SIZE
145 * - "l_ptr" must be valid when using dbg_link_XXX() macros 145 * - "l_ptr" must be valid when using dbg_link_XXX() macros
146 */ 146 */
147 147
148#define LINK_LOG_BUF_SIZE 0 148#define LINK_LOG_BUF_SIZE 0
@@ -222,18 +222,18 @@ static u32 link_max_pkt(struct link *l_ptr)
222static void link_init_max_pkt(struct link *l_ptr) 222static void link_init_max_pkt(struct link *l_ptr)
223{ 223{
224 u32 max_pkt; 224 u32 max_pkt;
225 225
226 max_pkt = (l_ptr->b_ptr->publ.mtu & ~3); 226 max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
227 if (max_pkt > MAX_MSG_SIZE) 227 if (max_pkt > MAX_MSG_SIZE)
228 max_pkt = MAX_MSG_SIZE; 228 max_pkt = MAX_MSG_SIZE;
229 229
230 l_ptr->max_pkt_target = max_pkt; 230 l_ptr->max_pkt_target = max_pkt;
231 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT) 231 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
232 l_ptr->max_pkt = l_ptr->max_pkt_target; 232 l_ptr->max_pkt = l_ptr->max_pkt_target;
233 else 233 else
234 l_ptr->max_pkt = MAX_PKT_DEFAULT; 234 l_ptr->max_pkt = MAX_PKT_DEFAULT;
235 235
236 l_ptr->max_pkt_probes = 0; 236 l_ptr->max_pkt_probes = 0;
237} 237}
238 238
239static u32 link_next_sent(struct link *l_ptr) 239static u32 link_next_sent(struct link *l_ptr)
@@ -269,7 +269,7 @@ int tipc_link_is_active(struct link *l_ptr)
269 * link_name_validate - validate & (optionally) deconstruct link name 269 * link_name_validate - validate & (optionally) deconstruct link name
270 * @name - ptr to link name string 270 * @name - ptr to link name string
271 * @name_parts - ptr to area for link name components (or NULL if not needed) 271 * @name_parts - ptr to area for link name components (or NULL if not needed)
272 * 272 *
273 * Returns 1 if link name is valid, otherwise 0. 273 * Returns 1 if link name is valid, otherwise 0.
274 */ 274 */
275 275
@@ -317,8 +317,8 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
317 &z_peer, &c_peer, &n_peer, &dummy) != 3) || 317 &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
318 (z_local > 255) || (c_local > 4095) || (n_local > 4095) || 318 (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
319 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) || 319 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
320 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) || 320 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
321 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) || 321 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
322 (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) || 322 (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
323 (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1))) 323 (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
324 return 0; 324 return 0;
@@ -337,7 +337,7 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
337/** 337/**
338 * link_timeout - handle expiration of link timer 338 * link_timeout - handle expiration of link timer
339 * @l_ptr: pointer to link 339 * @l_ptr: pointer to link
340 * 340 *
341 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict 341 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
342 * with tipc_link_delete(). (There is no risk that the node will be deleted by 342 * with tipc_link_delete(). (There is no risk that the node will be deleted by
343 * another thread because tipc_link_delete() always cancels the link timer before 343 * another thread because tipc_link_delete() always cancels the link timer before
@@ -406,7 +406,7 @@ static void link_set_timer(struct link *l_ptr, u32 time)
406 * @b_ptr: pointer to associated bearer 406 * @b_ptr: pointer to associated bearer
407 * @peer: network address of node at other end of link 407 * @peer: network address of node at other end of link
408 * @media_addr: media address to use when sending messages over link 408 * @media_addr: media address to use when sending messages over link
409 * 409 *
410 * Returns pointer to link. 410 * Returns pointer to link.
411 */ 411 */
412 412
@@ -427,7 +427,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
427 if_name = strchr(b_ptr->publ.name, ':') + 1; 427 if_name = strchr(b_ptr->publ.name, ':') + 1;
428 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:", 428 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
429 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 429 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
430 tipc_node(tipc_own_addr), 430 tipc_node(tipc_own_addr),
431 if_name, 431 if_name,
432 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); 432 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
433 /* note: peer i/f is appended to link name by reset/activate */ 433 /* note: peer i/f is appended to link name by reset/activate */
@@ -478,17 +478,17 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
478 478
479 dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n", 479 dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
480 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit); 480 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
481 481
482 return l_ptr; 482 return l_ptr;
483} 483}
484 484
485/** 485/**
486 * tipc_link_delete - delete a link 486 * tipc_link_delete - delete a link
487 * @l_ptr: pointer to link 487 * @l_ptr: pointer to link
488 * 488 *
489 * Note: 'tipc_net_lock' is write_locked, bearer is locked. 489 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
490 * This routine must not grab the node lock until after link timer cancellation 490 * This routine must not grab the node lock until after link timer cancellation
491 * to avoid a potential deadlock situation. 491 * to avoid a potential deadlock situation.
492 */ 492 */
493 493
494void tipc_link_delete(struct link *l_ptr) 494void tipc_link_delete(struct link *l_ptr)
@@ -501,7 +501,7 @@ void tipc_link_delete(struct link *l_ptr)
501 dbg("tipc_link_delete()\n"); 501 dbg("tipc_link_delete()\n");
502 502
503 k_cancel_timer(&l_ptr->timer); 503 k_cancel_timer(&l_ptr->timer);
504 504
505 tipc_node_lock(l_ptr->owner); 505 tipc_node_lock(l_ptr->owner);
506 tipc_link_reset(l_ptr); 506 tipc_link_reset(l_ptr);
507 tipc_node_detach_link(l_ptr->owner, l_ptr); 507 tipc_node_detach_link(l_ptr->owner, l_ptr);
@@ -521,12 +521,12 @@ void tipc_link_start(struct link *l_ptr)
521} 521}
522 522
523/** 523/**
524 * link_schedule_port - schedule port for deferred sending 524 * link_schedule_port - schedule port for deferred sending
525 * @l_ptr: pointer to link 525 * @l_ptr: pointer to link
526 * @origport: reference to sending port 526 * @origport: reference to sending port
527 * @sz: amount of data to be sent 527 * @sz: amount of data to be sent
528 * 528 *
529 * Schedules port for renewed sending of messages after link congestion 529 * Schedules port for renewed sending of messages after link congestion
530 * has abated. 530 * has abated.
531 */ 531 */
532 532
@@ -567,7 +567,7 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
567 return; 567 return;
568 if (link_congested(l_ptr)) 568 if (link_congested(l_ptr))
569 goto exit; 569 goto exit;
570 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 570 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
571 wait_list) { 571 wait_list) {
572 if (win <= 0) 572 if (win <= 0)
573 break; 573 break;
@@ -584,7 +584,7 @@ exit:
584 spin_unlock_bh(&tipc_port_list_lock); 584 spin_unlock_bh(&tipc_port_list_lock);
585} 585}
586 586
587/** 587/**
588 * link_release_outqueue - purge link's outbound message queue 588 * link_release_outqueue - purge link's outbound message queue
589 * @l_ptr: pointer to link 589 * @l_ptr: pointer to link
590 */ 590 */
@@ -621,7 +621,7 @@ void tipc_link_reset_fragments(struct link *l_ptr)
621 l_ptr->defragm_buf = NULL; 621 l_ptr->defragm_buf = NULL;
622} 622}
623 623
624/** 624/**
625 * tipc_link_stop - purge all inbound and outbound messages associated with link 625 * tipc_link_stop - purge all inbound and outbound messages associated with link
626 * @l_ptr: pointer to link 626 * @l_ptr: pointer to link
627 */ 627 */
@@ -665,7 +665,7 @@ static void link_send_event(void (*fcn)(u32 a, char *n, int up),
665 struct link *l_ptr, int up) 665 struct link *l_ptr, int up)
666{ 666{
667 struct link_event *ev; 667 struct link_event *ev;
668 668
669 ev = kmalloc(sizeof(*ev), GFP_ATOMIC); 669 ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
670 if (!ev) { 670 if (!ev) {
671 warn("Link event allocation failure\n"); 671 warn("Link event allocation failure\n");
@@ -690,15 +690,15 @@ void tipc_link_reset(struct link *l_ptr)
690 u32 prev_state = l_ptr->state; 690 u32 prev_state = l_ptr->state;
691 u32 checkpoint = l_ptr->next_in_no; 691 u32 checkpoint = l_ptr->next_in_no;
692 int was_active_link = tipc_link_is_active(l_ptr); 692 int was_active_link = tipc_link_is_active(l_ptr);
693 693
694 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1); 694 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
695 695
696 /* Link is down, accept any session: */ 696 /* Link is down, accept any session: */
697 l_ptr->peer_session = 0; 697 l_ptr->peer_session = 0;
698 698
699 /* Prepare for max packet size negotiation */ 699 /* Prepare for max packet size negotiation */
700 link_init_max_pkt(l_ptr); 700 link_init_max_pkt(l_ptr);
701 701
702 l_ptr->state = RESET_UNKNOWN; 702 l_ptr->state = RESET_UNKNOWN;
703 dbg_link_state("Resetting Link\n"); 703 dbg_link_state("Resetting Link\n");
704 704
@@ -770,7 +770,7 @@ static void link_activate(struct link *l_ptr)
770 770
771static void link_state_event(struct link *l_ptr, unsigned event) 771static void link_state_event(struct link *l_ptr, unsigned event)
772{ 772{
773 struct link *other; 773 struct link *other;
774 u32 cont_intv = l_ptr->continuity_interval; 774 u32 cont_intv = l_ptr->continuity_interval;
775 775
776 if (!l_ptr->started && (event != STARTING_EVT)) 776 if (!l_ptr->started && (event != STARTING_EVT))
@@ -799,11 +799,11 @@ static void link_state_event(struct link *l_ptr, unsigned event)
799 if (l_ptr->next_in_no != l_ptr->checkpoint) { 799 if (l_ptr->next_in_no != l_ptr->checkpoint) {
800 l_ptr->checkpoint = l_ptr->next_in_no; 800 l_ptr->checkpoint = l_ptr->next_in_no;
801 if (tipc_bclink_acks_missing(l_ptr->owner)) { 801 if (tipc_bclink_acks_missing(l_ptr->owner)) {
802 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 802 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
803 0, 0, 0, 0, 0); 803 0, 0, 0, 0, 0);
804 l_ptr->fsm_msg_cnt++; 804 l_ptr->fsm_msg_cnt++;
805 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { 805 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
806 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 806 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
807 1, 0, 0, 0, 0); 807 1, 0, 0, 0, 0);
808 l_ptr->fsm_msg_cnt++; 808 l_ptr->fsm_msg_cnt++;
809 } 809 }
@@ -819,7 +819,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
819 break; 819 break;
820 case RESET_MSG: 820 case RESET_MSG:
821 dbg_link("RES -> RR\n"); 821 dbg_link("RES -> RR\n");
822 info("Resetting link <%s>, requested by peer\n", 822 info("Resetting link <%s>, requested by peer\n",
823 l_ptr->name); 823 l_ptr->name);
824 tipc_link_reset(l_ptr); 824 tipc_link_reset(l_ptr);
825 l_ptr->state = RESET_RESET; 825 l_ptr->state = RESET_RESET;
@@ -871,7 +871,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
871 dbg_link("Probing %u/%u,timer = %u ms)\n", 871 dbg_link("Probing %u/%u,timer = %u ms)\n",
872 l_ptr->fsm_msg_cnt, l_ptr->abort_limit, 872 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
873 cont_intv / 4); 873 cont_intv / 4);
874 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 874 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
875 1, 0, 0, 0, 0); 875 1, 0, 0, 0, 0);
876 l_ptr->fsm_msg_cnt++; 876 l_ptr->fsm_msg_cnt++;
877 link_set_timer(l_ptr, cont_intv / 4); 877 link_set_timer(l_ptr, cont_intv / 4);
@@ -977,11 +977,11 @@ static void link_state_event(struct link *l_ptr, unsigned event)
977 977
978/* 978/*
979 * link_bundle_buf(): Append contents of a buffer to 979 * link_bundle_buf(): Append contents of a buffer to
980 * the tail of an existing one. 980 * the tail of an existing one.
981 */ 981 */
982 982
983static int link_bundle_buf(struct link *l_ptr, 983static int link_bundle_buf(struct link *l_ptr,
984 struct sk_buff *bundler, 984 struct sk_buff *bundler,
985 struct sk_buff *buf) 985 struct sk_buff *buf)
986{ 986{
987 struct tipc_msg *bundler_msg = buf_msg(bundler); 987 struct tipc_msg *bundler_msg = buf_msg(bundler);
@@ -1030,8 +1030,8 @@ static void link_add_to_outqueue(struct link *l_ptr,
1030 l_ptr->out_queue_size++; 1030 l_ptr->out_queue_size++;
1031} 1031}
1032 1032
1033/* 1033/*
1034 * tipc_link_send_buf() is the 'full path' for messages, called from 1034 * tipc_link_send_buf() is the 'full path' for messages, called from
1035 * inside TIPC when the 'fast path' in tipc_send_buf 1035 * inside TIPC when the 'fast path' in tipc_send_buf
1036 * has failed, and from link_send() 1036 * has failed, and from link_send()
1037 */ 1037 */
@@ -1074,7 +1074,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1074 if (queue_size > l_ptr->stats.max_queue_sz) 1074 if (queue_size > l_ptr->stats.max_queue_sz)
1075 l_ptr->stats.max_queue_sz = queue_size; 1075 l_ptr->stats.max_queue_sz = queue_size;
1076 1076
1077 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && 1077 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
1078 !link_congested(l_ptr))) { 1078 !link_congested(l_ptr))) {
1079 link_add_to_outqueue(l_ptr, buf, msg); 1079 link_add_to_outqueue(l_ptr, buf, msg);
1080 1080
@@ -1094,7 +1094,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1094 1094
1095 /* Try adding message to an existing bundle */ 1095 /* Try adding message to an existing bundle */
1096 1096
1097 if (l_ptr->next_out && 1097 if (l_ptr->next_out &&
1098 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { 1098 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1099 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); 1099 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1100 return dsz; 1100 return dsz;
@@ -1109,7 +1109,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1109 if (bundler) { 1109 if (bundler) {
1110 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, 1110 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
1111 TIPC_OK, INT_H_SIZE, l_ptr->addr); 1111 TIPC_OK, INT_H_SIZE, l_ptr->addr);
1112 memcpy(bundler->data, (unchar *)&bundler_hdr, 1112 memcpy(bundler->data, (unchar *)&bundler_hdr,
1113 INT_H_SIZE); 1113 INT_H_SIZE);
1114 skb_trim(bundler, INT_H_SIZE); 1114 skb_trim(bundler, INT_H_SIZE);
1115 link_bundle_buf(l_ptr, bundler, buf); 1115 link_bundle_buf(l_ptr, bundler, buf);
@@ -1126,8 +1126,8 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1126 return dsz; 1126 return dsz;
1127} 1127}
1128 1128
1129/* 1129/*
1130 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has 1130 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
1131 * not been selected yet, and the the owner node is not locked 1131 * not been selected yet, and the the owner node is not locked
1132 * Called by TIPC internal users, e.g. the name distributor 1132 * Called by TIPC internal users, e.g. the name distributor
1133 */ 1133 */
@@ -1161,8 +1161,8 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
1161 return res; 1161 return res;
1162} 1162}
1163 1163
1164/* 1164/*
1165 * link_send_buf_fast: Entry for data messages where the 1165 * link_send_buf_fast: Entry for data messages where the
1166 * destination link is known and the header is complete, 1166 * destination link is known and the header is complete,
1167 * inclusive total message length. Very time critical. 1167 * inclusive total message length. Very time critical.
1168 * Link is locked. Returns user data length. 1168 * Link is locked. Returns user data length.
@@ -1197,8 +1197,8 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1197 return tipc_link_send_buf(l_ptr, buf); /* All other cases */ 1197 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
1198} 1198}
1199 1199
1200/* 1200/*
1201 * tipc_send_buf_fast: Entry for data messages where the 1201 * tipc_send_buf_fast: Entry for data messages where the
1202 * destination node is known and the header is complete, 1202 * destination node is known and the header is complete,
1203 * inclusive total message length. 1203 * inclusive total message length.
1204 * Returns user data length. 1204 * Returns user data length.
@@ -1236,15 +1236,15 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1236} 1236}
1237 1237
1238 1238
1239/* 1239/*
1240 * tipc_link_send_sections_fast: Entry for messages where the 1240 * tipc_link_send_sections_fast: Entry for messages where the
1241 * destination processor is known and the header is complete, 1241 * destination processor is known and the header is complete,
1242 * except for total message length. 1242 * except for total message length.
1243 * Returns user data length or errno. 1243 * Returns user data length or errno.
1244 */ 1244 */
1245int tipc_link_send_sections_fast(struct port *sender, 1245int tipc_link_send_sections_fast(struct port *sender,
1246 struct iovec const *msg_sect, 1246 struct iovec const *msg_sect,
1247 const u32 num_sect, 1247 const u32 num_sect,
1248 u32 destaddr) 1248 u32 destaddr)
1249{ 1249{
1250 struct tipc_msg *hdr = &sender->publ.phdr; 1250 struct tipc_msg *hdr = &sender->publ.phdr;
@@ -1287,14 +1287,14 @@ exit:
1287 1287
1288 /* Exit if link (or bearer) is congested */ 1288 /* Exit if link (or bearer) is congested */
1289 1289
1290 if (link_congested(l_ptr) || 1290 if (link_congested(l_ptr) ||
1291 !list_empty(&l_ptr->b_ptr->cong_links)) { 1291 !list_empty(&l_ptr->b_ptr->cong_links)) {
1292 res = link_schedule_port(l_ptr, 1292 res = link_schedule_port(l_ptr,
1293 sender->publ.ref, res); 1293 sender->publ.ref, res);
1294 goto exit; 1294 goto exit;
1295 } 1295 }
1296 1296
1297 /* 1297 /*
1298 * Message size exceeds max_pkt hint; update hint, 1298 * Message size exceeds max_pkt hint; update hint,
1299 * then re-try fast path or fragment the message 1299 * then re-try fast path or fragment the message
1300 */ 1300 */
@@ -1324,10 +1324,10 @@ exit:
1324 return res; 1324 return res;
1325} 1325}
1326 1326
1327/* 1327/*
1328 * link_send_sections_long(): Entry for long messages where the 1328 * link_send_sections_long(): Entry for long messages where the
1329 * destination node is known and the header is complete, 1329 * destination node is known and the header is complete,
1330 * inclusive total message length. 1330 * inclusive total message length.
1331 * Link and bearer congestion status have been checked to be ok, 1331 * Link and bearer congestion status have been checked to be ok,
1332 * and are ignored if they change. 1332 * and are ignored if they change.
1333 * 1333 *
@@ -1357,9 +1357,9 @@ static int link_send_sections_long(struct port *sender,
1357 1357
1358again: 1358again:
1359 fragm_no = 1; 1359 fragm_no = 1;
1360 max_pkt = sender->max_pkt - INT_H_SIZE; 1360 max_pkt = sender->max_pkt - INT_H_SIZE;
1361 /* leave room for tunnel header in case of link changeover */ 1361 /* leave room for tunnel header in case of link changeover */
1362 fragm_sz = max_pkt - INT_H_SIZE; 1362 fragm_sz = max_pkt - INT_H_SIZE;
1363 /* leave room for fragmentation header in each fragment */ 1363 /* leave room for fragmentation header in each fragment */
1364 rest = dsz; 1364 rest = dsz;
1365 fragm_crs = 0; 1365 fragm_crs = 0;
@@ -1440,7 +1440,7 @@ error:
1440 if (!buf) 1440 if (!buf)
1441 goto error; 1441 goto error;
1442 1442
1443 buf->next = NULL; 1443 buf->next = NULL;
1444 prev->next = buf; 1444 prev->next = buf;
1445 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE); 1445 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1446 fragm_crs = INT_H_SIZE; 1446 fragm_crs = INT_H_SIZE;
@@ -1450,7 +1450,7 @@ error:
1450 } 1450 }
1451 while (rest > 0); 1451 while (rest > 0);
1452 1452
1453 /* 1453 /*
1454 * Now we have a buffer chain. Select a link and check 1454 * Now we have a buffer chain. Select a link and check
1455 * that packet size is still OK 1455 * that packet size is still OK
1456 */ 1456 */
@@ -1506,7 +1506,7 @@ reject:
1506 return dsz; 1506 return dsz;
1507} 1507}
1508 1508
1509/* 1509/*
1510 * tipc_link_push_packet: Push one unsent packet to the media 1510 * tipc_link_push_packet: Push one unsent packet to the media
1511 */ 1511 */
1512u32 tipc_link_push_packet(struct link *l_ptr) 1512u32 tipc_link_push_packet(struct link *l_ptr)
@@ -1519,7 +1519,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
1519 /* consider that buffers may have been released in meantime */ 1519 /* consider that buffers may have been released in meantime */
1520 1520
1521 if (r_q_size && buf) { 1521 if (r_q_size && buf) {
1522 u32 last = lesser(mod(r_q_head + r_q_size), 1522 u32 last = lesser(mod(r_q_head + r_q_size),
1523 link_last_sent(l_ptr)); 1523 link_last_sent(l_ptr));
1524 u32 first = msg_seqno(buf_msg(buf)); 1524 u32 first = msg_seqno(buf_msg(buf));
1525 1525
@@ -1535,7 +1535,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
1535 1535
1536 if (r_q_size && buf && !skb_cloned(buf)) { 1536 if (r_q_size && buf && !skb_cloned(buf)) {
1537 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1537 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1538 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1538 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1539 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1539 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1540 msg_dbg(buf_msg(buf), ">DEF-RETR>"); 1540 msg_dbg(buf_msg(buf), ">DEF-RETR>");
1541 l_ptr->retransm_queue_head = mod(++r_q_head); 1541 l_ptr->retransm_queue_head = mod(++r_q_head);
@@ -1554,7 +1554,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
1554 buf = l_ptr->proto_msg_queue; 1554 buf = l_ptr->proto_msg_queue;
1555 if (buf) { 1555 if (buf) {
1556 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1556 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1557 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in); 1557 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
1558 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1558 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1559 msg_dbg(buf_msg(buf), ">DEF-PROT>"); 1559 msg_dbg(buf_msg(buf), ">DEF-PROT>");
1560 l_ptr->unacked_window = 0; 1560 l_ptr->unacked_window = 0;
@@ -1578,7 +1578,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
1578 1578
1579 if (mod(next - first) < l_ptr->queue_limit[0]) { 1579 if (mod(next - first) < l_ptr->queue_limit[0]) {
1580 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1580 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1581 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1581 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1582 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1582 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1583 if (msg_user(msg) == MSG_BUNDLER) 1583 if (msg_user(msg) == MSG_BUNDLER)
1584 msg_set_type(msg, CLOSED_MSG); 1584 msg_set_type(msg, CLOSED_MSG);
@@ -1629,12 +1629,12 @@ static void link_reset_all(unsigned long addr)
1629 1629
1630 tipc_node_lock(n_ptr); 1630 tipc_node_lock(n_ptr);
1631 1631
1632 warn("Resetting all links to %s\n", 1632 warn("Resetting all links to %s\n",
1633 addr_string_fill(addr_string, n_ptr->addr)); 1633 addr_string_fill(addr_string, n_ptr->addr));
1634 1634
1635 for (i = 0; i < MAX_BEARERS; i++) { 1635 for (i = 0; i < MAX_BEARERS; i++) {
1636 if (n_ptr->links[i]) { 1636 if (n_ptr->links[i]) {
1637 link_print(n_ptr->links[i], TIPC_OUTPUT, 1637 link_print(n_ptr->links[i], TIPC_OUTPUT,
1638 "Resetting link\n"); 1638 "Resetting link\n");
1639 tipc_link_reset(n_ptr->links[i]); 1639 tipc_link_reset(n_ptr->links[i]);
1640 } 1640 }
@@ -1689,7 +1689,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1689 } 1689 }
1690} 1690}
1691 1691
1692void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, 1692void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1693 u32 retransmits) 1693 u32 retransmits)
1694{ 1694{
1695 struct tipc_msg *msg; 1695 struct tipc_msg *msg;
@@ -1698,7 +1698,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1698 return; 1698 return;
1699 1699
1700 msg = buf_msg(buf); 1700 msg = buf_msg(buf);
1701 1701
1702 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); 1702 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1703 1703
1704 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { 1704 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
@@ -1728,7 +1728,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1728 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { 1728 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1729 msg = buf_msg(buf); 1729 msg = buf_msg(buf);
1730 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1730 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1731 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1731 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1732 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1732 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1733 msg_dbg(buf_msg(buf), ">RETR>"); 1733 msg_dbg(buf_msg(buf), ">RETR>");
1734 buf = buf->next; 1734 buf = buf->next;
@@ -1746,7 +1746,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1746 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1746 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1747} 1747}
1748 1748
1749/* 1749/*
1750 * link_recv_non_seq: Receive packets which are outside 1750 * link_recv_non_seq: Receive packets which are outside
1751 * the link sequence flow 1751 * the link sequence flow
1752 */ 1752 */
@@ -1761,11 +1761,11 @@ static void link_recv_non_seq(struct sk_buff *buf)
1761 tipc_bclink_recv_pkt(buf); 1761 tipc_bclink_recv_pkt(buf);
1762} 1762}
1763 1763
1764/** 1764/**
1765 * link_insert_deferred_queue - insert deferred messages back into receive chain 1765 * link_insert_deferred_queue - insert deferred messages back into receive chain
1766 */ 1766 */
1767 1767
1768static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr, 1768static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1769 struct sk_buff *buf) 1769 struct sk_buff *buf)
1770{ 1770{
1771 u32 seq_no; 1771 u32 seq_no;
@@ -1813,11 +1813,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1813 link_recv_non_seq(buf); 1813 link_recv_non_seq(buf);
1814 continue; 1814 continue;
1815 } 1815 }
1816 1816
1817 if (unlikely(!msg_short(msg) && 1817 if (unlikely(!msg_short(msg) &&
1818 (msg_destnode(msg) != tipc_own_addr))) 1818 (msg_destnode(msg) != tipc_own_addr)))
1819 goto cont; 1819 goto cont;
1820 1820
1821 n_ptr = tipc_node_find(msg_prevnode(msg)); 1821 n_ptr = tipc_node_find(msg_prevnode(msg));
1822 if (unlikely(!n_ptr)) 1822 if (unlikely(!n_ptr))
1823 goto cont; 1823 goto cont;
@@ -1828,8 +1828,8 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1828 tipc_node_unlock(n_ptr); 1828 tipc_node_unlock(n_ptr);
1829 goto cont; 1829 goto cont;
1830 } 1830 }
1831 /* 1831 /*
1832 * Release acked messages 1832 * Release acked messages
1833 */ 1833 */
1834 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) { 1834 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1835 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported) 1835 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
@@ -1837,7 +1837,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1837 } 1837 }
1838 1838
1839 crs = l_ptr->first_out; 1839 crs = l_ptr->first_out;
1840 while ((crs != l_ptr->next_out) && 1840 while ((crs != l_ptr->next_out) &&
1841 less_eq(msg_seqno(buf_msg(crs)), ackd)) { 1841 less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1842 struct sk_buff *next = crs->next; 1842 struct sk_buff *next = crs->next;
1843 1843
@@ -1875,7 +1875,7 @@ deliver:
1875 switch (msg_user(msg)) { 1875 switch (msg_user(msg)) {
1876 case MSG_BUNDLER: 1876 case MSG_BUNDLER:
1877 l_ptr->stats.recv_bundles++; 1877 l_ptr->stats.recv_bundles++;
1878 l_ptr->stats.recv_bundled += 1878 l_ptr->stats.recv_bundled +=
1879 msg_msgcnt(msg); 1879 msg_msgcnt(msg);
1880 tipc_node_unlock(n_ptr); 1880 tipc_node_unlock(n_ptr);
1881 tipc_link_recv_bundle(buf); 1881 tipc_link_recv_bundle(buf);
@@ -1894,7 +1894,7 @@ deliver:
1894 continue; 1894 continue;
1895 case MSG_FRAGMENTER: 1895 case MSG_FRAGMENTER:
1896 l_ptr->stats.recv_fragments++; 1896 l_ptr->stats.recv_fragments++;
1897 if (tipc_link_recv_fragment(&l_ptr->defragm_buf, 1897 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1898 &buf, &msg)) { 1898 &buf, &msg)) {
1899 l_ptr->stats.recv_fragmented++; 1899 l_ptr->stats.recv_fragmented++;
1900 goto deliver; 1900 goto deliver;
@@ -1905,7 +1905,7 @@ deliver:
1905 if (link_recv_changeover_msg(&l_ptr, &buf)) { 1905 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1906 msg = buf_msg(buf); 1906 msg = buf_msg(buf);
1907 seq_no = msg_seqno(msg); 1907 seq_no = msg_seqno(msg);
1908 TIPC_SKB_CB(buf)->handle 1908 TIPC_SKB_CB(buf)->handle
1909 = b_ptr; 1909 = b_ptr;
1910 if (type == ORIGINAL_MSG) 1910 if (type == ORIGINAL_MSG)
1911 goto deliver; 1911 goto deliver;
@@ -1948,8 +1948,8 @@ cont:
1948 read_unlock_bh(&tipc_net_lock); 1948 read_unlock_bh(&tipc_net_lock);
1949} 1949}
1950 1950
1951/* 1951/*
1952 * link_defer_buf(): Sort a received out-of-sequence packet 1952 * link_defer_buf(): Sort a received out-of-sequence packet
1953 * into the deferred reception queue. 1953 * into the deferred reception queue.
1954 * Returns the increase of the queue length,i.e. 0 or 1 1954 * Returns the increase of the queue length,i.e. 0 or 1
1955 */ 1955 */
@@ -1986,7 +1986,7 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
1986 if (prev) 1986 if (prev)
1987 prev->next = buf; 1987 prev->next = buf;
1988 else 1988 else
1989 *head = buf; 1989 *head = buf;
1990 return 1; 1990 return 1;
1991 } 1991 }
1992 if (seq_no == msg_seqno(msg)) { 1992 if (seq_no == msg_seqno(msg)) {
@@ -2003,11 +2003,11 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
2003 return 0; 2003 return 0;
2004} 2004}
2005 2005
2006/** 2006/**
2007 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet 2007 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
2008 */ 2008 */
2009 2009
2010static void link_handle_out_of_seq_msg(struct link *l_ptr, 2010static void link_handle_out_of_seq_msg(struct link *l_ptr,
2011 struct sk_buff *buf) 2011 struct sk_buff *buf)
2012{ 2012{
2013 u32 seq_no = msg_seqno(buf_msg(buf)); 2013 u32 seq_no = msg_seqno(buf_msg(buf));
@@ -2017,14 +2017,14 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
2017 return; 2017 return;
2018 } 2018 }
2019 2019
2020 dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n", 2020 dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n",
2021 seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no); 2021 seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
2022 2022
2023 /* Record OOS packet arrival (force mismatch on next timeout) */ 2023 /* Record OOS packet arrival (force mismatch on next timeout) */
2024 2024
2025 l_ptr->checkpoint--; 2025 l_ptr->checkpoint--;
2026 2026
2027 /* 2027 /*
2028 * Discard packet if a duplicate; otherwise add it to deferred queue 2028 * Discard packet if a duplicate; otherwise add it to deferred queue
2029 * and notify peer of gap as per protocol specification 2029 * and notify peer of gap as per protocol specification
2030 */ 2030 */
@@ -2053,13 +2053,13 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2053{ 2053{
2054 struct sk_buff *buf = NULL; 2054 struct sk_buff *buf = NULL;
2055 struct tipc_msg *msg = l_ptr->pmsg; 2055 struct tipc_msg *msg = l_ptr->pmsg;
2056 u32 msg_size = sizeof(l_ptr->proto_msg); 2056 u32 msg_size = sizeof(l_ptr->proto_msg);
2057 2057
2058 if (link_blocked(l_ptr)) 2058 if (link_blocked(l_ptr))
2059 return; 2059 return;
2060 msg_set_type(msg, msg_typ); 2060 msg_set_type(msg, msg_typ);
2061 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 2061 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
2062 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 2062 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
2063 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 2063 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
2064 2064
2065 if (msg_typ == STATE_MSG) { 2065 if (msg_typ == STATE_MSG) {
@@ -2082,23 +2082,23 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2082 msg_set_max_pkt(msg, ack_mtu); 2082 msg_set_max_pkt(msg, ack_mtu);
2083 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 2083 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
2084 msg_set_probe(msg, probe_msg != 0); 2084 msg_set_probe(msg, probe_msg != 0);
2085 if (probe_msg) { 2085 if (probe_msg) {
2086 u32 mtu = l_ptr->max_pkt; 2086 u32 mtu = l_ptr->max_pkt;
2087 2087
2088 if ((mtu < l_ptr->max_pkt_target) && 2088 if ((mtu < l_ptr->max_pkt_target) &&
2089 link_working_working(l_ptr) && 2089 link_working_working(l_ptr) &&
2090 l_ptr->fsm_msg_cnt) { 2090 l_ptr->fsm_msg_cnt) {
2091 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; 2091 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2092 if (l_ptr->max_pkt_probes == 10) { 2092 if (l_ptr->max_pkt_probes == 10) {
2093 l_ptr->max_pkt_target = (msg_size - 4); 2093 l_ptr->max_pkt_target = (msg_size - 4);
2094 l_ptr->max_pkt_probes = 0; 2094 l_ptr->max_pkt_probes = 0;
2095 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; 2095 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2096 } 2096 }
2097 l_ptr->max_pkt_probes++; 2097 l_ptr->max_pkt_probes++;
2098 } 2098 }
2099 2099
2100 l_ptr->stats.sent_probes++; 2100 l_ptr->stats.sent_probes++;
2101 } 2101 }
2102 l_ptr->stats.sent_states++; 2102 l_ptr->stats.sent_states++;
2103 } else { /* RESET_MSG or ACTIVATE_MSG */ 2103 } else { /* RESET_MSG or ACTIVATE_MSG */
2104 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); 2104 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
@@ -2144,7 +2144,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2144 return; 2144 return;
2145 2145
2146 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg)); 2146 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2147 msg_set_size(buf_msg(buf), msg_size); 2147 msg_set_size(buf_msg(buf), msg_size);
2148 2148
2149 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 2149 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2150 l_ptr->unacked_window = 0; 2150 l_ptr->unacked_window = 0;
@@ -2160,15 +2160,15 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2160 2160
2161/* 2161/*
2162 * Receive protocol message : 2162 * Receive protocol message :
2163 * Note that network plane id propagates through the network, and may 2163 * Note that network plane id propagates through the network, and may
2164 * change at any time. The node with lowest address rules 2164 * change at any time. The node with lowest address rules
2165 */ 2165 */
2166 2166
2167static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf) 2167static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2168{ 2168{
2169 u32 rec_gap = 0; 2169 u32 rec_gap = 0;
2170 u32 max_pkt_info; 2170 u32 max_pkt_info;
2171 u32 max_pkt_ack; 2171 u32 max_pkt_ack;
2172 u32 msg_tol; 2172 u32 msg_tol;
2173 struct tipc_msg *msg = buf_msg(buf); 2173 struct tipc_msg *msg = buf_msg(buf);
2174 2174
@@ -2188,12 +2188,12 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2188 l_ptr->owner->permit_changeover = msg_redundant_link(msg); 2188 l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2189 2189
2190 switch (msg_type(msg)) { 2190 switch (msg_type(msg)) {
2191 2191
2192 case RESET_MSG: 2192 case RESET_MSG:
2193 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) { 2193 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
2194 if (msg_session(msg) == l_ptr->peer_session) { 2194 if (msg_session(msg) == l_ptr->peer_session) {
2195 dbg("Duplicate RESET: %u<->%u\n", 2195 dbg("Duplicate RESET: %u<->%u\n",
2196 msg_session(msg), l_ptr->peer_session); 2196 msg_session(msg), l_ptr->peer_session);
2197 break; /* duplicate: ignore */ 2197 break; /* duplicate: ignore */
2198 } 2198 }
2199 } 2199 }
@@ -2211,13 +2211,13 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2211 l_ptr->priority = msg_linkprio(msg); 2211 l_ptr->priority = msg_linkprio(msg);
2212 2212
2213 max_pkt_info = msg_max_pkt(msg); 2213 max_pkt_info = msg_max_pkt(msg);
2214 if (max_pkt_info) { 2214 if (max_pkt_info) {
2215 if (max_pkt_info < l_ptr->max_pkt_target) 2215 if (max_pkt_info < l_ptr->max_pkt_target)
2216 l_ptr->max_pkt_target = max_pkt_info; 2216 l_ptr->max_pkt_target = max_pkt_info;
2217 if (l_ptr->max_pkt > l_ptr->max_pkt_target) 2217 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2218 l_ptr->max_pkt = l_ptr->max_pkt_target; 2218 l_ptr->max_pkt = l_ptr->max_pkt_target;
2219 } else { 2219 } else {
2220 l_ptr->max_pkt = l_ptr->max_pkt_target; 2220 l_ptr->max_pkt = l_ptr->max_pkt_target;
2221 } 2221 }
2222 l_ptr->owner->bclink.supported = (max_pkt_info != 0); 2222 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2223 2223
@@ -2235,8 +2235,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2235 2235
2236 if ((msg_tol = msg_link_tolerance(msg))) 2236 if ((msg_tol = msg_link_tolerance(msg)))
2237 link_set_supervision_props(l_ptr, msg_tol); 2237 link_set_supervision_props(l_ptr, msg_tol);
2238 2238
2239 if (msg_linkprio(msg) && 2239 if (msg_linkprio(msg) &&
2240 (msg_linkprio(msg) != l_ptr->priority)) { 2240 (msg_linkprio(msg) != l_ptr->priority)) {
2241 warn("Resetting link <%s>, priority change %u->%u\n", 2241 warn("Resetting link <%s>, priority change %u->%u\n",
2242 l_ptr->name, l_ptr->priority, msg_linkprio(msg)); 2242 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
@@ -2250,25 +2250,25 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2250 break; 2250 break;
2251 2251
2252 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) { 2252 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2253 rec_gap = mod(msg_next_sent(msg) - 2253 rec_gap = mod(msg_next_sent(msg) -
2254 mod(l_ptr->next_in_no)); 2254 mod(l_ptr->next_in_no));
2255 } 2255 }
2256 2256
2257 max_pkt_ack = msg_max_pkt(msg); 2257 max_pkt_ack = msg_max_pkt(msg);
2258 if (max_pkt_ack > l_ptr->max_pkt) { 2258 if (max_pkt_ack > l_ptr->max_pkt) {
2259 dbg("Link <%s> updated MTU %u -> %u\n", 2259 dbg("Link <%s> updated MTU %u -> %u\n",
2260 l_ptr->name, l_ptr->max_pkt, max_pkt_ack); 2260 l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
2261 l_ptr->max_pkt = max_pkt_ack; 2261 l_ptr->max_pkt = max_pkt_ack;
2262 l_ptr->max_pkt_probes = 0; 2262 l_ptr->max_pkt_probes = 0;
2263 } 2263 }
2264 2264
2265 max_pkt_ack = 0; 2265 max_pkt_ack = 0;
2266 if (msg_probe(msg)) { 2266 if (msg_probe(msg)) {
2267 l_ptr->stats.recv_probes++; 2267 l_ptr->stats.recv_probes++;
2268 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) { 2268 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
2269 max_pkt_ack = msg_size(msg); 2269 max_pkt_ack = msg_size(msg);
2270 } 2270 }
2271 } 2271 }
2272 2272
2273 /* Protocol message before retransmits, reduce loss risk */ 2273 /* Protocol message before retransmits, reduce loss risk */
2274 2274
@@ -2294,11 +2294,11 @@ exit:
2294 2294
2295 2295
2296/* 2296/*
2297 * tipc_link_tunnel(): Send one message via a link belonging to 2297 * tipc_link_tunnel(): Send one message via a link belonging to
2298 * another bearer. Owner node is locked. 2298 * another bearer. Owner node is locked.
2299 */ 2299 */
2300void tipc_link_tunnel(struct link *l_ptr, 2300void tipc_link_tunnel(struct link *l_ptr,
2301 struct tipc_msg *tunnel_hdr, 2301 struct tipc_msg *tunnel_hdr,
2302 struct tipc_msg *msg, 2302 struct tipc_msg *msg,
2303 u32 selector) 2303 u32 selector)
2304{ 2304{
@@ -2374,7 +2374,7 @@ void tipc_link_changeover(struct link *l_ptr)
2374 return; 2374 return;
2375 } 2375 }
2376 2376
2377 split_bundles = (l_ptr->owner->active_links[0] != 2377 split_bundles = (l_ptr->owner->active_links[0] !=
2378 l_ptr->owner->active_links[1]); 2378 l_ptr->owner->active_links[1]);
2379 2379
2380 while (crs) { 2380 while (crs) {
@@ -2418,7 +2418,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2418 if (msg_user(msg) == MSG_BUNDLER) 2418 if (msg_user(msg) == MSG_BUNDLER)
2419 msg_set_type(msg, CLOSED_MSG); 2419 msg_set_type(msg, CLOSED_MSG);
2420 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ 2420 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2421 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 2421 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2422 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2422 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2423 outbuf = buf_acquire(length + INT_H_SIZE); 2423 outbuf = buf_acquire(length + INT_H_SIZE);
2424 if (outbuf == NULL) { 2424 if (outbuf == NULL) {
@@ -2445,7 +2445,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2445 * @skb: encapsulating message buffer 2445 * @skb: encapsulating message buffer
2446 * @from_pos: offset to extract from 2446 * @from_pos: offset to extract from
2447 * 2447 *
2448 * Returns a new message buffer containing an embedded message. The 2448 * Returns a new message buffer containing an embedded message. The
2449 * encapsulating message itself is left unchanged. 2449 * encapsulating message itself is left unchanged.
2450 */ 2450 */
2451 2451
@@ -2461,7 +2461,7 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2461 return eb; 2461 return eb;
2462} 2462}
2463 2463
2464/* 2464/*
2465 * link_recv_changeover_msg(): Receive tunneled packet sent 2465 * link_recv_changeover_msg(): Receive tunneled packet sent
2466 * via other link. Node is locked. Return extracted buffer. 2466 * via other link. Node is locked. Return extracted buffer.
2467 */ 2467 */
@@ -2482,7 +2482,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2482 goto exit; 2482 goto exit;
2483 } 2483 }
2484 if (dest_link == *l_ptr) { 2484 if (dest_link == *l_ptr) {
2485 err("Unexpected changeover message on link <%s>\n", 2485 err("Unexpected changeover message on link <%s>\n",
2486 (*l_ptr)->name); 2486 (*l_ptr)->name);
2487 goto exit; 2487 goto exit;
2488 } 2488 }
@@ -2582,9 +2582,9 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2582 */ 2582 */
2583 2583
2584 2584
2585/* 2585/*
2586 * tipc_link_send_long_buf: Entry for buffers needing fragmentation. 2586 * tipc_link_send_long_buf: Entry for buffers needing fragmentation.
2587 * The buffer is complete, inclusive total message length. 2587 * The buffer is complete, inclusive total message length.
2588 * Returns user data length. 2588 * Returns user data length.
2589 */ 2589 */
2590int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) 2590int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
@@ -2650,9 +2650,9 @@ exit:
2650 return dsz; 2650 return dsz;
2651} 2651}
2652 2652
2653/* 2653/*
2654 * A pending message being re-assembled must store certain values 2654 * A pending message being re-assembled must store certain values
2655 * to handle subsequent fragments correctly. The following functions 2655 * to handle subsequent fragments correctly. The following functions
2656 * help storing these values in unused, available fields in the 2656 * help storing these values in unused, available fields in the
2657 * pending message. This makes dynamic memory allocation unecessary. 2657 * pending message. This makes dynamic memory allocation unecessary.
2658 */ 2658 */
@@ -2692,11 +2692,11 @@ static void incr_timer_cnt(struct sk_buff *buf)
2692 msg_incr_reroute_cnt(buf_msg(buf)); 2692 msg_incr_reroute_cnt(buf_msg(buf));
2693} 2693}
2694 2694
2695/* 2695/*
2696 * tipc_link_recv_fragment(): Called with node lock on. Returns 2696 * tipc_link_recv_fragment(): Called with node lock on. Returns
2697 * the reassembled buffer if message is complete. 2697 * the reassembled buffer if message is complete.
2698 */ 2698 */
2699int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 2699int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2700 struct tipc_msg **m) 2700 struct tipc_msg **m)
2701{ 2701{
2702 struct sk_buff *prev = NULL; 2702 struct sk_buff *prev = NULL;
@@ -2737,9 +2737,9 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2737 2737
2738 /* Prepare buffer for subsequent fragments. */ 2738 /* Prepare buffer for subsequent fragments. */
2739 2739
2740 set_long_msg_seqno(pbuf, long_msg_seq_no); 2740 set_long_msg_seqno(pbuf, long_msg_seq_no);
2741 set_fragm_size(pbuf,fragm_sz); 2741 set_fragm_size(pbuf,fragm_sz);
2742 set_expected_frags(pbuf,exp_fragm_cnt - 1); 2742 set_expected_frags(pbuf,exp_fragm_cnt - 1);
2743 } else { 2743 } else {
2744 warn("Link unable to reassemble fragmented message\n"); 2744 warn("Link unable to reassemble fragmented message\n");
2745 } 2745 }
@@ -2765,7 +2765,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2765 *m = buf_msg(pbuf); 2765 *m = buf_msg(pbuf);
2766 return 1; 2766 return 1;
2767 } 2767 }
2768 set_expected_frags(pbuf,exp_frags); 2768 set_expected_frags(pbuf,exp_frags);
2769 return 0; 2769 return 0;
2770 } 2770 }
2771 dbg(" Discarding orphan fragment %x\n",fbuf); 2771 dbg(" Discarding orphan fragment %x\n",fbuf);
@@ -2849,10 +2849,10 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2849 * link_find_link - locate link by name 2849 * link_find_link - locate link by name
2850 * @name - ptr to link name string 2850 * @name - ptr to link name string
2851 * @node - ptr to area to be filled with ptr to associated node 2851 * @node - ptr to area to be filled with ptr to associated node
2852 * 2852 *
2853 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; 2853 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2854 * this also prevents link deletion. 2854 * this also prevents link deletion.
2855 * 2855 *
2856 * Returns pointer to link (or 0 if invalid link name). 2856 * Returns pointer to link (or 0 if invalid link name).
2857 */ 2857 */
2858 2858
@@ -2860,7 +2860,7 @@ static struct link *link_find_link(const char *name, struct node **node)
2860{ 2860{
2861 struct link_name link_name_parts; 2861 struct link_name link_name_parts;
2862 struct bearer *b_ptr; 2862 struct bearer *b_ptr;
2863 struct link *l_ptr; 2863 struct link *l_ptr;
2864 2864
2865 if (!link_name_validate(name, &link_name_parts)) 2865 if (!link_name_validate(name, &link_name_parts))
2866 return NULL; 2866 return NULL;
@@ -2869,7 +2869,7 @@ static struct link *link_find_link(const char *name, struct node **node)
2869 if (!b_ptr) 2869 if (!b_ptr)
2870 return NULL; 2870 return NULL;
2871 2871
2872 *node = tipc_node_find(link_name_parts.addr_peer); 2872 *node = tipc_node_find(link_name_parts.addr_peer);
2873 if (!*node) 2873 if (!*node)
2874 return NULL; 2874 return NULL;
2875 2875
@@ -2880,14 +2880,14 @@ static struct link *link_find_link(const char *name, struct node **node)
2880 return l_ptr; 2880 return l_ptr;
2881} 2881}
2882 2882
2883struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2883struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2884 u16 cmd) 2884 u16 cmd)
2885{ 2885{
2886 struct tipc_link_config *args; 2886 struct tipc_link_config *args;
2887 u32 new_value; 2887 u32 new_value;
2888 struct link *l_ptr; 2888 struct link *l_ptr;
2889 struct node *node; 2889 struct node *node;
2890 int res; 2890 int res;
2891 2891
2892 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2892 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2893 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2893 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -2899,40 +2899,40 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2899 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2899 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2900 (tipc_bclink_set_queue_limits(new_value) == 0)) 2900 (tipc_bclink_set_queue_limits(new_value) == 0))
2901 return tipc_cfg_reply_none(); 2901 return tipc_cfg_reply_none();
2902 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2902 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2903 " (cannot change setting on broadcast link)"); 2903 " (cannot change setting on broadcast link)");
2904 } 2904 }
2905 2905
2906 read_lock_bh(&tipc_net_lock); 2906 read_lock_bh(&tipc_net_lock);
2907 l_ptr = link_find_link(args->name, &node); 2907 l_ptr = link_find_link(args->name, &node);
2908 if (!l_ptr) { 2908 if (!l_ptr) {
2909 read_unlock_bh(&tipc_net_lock); 2909 read_unlock_bh(&tipc_net_lock);
2910 return tipc_cfg_reply_error_string("link not found"); 2910 return tipc_cfg_reply_error_string("link not found");
2911 } 2911 }
2912 2912
2913 tipc_node_lock(node); 2913 tipc_node_lock(node);
2914 res = -EINVAL; 2914 res = -EINVAL;
2915 switch (cmd) { 2915 switch (cmd) {
2916 case TIPC_CMD_SET_LINK_TOL: 2916 case TIPC_CMD_SET_LINK_TOL:
2917 if ((new_value >= TIPC_MIN_LINK_TOL) && 2917 if ((new_value >= TIPC_MIN_LINK_TOL) &&
2918 (new_value <= TIPC_MAX_LINK_TOL)) { 2918 (new_value <= TIPC_MAX_LINK_TOL)) {
2919 link_set_supervision_props(l_ptr, new_value); 2919 link_set_supervision_props(l_ptr, new_value);
2920 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 2920 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2921 0, 0, new_value, 0, 0); 2921 0, 0, new_value, 0, 0);
2922 res = TIPC_OK; 2922 res = TIPC_OK;
2923 } 2923 }
2924 break; 2924 break;
2925 case TIPC_CMD_SET_LINK_PRI: 2925 case TIPC_CMD_SET_LINK_PRI:
2926 if ((new_value >= TIPC_MIN_LINK_PRI) && 2926 if ((new_value >= TIPC_MIN_LINK_PRI) &&
2927 (new_value <= TIPC_MAX_LINK_PRI)) { 2927 (new_value <= TIPC_MAX_LINK_PRI)) {
2928 l_ptr->priority = new_value; 2928 l_ptr->priority = new_value;
2929 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 2929 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2930 0, 0, 0, new_value, 0); 2930 0, 0, 0, new_value, 0);
2931 res = TIPC_OK; 2931 res = TIPC_OK;
2932 } 2932 }
2933 break; 2933 break;
2934 case TIPC_CMD_SET_LINK_WINDOW: 2934 case TIPC_CMD_SET_LINK_WINDOW:
2935 if ((new_value >= TIPC_MIN_LINK_WIN) && 2935 if ((new_value >= TIPC_MIN_LINK_WIN) &&
2936 (new_value <= TIPC_MAX_LINK_WIN)) { 2936 (new_value <= TIPC_MAX_LINK_WIN)) {
2937 tipc_link_set_queue_limits(l_ptr, new_value); 2937 tipc_link_set_queue_limits(l_ptr, new_value);
2938 res = TIPC_OK; 2938 res = TIPC_OK;
@@ -2943,7 +2943,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2943 2943
2944 read_unlock_bh(&tipc_net_lock); 2944 read_unlock_bh(&tipc_net_lock);
2945 if (res) 2945 if (res)
2946 return tipc_cfg_reply_error_string("cannot change link setting"); 2946 return tipc_cfg_reply_error_string("cannot change link setting");
2947 2947
2948 return tipc_cfg_reply_none(); 2948 return tipc_cfg_reply_none();
2949} 2949}
@@ -2963,7 +2963,7 @@ static void link_reset_statistics(struct link *l_ptr)
2963struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2963struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2964{ 2964{
2965 char *link_name; 2965 char *link_name;
2966 struct link *l_ptr; 2966 struct link *l_ptr;
2967 struct node *node; 2967 struct node *node;
2968 2968
2969 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2969 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
@@ -2977,7 +2977,7 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
2977 } 2977 }
2978 2978
2979 read_lock_bh(&tipc_net_lock); 2979 read_lock_bh(&tipc_net_lock);
2980 l_ptr = link_find_link(link_name, &node); 2980 l_ptr = link_find_link(link_name, &node);
2981 if (!l_ptr) { 2981 if (!l_ptr) {
2982 read_unlock_bh(&tipc_net_lock); 2982 read_unlock_bh(&tipc_net_lock);
2983 return tipc_cfg_reply_error_string("link not found"); 2983 return tipc_cfg_reply_error_string("link not found");
@@ -3004,14 +3004,14 @@ static u32 percent(u32 count, u32 total)
3004 * @name: link name 3004 * @name: link name
3005 * @buf: print buffer area 3005 * @buf: print buffer area
3006 * @buf_size: size of print buffer area 3006 * @buf_size: size of print buffer area
3007 * 3007 *
3008 * Returns length of print buffer data string (or 0 if error) 3008 * Returns length of print buffer data string (or 0 if error)
3009 */ 3009 */
3010 3010
3011static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 3011static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
3012{ 3012{
3013 struct print_buf pb; 3013 struct print_buf pb;
3014 struct link *l_ptr; 3014 struct link *l_ptr;
3015 struct node *node; 3015 struct node *node;
3016 char *status; 3016 char *status;
3017 u32 profile_total = 0; 3017 u32 profile_total = 0;
@@ -3022,7 +3022,7 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
3022 tipc_printbuf_init(&pb, buf, buf_size); 3022 tipc_printbuf_init(&pb, buf, buf_size);
3023 3023
3024 read_lock_bh(&tipc_net_lock); 3024 read_lock_bh(&tipc_net_lock);
3025 l_ptr = link_find_link(name, &node); 3025 l_ptr = link_find_link(name, &node);
3026 if (!l_ptr) { 3026 if (!l_ptr) {
3027 read_unlock_bh(&tipc_net_lock); 3027 read_unlock_bh(&tipc_net_lock);
3028 return 0; 3028 return 0;
@@ -3036,28 +3036,28 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
3036 else 3036 else
3037 status = "DEFUNCT"; 3037 status = "DEFUNCT";
3038 tipc_printf(&pb, "Link <%s>\n" 3038 tipc_printf(&pb, "Link <%s>\n"
3039 " %s MTU:%u Priority:%u Tolerance:%u ms" 3039 " %s MTU:%u Priority:%u Tolerance:%u ms"
3040 " Window:%u packets\n", 3040 " Window:%u packets\n",
3041 l_ptr->name, status, link_max_pkt(l_ptr), 3041 l_ptr->name, status, link_max_pkt(l_ptr),
3042 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]); 3042 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
3043 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 3043 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
3044 l_ptr->next_in_no - l_ptr->stats.recv_info, 3044 l_ptr->next_in_no - l_ptr->stats.recv_info,
3045 l_ptr->stats.recv_fragments, 3045 l_ptr->stats.recv_fragments,
3046 l_ptr->stats.recv_fragmented, 3046 l_ptr->stats.recv_fragmented,
3047 l_ptr->stats.recv_bundles, 3047 l_ptr->stats.recv_bundles,
3048 l_ptr->stats.recv_bundled); 3048 l_ptr->stats.recv_bundled);
3049 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 3049 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
3050 l_ptr->next_out_no - l_ptr->stats.sent_info, 3050 l_ptr->next_out_no - l_ptr->stats.sent_info,
3051 l_ptr->stats.sent_fragments, 3051 l_ptr->stats.sent_fragments,
3052 l_ptr->stats.sent_fragmented, 3052 l_ptr->stats.sent_fragmented,
3053 l_ptr->stats.sent_bundles, 3053 l_ptr->stats.sent_bundles,
3054 l_ptr->stats.sent_bundled); 3054 l_ptr->stats.sent_bundled);
3055 profile_total = l_ptr->stats.msg_length_counts; 3055 profile_total = l_ptr->stats.msg_length_counts;
3056 if (!profile_total) 3056 if (!profile_total)
3057 profile_total = 1; 3057 profile_total = 1;
3058 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n" 3058 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
3059 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 3059 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
3060 "-16354:%u%% -32768:%u%% -66000:%u%%\n", 3060 "-16354:%u%% -32768:%u%% -66000:%u%%\n",
3061 l_ptr->stats.msg_length_counts, 3061 l_ptr->stats.msg_length_counts,
3062 l_ptr->stats.msg_lengths_total / profile_total, 3062 l_ptr->stats.msg_lengths_total / profile_total,
3063 percent(l_ptr->stats.msg_length_profile[0], profile_total), 3063 percent(l_ptr->stats.msg_length_profile[0], profile_total),
@@ -3067,21 +3067,21 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
3067 percent(l_ptr->stats.msg_length_profile[4], profile_total), 3067 percent(l_ptr->stats.msg_length_profile[4], profile_total),
3068 percent(l_ptr->stats.msg_length_profile[5], profile_total), 3068 percent(l_ptr->stats.msg_length_profile[5], profile_total),
3069 percent(l_ptr->stats.msg_length_profile[6], profile_total)); 3069 percent(l_ptr->stats.msg_length_profile[6], profile_total));
3070 tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n", 3070 tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
3071 l_ptr->stats.recv_states, 3071 l_ptr->stats.recv_states,
3072 l_ptr->stats.recv_probes, 3072 l_ptr->stats.recv_probes,
3073 l_ptr->stats.recv_nacks, 3073 l_ptr->stats.recv_nacks,
3074 l_ptr->stats.deferred_recv, 3074 l_ptr->stats.deferred_recv,
3075 l_ptr->stats.duplicates); 3075 l_ptr->stats.duplicates);
3076 tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n", 3076 tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
3077 l_ptr->stats.sent_states, 3077 l_ptr->stats.sent_states,
3078 l_ptr->stats.sent_probes, 3078 l_ptr->stats.sent_probes,
3079 l_ptr->stats.sent_nacks, 3079 l_ptr->stats.sent_nacks,
3080 l_ptr->stats.sent_acks, 3080 l_ptr->stats.sent_acks,
3081 l_ptr->stats.retransmitted); 3081 l_ptr->stats.retransmitted);
3082 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", 3082 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
3083 l_ptr->stats.bearer_congs, 3083 l_ptr->stats.bearer_congs,
3084 l_ptr->stats.link_congs, 3084 l_ptr->stats.link_congs,
3085 l_ptr->stats.max_queue_sz, 3085 l_ptr->stats.max_queue_sz,
3086 l_ptr->stats.queue_sz_counts 3086 l_ptr->stats.queue_sz_counts
3087 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts) 3087 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
@@ -3113,7 +3113,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
3113 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO); 3113 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3114 if (!str_len) { 3114 if (!str_len) {
3115 buf_discard(buf); 3115 buf_discard(buf);
3116 return tipc_cfg_reply_error_string("link not found"); 3116 return tipc_cfg_reply_error_string("link not found");
3117 } 3117 }
3118 3118
3119 skb_put(buf, TLV_SPACE(str_len)); 3119 skb_put(buf, TLV_SPACE(str_len));
@@ -3164,7 +3164,7 @@ int link_control(const char *name, u32 op, u32 val)
3164 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination 3164 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
3165 * @dest: network address of destination node 3165 * @dest: network address of destination node
3166 * @selector: used to select from set of active links 3166 * @selector: used to select from set of active links
3167 * 3167 *
3168 * If no active link can be found, uses default maximum packet size. 3168 * If no active link can be found, uses default maximum packet size.
3169 */ 3169 */
3170 3170
@@ -3173,11 +3173,11 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3173 struct node *n_ptr; 3173 struct node *n_ptr;
3174 struct link *l_ptr; 3174 struct link *l_ptr;
3175 u32 res = MAX_PKT_DEFAULT; 3175 u32 res = MAX_PKT_DEFAULT;
3176 3176
3177 if (dest == tipc_own_addr) 3177 if (dest == tipc_own_addr)
3178 return MAX_MSG_SIZE; 3178 return MAX_MSG_SIZE;
3179 3179
3180 read_lock_bh(&tipc_net_lock); 3180 read_lock_bh(&tipc_net_lock);
3181 n_ptr = tipc_node_select(dest, selector); 3181 n_ptr = tipc_node_select(dest, selector);
3182 if (n_ptr) { 3182 if (n_ptr) {
3183 tipc_node_lock(n_ptr); 3183 tipc_node_lock(n_ptr);
@@ -3186,7 +3186,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3186 res = link_max_pkt(l_ptr); 3186 res = link_max_pkt(l_ptr);
3187 tipc_node_unlock(n_ptr); 3187 tipc_node_unlock(n_ptr);
3188 } 3188 }
3189 read_unlock_bh(&tipc_net_lock); 3189 read_unlock_bh(&tipc_net_lock);
3190 return res; 3190 return res;
3191} 3191}
3192 3192
@@ -3244,8 +3244,8 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
3244 tipc_printf(buf, "%u]", 3244 tipc_printf(buf, "%u]",
3245 msg_seqno(buf_msg 3245 msg_seqno(buf_msg
3246 (l_ptr->last_out)), l_ptr->out_queue_size); 3246 (l_ptr->last_out)), l_ptr->out_queue_size);
3247 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - 3247 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3248 msg_seqno(buf_msg(l_ptr->first_out))) 3248 msg_seqno(buf_msg(l_ptr->first_out)))
3249 != (l_ptr->out_queue_size - 1)) 3249 != (l_ptr->out_queue_size - 1))
3250 || (l_ptr->last_out->next != 0)) { 3250 || (l_ptr->last_out->next != 0)) {
3251 tipc_printf(buf, "\nSend queue inconsistency\n"); 3251 tipc_printf(buf, "\nSend queue inconsistency\n");
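For reference, the link name that tipc_link_create() assembles in the hunks above has the form zone.cluster.node:interface-zone.cluster.node:interface, with the peer interface appended later by the reset/activate path, while link_name_validate() enforces the 255/4095 field limits and the tipc_alphabet character set on the interface names. Below is a minimal user-space sketch of that naming convention only; the address bit layout and the TIPC_MAX_IF_NAME value are assumptions taken from the validation limits visible above, and make_link_name() is purely illustrative, not a kernel helper.

/* Illustrative sketch (not kernel code): builds a TIPC link name of the
 * form <z.c.n:if>-<z.c.n:if>, mirroring the sprintf() format used in
 * tipc_link_create() and the field limits checked by link_name_validate().
 */
#include <stdio.h>

#define TIPC_MAX_IF_NAME 16        /* assumed value, for illustration only */

static unsigned tipc_zone(unsigned addr)    { return (addr >> 24) & 0xff;  }
static unsigned tipc_cluster(unsigned addr) { return (addr >> 12) & 0xfff; }
static unsigned tipc_node(unsigned addr)    { return addr & 0xfff;         }

static void make_link_name(char *buf, size_t len,
                           unsigned own, const char *own_if,
                           unsigned peer, const char *peer_if)
{
    /* zone <= 255, cluster/node <= 4095, i/f names 2..TIPC_MAX_IF_NAME chars */
    snprintf(buf, len, "%u.%u.%u:%s-%u.%u.%u:%s",
             tipc_zone(own), tipc_cluster(own), tipc_node(own), own_if,
             tipc_zone(peer), tipc_cluster(peer), tipc_node(peer), peer_if);
}

int main(void)
{
    char name[64];

    make_link_name(name, sizeof(name), 0x01001001, "eth0",
                   0x01001002, "eth0");
    printf("%s\n", name);   /* prints: 1.1.1:eth0-1.1.2:eth0 */
    return 0;
}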
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 2d3c157f707d..52f3e7c1871f 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/link.h: Include file for TIPC link code 2 * net/tipc/link.h: Include file for TIPC link code
3 * 3 *
4 * Copyright (c) 1995-2006, Ericsson AB 4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -45,8 +45,8 @@
45#define PUSH_FAILED 1 45#define PUSH_FAILED 1
46#define PUSH_FINISHED 2 46#define PUSH_FINISHED 2
47 47
48/* 48/*
49 * Link states 49 * Link states
50 */ 50 */
51 51
52#define WORKING_WORKING 560810u 52#define WORKING_WORKING 560810u
@@ -54,7 +54,7 @@
54#define RESET_UNKNOWN 560812u 54#define RESET_UNKNOWN 560812u
55#define RESET_RESET 560813u 55#define RESET_RESET 560813u
56 56
57/* 57/*
58 * Starting value for maximum packet size negotiation on unicast links 58 * Starting value for maximum packet size negotiation on unicast links
59 * (unless bearer MTU is less) 59 * (unless bearer MTU is less)
60 */ 60 */
@@ -74,7 +74,7 @@
74 * @peer_session: link session # being used by peer end of link 74 * @peer_session: link session # being used by peer end of link
75 * @peer_bearer_id: bearer id used by link's peer endpoint 75 * @peer_bearer_id: bearer id used by link's peer endpoint
76 * @b_ptr: pointer to bearer used by link 76 * @b_ptr: pointer to bearer used by link
77 * @tolerance: minimum link continuity loss needed to reset link [in ms] 77 * @tolerance: minimum link continuity loss needed to reset link [in ms]
78 * @continuity_interval: link continuity testing interval [in ms] 78 * @continuity_interval: link continuity testing interval [in ms]
79 * @abort_limit: # of unacknowledged continuity probes needed to reset link 79 * @abort_limit: # of unacknowledged continuity probes needed to reset link
80 * @state: current state of link FSM 80 * @state: current state of link FSM
@@ -110,7 +110,7 @@
110 * @stats: collects statistics regarding link activity 110 * @stats: collects statistics regarding link activity
111 * @print_buf: print buffer used to log link activity 111 * @print_buf: print buffer used to log link activity
112 */ 112 */
113 113
114struct link { 114struct link {
115 u32 addr; 115 u32 addr;
116 char name[TIPC_MAX_LINK_NAME]; 116 char name[TIPC_MAX_LINK_NAME];
@@ -143,18 +143,18 @@ struct link {
143 u32 exp_msg_count; 143 u32 exp_msg_count;
144 u32 reset_checkpoint; 144 u32 reset_checkpoint;
145 145
146 /* Max packet negotiation */ 146 /* Max packet negotiation */
147 u32 max_pkt; 147 u32 max_pkt;
148 u32 max_pkt_target; 148 u32 max_pkt_target;
149 u32 max_pkt_probes; 149 u32 max_pkt_probes;
150 150
151 /* Sending */ 151 /* Sending */
152 u32 out_queue_size; 152 u32 out_queue_size;
153 struct sk_buff *first_out; 153 struct sk_buff *first_out;
154 struct sk_buff *last_out; 154 struct sk_buff *last_out;
155 u32 next_out_no; 155 u32 next_out_no;
156 u32 last_retransmitted; 156 u32 last_retransmitted;
157 u32 stale_count; 157 u32 stale_count;
158 158
159 /* Reception */ 159 /* Reception */
160 u32 next_in_no; 160 u32 next_in_no;
@@ -174,7 +174,7 @@ struct link {
174 u32 long_msg_seq_no; 174 u32 long_msg_seq_no;
175 struct sk_buff *defragm_buf; 175 struct sk_buff *defragm_buf;
176 176
177 /* Statistics */ 177 /* Statistics */
178 struct { 178 struct {
179 u32 sent_info; /* used in counting # sent packets */ 179 u32 sent_info; /* used in counting # sent packets */
180 u32 recv_info; /* used in counting # recv'd packets */ 180 u32 recv_info; /* used in counting # recv'd packets */
@@ -239,9 +239,9 @@ void tipc_link_reset(struct link *l_ptr);
239int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector); 239int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
240int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf); 240int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
241u32 tipc_link_get_max_pkt(u32 dest,u32 selector); 241u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
242int tipc_link_send_sections_fast(struct port* sender, 242int tipc_link_send_sections_fast(struct port* sender,
243 struct iovec const *msg_sect, 243 struct iovec const *msg_sect,
244 const u32 num_sect, 244 const u32 num_sect,
245 u32 destnode); 245 u32 destnode);
246int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf); 246int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
247void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr, 247void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
@@ -250,7 +250,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf);
250int tipc_link_recv_fragment(struct sk_buff **pending, 250int tipc_link_recv_fragment(struct sk_buff **pending,
251 struct sk_buff **fb, 251 struct sk_buff **fb,
252 struct tipc_msg **msg); 252 struct tipc_msg **msg);
253void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, 253void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
254 u32 tolerance, u32 priority, u32 acked_mtu); 254 u32 tolerance, u32 priority, u32 acked_mtu);
255void tipc_link_push_queue(struct link *l_ptr); 255void tipc_link_push_queue(struct link *l_ptr);
256u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, 256u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 3bd345a344e5..782485468fb2 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/msg.c: TIPC message header routines 2 * net/tipc/msg.c: TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 6699aaf7bd4c..62d549063604 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/msg.h: Include file for TIPC message header routines 2 * net/tipc/msg.h: Include file for TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -56,10 +56,10 @@
56 56
57/* 57/*
58 TIPC user data message header format, version 2 58 TIPC user data message header format, version 2
59 59
60 - Fundamental definitions available to privileged TIPC users 60 - Fundamental definitions available to privileged TIPC users
61 are located in tipc_msg.h. 61 are located in tipc_msg.h.
62 - Remaining definitions available to TIPC internal users appear below. 62 - Remaining definitions available to TIPC internal users appear below.
63*/ 63*/
64 64
65 65
@@ -75,7 +75,7 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
75 msg_set_word(m, w, (word |= (val << pos))); 75 msg_set_word(m, w, (word |= (val << pos)));
76} 76}
77 77
78/* 78/*
79 * Word 0 79 * Word 0
80 */ 80 */
81 81
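msg_set_bits()/msg_bits() pack each header field into one of the 32-bit header words at a given bit offset under a mask; accessors such as msg_version() or msg_set_user() are thin wrappers around them. A minimal host-order sketch of that scheme (the real code also converts words to and from network byte order via msg_word()/msg_set_word(), which is omitted here):

#include <stdint.h>
#include <assert.h>

/* toy 11-word header, host byte order only */
struct hdr { uint32_t w[11]; };

static uint32_t get_bits(const struct hdr *h, int w, int pos, uint32_t mask)
{
        return (h->w[w] >> pos) & mask;
}

static void set_bits(struct hdr *h, int w, int pos, uint32_t mask, uint32_t val)
{
        h->w[w] &= ~(mask << pos);            /* clear the field */
        h->w[w] |= (val & mask) << pos;       /* write the new value */
}

int main(void)
{
        struct hdr h = { { 0 } };
        set_bits(&h, 0, 29, 0x7, 2);          /* e.g. a 3-bit version field in word 0 */
        set_bits(&h, 2, 0, 0xffff, 4711);     /* e.g. a 16-bit seqno field in word 2 */
        assert(get_bits(&h, 0, 29, 0x7) == 2);
        assert(get_bits(&h, 2, 0, 0xffff) == 4711);
        return 0;
}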
@@ -84,7 +84,7 @@ static inline u32 msg_version(struct tipc_msg *m)
84 return msg_bits(m, 0, 29, 7); 84 return msg_bits(m, 0, 29, 7);
85} 85}
86 86
87static inline void msg_set_version(struct tipc_msg *m) 87static inline void msg_set_version(struct tipc_msg *m)
88{ 88{
89 msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION); 89 msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION);
90} 90}
@@ -99,47 +99,47 @@ static inline u32 msg_isdata(struct tipc_msg *m)
99 return (msg_user(m) <= DATA_CRITICAL); 99 return (msg_user(m) <= DATA_CRITICAL);
100} 100}
101 101
102static inline void msg_set_user(struct tipc_msg *m, u32 n) 102static inline void msg_set_user(struct tipc_msg *m, u32 n)
103{ 103{
104 msg_set_bits(m, 0, 25, 0xf, n); 104 msg_set_bits(m, 0, 25, 0xf, n);
105} 105}
106 106
107static inline void msg_set_importance(struct tipc_msg *m, u32 i) 107static inline void msg_set_importance(struct tipc_msg *m, u32 i)
108{ 108{
109 msg_set_user(m, i); 109 msg_set_user(m, i);
110} 110}
111 111
112static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n) 112static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
113{ 113{
114 msg_set_bits(m, 0, 21, 0xf, n>>2); 114 msg_set_bits(m, 0, 21, 0xf, n>>2);
115} 115}
116 116
117static inline int msg_non_seq(struct tipc_msg *m) 117static inline int msg_non_seq(struct tipc_msg *m)
118{ 118{
119 return msg_bits(m, 0, 20, 1); 119 return msg_bits(m, 0, 20, 1);
120} 120}
121 121
122static inline void msg_set_non_seq(struct tipc_msg *m) 122static inline void msg_set_non_seq(struct tipc_msg *m)
123{ 123{
124 msg_set_bits(m, 0, 20, 1, 1); 124 msg_set_bits(m, 0, 20, 1, 1);
125} 125}
126 126
127static inline int msg_dest_droppable(struct tipc_msg *m) 127static inline int msg_dest_droppable(struct tipc_msg *m)
128{ 128{
129 return msg_bits(m, 0, 19, 1); 129 return msg_bits(m, 0, 19, 1);
130} 130}
131 131
132static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d) 132static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
133{ 133{
134 msg_set_bits(m, 0, 19, 1, d); 134 msg_set_bits(m, 0, 19, 1, d);
135} 135}
136 136
137static inline int msg_src_droppable(struct tipc_msg *m) 137static inline int msg_src_droppable(struct tipc_msg *m)
138{ 138{
139 return msg_bits(m, 0, 18, 1); 139 return msg_bits(m, 0, 18, 1);
140} 140}
141 141
142static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d) 142static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
143{ 143{
144 msg_set_bits(m, 0, 18, 1, d); 144 msg_set_bits(m, 0, 18, 1, d);
145} 145}
@@ -150,31 +150,31 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
150} 150}
151 151
152 152
153/* 153/*
154 * Word 1 154 * Word 1
155 */ 155 */
156 156
157static inline void msg_set_type(struct tipc_msg *m, u32 n) 157static inline void msg_set_type(struct tipc_msg *m, u32 n)
158{ 158{
159 msg_set_bits(m, 1, 29, 0x7, n); 159 msg_set_bits(m, 1, 29, 0x7, n);
160} 160}
161 161
162static inline void msg_set_errcode(struct tipc_msg *m, u32 err) 162static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
163{ 163{
164 msg_set_bits(m, 1, 25, 0xf, err); 164 msg_set_bits(m, 1, 25, 0xf, err);
165} 165}
166 166
167static inline u32 msg_reroute_cnt(struct tipc_msg *m) 167static inline u32 msg_reroute_cnt(struct tipc_msg *m)
168{ 168{
169 return msg_bits(m, 1, 21, 0xf); 169 return msg_bits(m, 1, 21, 0xf);
170} 170}
171 171
172static inline void msg_incr_reroute_cnt(struct tipc_msg *m) 172static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
173{ 173{
174 msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1); 174 msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
175} 175}
176 176
177static inline void msg_reset_reroute_cnt(struct tipc_msg *m) 177static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
178{ 178{
179 msg_set_bits(m, 1, 21, 0xf, 0); 179 msg_set_bits(m, 1, 21, 0xf, 0);
180} 180}
@@ -184,12 +184,12 @@ static inline u32 msg_lookup_scope(struct tipc_msg *m)
184 return msg_bits(m, 1, 19, 0x3); 184 return msg_bits(m, 1, 19, 0x3);
185} 185}
186 186
187static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n) 187static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
188{ 188{
189 msg_set_bits(m, 1, 19, 0x3, n); 189 msg_set_bits(m, 1, 19, 0x3, n);
190} 190}
191 191
192static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz) 192static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz)
193{ 193{
194 u32 hsz = msg_hdr_sz(m); 194 u32 hsz = msg_hdr_sz(m);
195 char *to = (char *)&m->hdr[hsz/4]; 195 char *to = (char *)&m->hdr[hsz/4];
@@ -206,13 +206,13 @@ static inline u32 msg_bcast_ack(struct tipc_msg *m)
206 return msg_bits(m, 1, 0, 0xffff); 206 return msg_bits(m, 1, 0, 0xffff);
207} 207}
208 208
209static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n) 209static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
210{ 210{
211 msg_set_bits(m, 1, 0, 0xffff, n); 211 msg_set_bits(m, 1, 0, 0xffff, n);
212} 212}
213 213
214 214
215/* 215/*
216 * Word 2 216 * Word 2
217 */ 217 */
218 218
@@ -221,7 +221,7 @@ static inline u32 msg_ack(struct tipc_msg *m)
221 return msg_bits(m, 2, 16, 0xffff); 221 return msg_bits(m, 2, 16, 0xffff);
222} 222}
223 223
224static inline void msg_set_ack(struct tipc_msg *m, u32 n) 224static inline void msg_set_ack(struct tipc_msg *m, u32 n)
225{ 225{
226 msg_set_bits(m, 2, 16, 0xffff, n); 226 msg_set_bits(m, 2, 16, 0xffff, n);
227} 227}
@@ -231,48 +231,48 @@ static inline u32 msg_seqno(struct tipc_msg *m)
231 return msg_bits(m, 2, 0, 0xffff); 231 return msg_bits(m, 2, 0, 0xffff);
232} 232}
233 233
234static inline void msg_set_seqno(struct tipc_msg *m, u32 n) 234static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
235{ 235{
236 msg_set_bits(m, 2, 0, 0xffff, n); 236 msg_set_bits(m, 2, 0, 0xffff, n);
237} 237}
238 238
239 239
240/* 240/*
241 * Words 3-10 241 * Words 3-10
242 */ 242 */
243 243
244 244
245static inline void msg_set_prevnode(struct tipc_msg *m, u32 a) 245static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
246{ 246{
247 msg_set_word(m, 3, a); 247 msg_set_word(m, 3, a);
248} 248}
249 249
250static inline void msg_set_origport(struct tipc_msg *m, u32 p) 250static inline void msg_set_origport(struct tipc_msg *m, u32 p)
251{ 251{
252 msg_set_word(m, 4, p); 252 msg_set_word(m, 4, p);
253} 253}
254 254
255static inline void msg_set_destport(struct tipc_msg *m, u32 p) 255static inline void msg_set_destport(struct tipc_msg *m, u32 p)
256{ 256{
257 msg_set_word(m, 5, p); 257 msg_set_word(m, 5, p);
258} 258}
259 259
260static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p) 260static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
261{ 261{
262 msg_set_word(m, 5, p); 262 msg_set_word(m, 5, p);
263} 263}
264 264
265static inline void msg_set_orignode(struct tipc_msg *m, u32 a) 265static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
266{ 266{
267 msg_set_word(m, 6, a); 267 msg_set_word(m, 6, a);
268} 268}
269 269
270static inline void msg_set_destnode(struct tipc_msg *m, u32 a) 270static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
271{ 271{
272 msg_set_word(m, 7, a); 272 msg_set_word(m, 7, a);
273} 273}
274 274
275static inline int msg_is_dest(struct tipc_msg *m, u32 d) 275static inline int msg_is_dest(struct tipc_msg *m, u32 d)
276{ 276{
277 return(msg_short(m) || (msg_destnode(m) == d)); 277 return(msg_short(m) || (msg_destnode(m) == d));
278} 278}
@@ -284,7 +284,7 @@ static inline u32 msg_routed(struct tipc_msg *m)
284 return(msg_destnode(m) ^ msg_orignode(m)) >> 11; 284 return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
285} 285}
286 286
287static inline void msg_set_nametype(struct tipc_msg *m, u32 n) 287static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
288{ 288{
289 msg_set_word(m, 8, n); 289 msg_set_word(m, 8, n);
290} 290}
@@ -309,17 +309,17 @@ static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
309 msg_set_word(m, 8, n); 309 msg_set_word(m, 8, n);
310} 310}
311 311
312static inline void msg_set_namelower(struct tipc_msg *m, u32 n) 312static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
313{ 313{
314 msg_set_word(m, 9, n); 314 msg_set_word(m, 9, n);
315} 315}
316 316
317static inline void msg_set_nameinst(struct tipc_msg *m, u32 n) 317static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
318{ 318{
319 msg_set_namelower(m, n); 319 msg_set_namelower(m, n);
320} 320}
321 321
322static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) 322static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
323{ 323{
324 msg_set_word(m, 10, n); 324 msg_set_word(m, 10, n);
325} 325}
@@ -329,7 +329,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
329 return (struct tipc_msg *)msg_data(m); 329 return (struct tipc_msg *)msg_data(m);
330} 330}
331 331
332static inline void msg_expand(struct tipc_msg *m, u32 destnode) 332static inline void msg_expand(struct tipc_msg *m, u32 destnode)
333{ 333{
334 if (!msg_short(m)) 334 if (!msg_short(m))
335 return; 335 return;
@@ -344,7 +344,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
344/* 344/*
345 TIPC internal message header format, version 2 345 TIPC internal message header format, version 2
346 346
347 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 347 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
348 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 348 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
349 w0:|vers |msg usr|hdr sz |n|resrv| packet size | 349 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
350 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 350 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -372,9 +372,9 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
372 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 372 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
373 373
374 NB: CONN_MANAGER use data message format. LINK_CONFIG has own format. 374 NB: CONN_MANAGER use data message format. LINK_CONFIG has own format.
375*/ 375*/
376 376
377/* 377/*
378 * Internal users 378 * Internal users
379 */ 379 */
380 380
@@ -390,7 +390,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
390#define INT_H_SIZE 40 390#define INT_H_SIZE 40
391#define DSC_H_SIZE 40 391#define DSC_H_SIZE 40
392 392
393/* 393/*
394 * Connection management protocol messages 394 * Connection management protocol messages
395 */ 395 */
396 396
@@ -398,7 +398,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
398#define CONN_PROBE_REPLY 1 398#define CONN_PROBE_REPLY 1
399#define CONN_ACK 2 399#define CONN_ACK 2
400 400
401/* 401/*
402 * Name distributor messages 402 * Name distributor messages
403 */ 403 */
404 404
@@ -406,7 +406,7 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode)
406#define WITHDRAWAL 1 406#define WITHDRAWAL 1
407 407
408 408
409/* 409/*
410 * Word 1 410 * Word 1
411 */ 411 */
412 412
@@ -425,13 +425,13 @@ static inline u32 msg_req_links(struct tipc_msg *m)
425 return msg_bits(m, 1, 16, 0xfff); 425 return msg_bits(m, 1, 16, 0xfff);
426} 426}
427 427
428static inline void msg_set_req_links(struct tipc_msg *m, u32 n) 428static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
429{ 429{
430 msg_set_bits(m, 1, 16, 0xfff, n); 430 msg_set_bits(m, 1, 16, 0xfff, n);
431} 431}
432 432
433 433
434/* 434/*
435 * Word 2 435 * Word 2
436 */ 436 */
437 437
@@ -440,7 +440,7 @@ static inline u32 msg_dest_domain(struct tipc_msg *m)
440 return msg_word(m, 2); 440 return msg_word(m, 2);
441} 441}
442 442
443static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n) 443static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
444{ 444{
445 msg_set_word(m, 2, n); 445 msg_set_word(m, 2, n);
446} 446}
@@ -460,13 +460,13 @@ static inline u32 msg_bcgap_to(struct tipc_msg *m)
460 return msg_bits(m, 2, 0, 0xffff); 460 return msg_bits(m, 2, 0, 0xffff);
461} 461}
462 462
463static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n) 463static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
464{ 464{
465 msg_set_bits(m, 2, 0, 0xffff, n); 465 msg_set_bits(m, 2, 0, 0xffff, n);
466} 466}
467 467
468 468
469/* 469/*
470 * Word 4 470 * Word 4
471 */ 471 */
472 472
@@ -533,7 +533,7 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
533 msg_set_bits(m, 4, 0, 1, (n & 1)); 533 msg_set_bits(m, 4, 0, 1, (n & 1));
534} 534}
535 535
536/* 536/*
537 * Word 5 537 * Word 5
538 */ 538 */
539 539
@@ -603,7 +603,7 @@ static inline void msg_clear_redundant_link(struct tipc_msg *m)
603} 603}
604 604
605 605
606/* 606/*
607 * Word 9 607 * Word 9
608 */ 608 */
609 609
@@ -627,12 +627,12 @@ static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
627 msg_set_bits(m, 9, 16, 0xffff, n); 627 msg_set_bits(m, 9, 16, 0xffff, n);
628} 628}
629 629
630static inline u32 msg_max_pkt(struct tipc_msg *m) 630static inline u32 msg_max_pkt(struct tipc_msg *m)
631{ 631{
632 return (msg_bits(m, 9, 16, 0xffff) * 4); 632 return (msg_bits(m, 9, 16, 0xffff) * 4);
633} 633}
634 634
635static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n) 635static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
636{ 636{
637 msg_set_bits(m, 9, 16, 0xffff, (n / 4)); 637 msg_set_bits(m, 9, 16, 0xffff, (n / 4));
638} 638}
@@ -647,7 +647,7 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
647 msg_set_bits(m, 9, 0, 0xffff, n); 647 msg_set_bits(m, 9, 0, 0xffff, n);
648} 648}
649 649
650/* 650/*
651 * Routing table message data 651 * Routing table message data
652 */ 652 */
653 653
@@ -672,7 +672,7 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
672 msg_data(m)[pos + 4] = 1; 672 msg_data(m)[pos + 4] = 1;
673} 673}
674 674
675/* 675/*
676 * Segmentation message types 676 * Segmentation message types
677 */ 677 */
678 678
@@ -680,7 +680,7 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
680#define FRAGMENT 1 680#define FRAGMENT 1
681#define LAST_FRAGMENT 2 681#define LAST_FRAGMENT 2
682 682
683/* 683/*
684 * Link management protocol message types 684 * Link management protocol message types
685 */ 685 */
686 686
@@ -688,13 +688,13 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
688#define RESET_MSG 1 688#define RESET_MSG 1
689#define ACTIVATE_MSG 2 689#define ACTIVATE_MSG 2
690 690
691/* 691/*
692 * Changeover tunnel message types 692 * Changeover tunnel message types
693 */ 693 */
694#define DUPLICATE_MSG 0 694#define DUPLICATE_MSG 0
695#define ORIGINAL_MSG 1 695#define ORIGINAL_MSG 1
696 696
697/* 697/*
698 * Routing table message types 698 * Routing table message types
699 */ 699 */
700#define EXT_ROUTING_TABLE 0 700#define EXT_ROUTING_TABLE 0
@@ -703,7 +703,7 @@ static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
703#define ROUTE_ADDITION 3 703#define ROUTE_ADDITION 3
704#define ROUTE_REMOVAL 4 704#define ROUTE_REMOVAL 4
705 705
706/* 706/*
707 * Config protocol message types 707 * Config protocol message types
708 */ 708 */
709 709
@@ -724,7 +724,7 @@ static inline u32 msg_tot_importance(struct tipc_msg *m)
724} 724}
725 725
726 726
727static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, 727static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
728 u32 err, u32 hsize, u32 destnode) 728 u32 err, u32 hsize, u32 destnode)
729{ 729{
730 memset(m, 0, hsize); 730 memset(m, 0, hsize);
@@ -741,7 +741,7 @@ static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
741 } 741 }
742} 742}
743 743
744/** 744/**
745 * msg_calc_data_size - determine total data size for message 745 * msg_calc_data_size - determine total data size for message
746 */ 746 */
747 747
@@ -755,15 +755,15 @@ static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
755 return dsz; 755 return dsz;
756} 756}
757 757
758/** 758/**
759 * msg_build - create message using specified header and data 759 * msg_build - create message using specified header and data
760 * 760 *
761 * Note: Caller must not hold any locks in case copy_from_user() is interrupted! 761 * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
762 * 762 *
763 * Returns message data size or errno 763 * Returns message data size or errno
764 */ 764 */
765 765
766static inline int msg_build(struct tipc_msg *hdr, 766static inline int msg_build(struct tipc_msg *hdr,
767 struct iovec const *msg_sect, u32 num_sect, 767 struct iovec const *msg_sect, u32 num_sect,
768 int max_size, int usrmem, struct sk_buff** buf) 768 int max_size, int usrmem, struct sk_buff** buf)
769{ 769{
@@ -789,11 +789,11 @@ static inline int msg_build(struct tipc_msg *hdr,
789 memcpy((*buf)->data, (unchar *)hdr, hsz); 789 memcpy((*buf)->data, (unchar *)hdr, hsz);
790 for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) { 790 for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
791 if (likely(usrmem)) 791 if (likely(usrmem))
792 res = !copy_from_user((*buf)->data + pos, 792 res = !copy_from_user((*buf)->data + pos,
793 msg_sect[cnt].iov_base, 793 msg_sect[cnt].iov_base,
794 msg_sect[cnt].iov_len); 794 msg_sect[cnt].iov_len);
795 else 795 else
796 memcpy((*buf)->data + pos, msg_sect[cnt].iov_base, 796 memcpy((*buf)->data + pos, msg_sect[cnt].iov_base,
797 msg_sect[cnt].iov_len); 797 msg_sect[cnt].iov_len);
798 pos += msg_sect[cnt].iov_len; 798 pos += msg_sect[cnt].iov_len;
799 } 799 }
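msg_build() above gathers the header plus a number of iovec sections into one contiguous buffer, copying from user memory or kernel memory depending on 'usrmem'. A minimal user-space sketch of the gather step, assuming plain memcpy only and with buffer sizing and error handling simplified:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Gather 'num_sect' iovec sections after a header of 'hsz' bytes.
 * Returns a malloc'ed buffer of hsz + total data size, or NULL. */
static char *gather(const void *hdr, size_t hsz,
                    const struct iovec *sect, unsigned num_sect, size_t *out_len)
{
        size_t dsz = 0, pos = hsz;
        char *buf;

        for (unsigned i = 0; i < num_sect; i++)
                dsz += sect[i].iov_len;          /* total payload size */

        buf = malloc(hsz + dsz);
        if (!buf)
                return NULL;

        memcpy(buf, hdr, hsz);                   /* header first */
        for (unsigned i = 0; i < num_sect; i++) {
                memcpy(buf + pos, sect[i].iov_base, sect[i].iov_len);
                pos += sect[i].iov_len;
        }
        *out_len = pos;
        return buf;
}

int main(void)
{
        struct iovec v[2] = {
                { .iov_base = (void *)"hello ", .iov_len = 6 },
                { .iov_base = (void *)"world",  .iov_len = 5 },
        };
        size_t len;
        char *b = gather("HDR!", 4, v, 2, &len);
        free(b);
        return (b && len == 15) ? 0 : 1;
}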
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 7bf87cb26ef3..39fd1619febf 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/name_distr.c: TIPC name distribution code 2 * net/tipc/name_distr.c: TIPC name distribution code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -53,15 +53,15 @@
53 * @upper: name sequence upper bound 53 * @upper: name sequence upper bound
54 * @ref: publishing port reference 54 * @ref: publishing port reference
55 * @key: publication key 55 * @key: publication key
56 * 56 *
57 * ===> All fields are stored in network byte order. <=== 57 * ===> All fields are stored in network byte order. <===
58 * 58 *
59 * First 3 fields identify (name or) name sequence being published. 59 * First 3 fields identify (name or) name sequence being published.
60 * Reference field uniquely identifies port that published name sequence. 60 * Reference field uniquely identifies port that published name sequence.
61 * Key field uniquely identifies publication, in the event a port has 61 * Key field uniquely identifies publication, in the event a port has
62 * multiple publications of the same name sequence. 62 * multiple publications of the same name sequence.
63 * 63 *
64 * Note: There is no field that identifies the publishing node because it is 64 * Note: There is no field that identifies the publishing node because it is
65 * the same for all items contained within a publication message. 65 * the same for all items contained within a publication message.
66 */ 66 */
67 67
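The item format described above is fixed-size with every field in network byte order, which is what lets a receiver walk the message payload as a flat array of items. A hypothetical user-space sketch of packing one such item — the field names follow the description above, but the struct here is only a stand-in for the real one in name_distr.c:

#include <stdint.h>
#include <arpa/inet.h>
#include <assert.h>

/* illustrative stand-in for the fixed-size distribution item */
struct item {
        uint32_t type;   /* name sequence type        */
        uint32_t lower;  /* name sequence lower bound */
        uint32_t upper;  /* name sequence upper bound */
        uint32_t ref;    /* publishing port reference */
        uint32_t key;    /* publication key           */
};

static void pack(struct item *i, uint32_t type, uint32_t lower, uint32_t upper,
                 uint32_t ref, uint32_t key)
{
        i->type  = htonl(type);    /* everything goes on the wire big-endian */
        i->lower = htonl(lower);
        i->upper = htonl(upper);
        i->ref   = htonl(ref);
        i->key   = htonl(key);
}

int main(void)
{
        struct item i;
        pack(&i, 1000, 1, 100, 42, 7);
        assert(ntohl(i.lower) == 1);   /* the receiver converts back with ntohl() */
        return 0;
}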
@@ -74,12 +74,12 @@ struct distr_item {
74}; 74};
75 75
76/** 76/**
77 * List of externally visible publications by this node -- 77 * List of externally visible publications by this node --
78 * that is, all publications having scope > TIPC_NODE_SCOPE. 78 * that is, all publications having scope > TIPC_NODE_SCOPE.
79 */ 79 */
80 80
81static LIST_HEAD(publ_root); 81static LIST_HEAD(publ_root);
82static u32 publ_cnt = 0; 82static u32 publ_cnt = 0;
83 83
84/** 84/**
85 * publ_to_item - add publication info to a publication message 85 * publ_to_item - add publication info to a publication message
@@ -101,12 +101,12 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
101 101
102static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) 102static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
103{ 103{
104 struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size); 104 struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
105 struct tipc_msg *msg; 105 struct tipc_msg *msg;
106 106
107 if (buf != NULL) { 107 if (buf != NULL) {
108 msg = buf_msg(buf); 108 msg = buf_msg(buf);
109 msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK, 109 msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK,
110 LONG_H_SIZE, dest); 110 LONG_H_SIZE, dest);
111 msg_set_size(msg, LONG_H_SIZE + size); 111 msg_set_size(msg, LONG_H_SIZE + size);
112 } 112 }
@@ -174,7 +174,7 @@ void tipc_named_node_up(unsigned long node)
174 u32 rest; 174 u32 rest;
175 u32 max_item_buf; 175 u32 max_item_buf;
176 176
177 read_lock_bh(&tipc_nametbl_lock); 177 read_lock_bh(&tipc_nametbl_lock);
178 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE; 178 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
179 max_item_buf *= ITEM_SIZE; 179 max_item_buf *= ITEM_SIZE;
180 rest = publ_cnt * ITEM_SIZE; 180 rest = publ_cnt * ITEM_SIZE;
@@ -183,7 +183,7 @@ void tipc_named_node_up(unsigned long node)
183 if (!buf) { 183 if (!buf) {
184 left = (rest <= max_item_buf) ? rest : max_item_buf; 184 left = (rest <= max_item_buf) ? rest : max_item_buf;
185 rest -= left; 185 rest -= left;
186 buf = named_prepare_buf(PUBLICATION, left, node); 186 buf = named_prepare_buf(PUBLICATION, left, node);
187 if (!buf) { 187 if (!buf) {
188 warn("Bulk publication distribution failure\n"); 188 warn("Bulk publication distribution failure\n");
189 goto exit; 189 goto exit;
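The bulk-distribution loop in tipc_named_node_up() sends the publication list in buffers of at most TIPC_MAX_USER_MSG_SIZE, rounded down to a whole number of items. A small sketch of that chunking arithmetic; the sizes below are invented, the point is the rounding and the per-chunk loop:

#include <stdio.h>

#define ITEM_SIZE 20            /* assumed per-item size, illustrative    */
#define MAX_MSG   1476          /* assumed max user message size, ditto   */

int main(void)
{
        unsigned publ_cnt = 500;
        unsigned max_item_buf = (MAX_MSG / ITEM_SIZE) * ITEM_SIZE; /* whole items only */
        unsigned rest = publ_cnt * ITEM_SIZE;

        while (rest) {
                unsigned left = (rest <= max_item_buf) ? rest : max_item_buf;
                rest -= left;
                printf("send buffer carrying %u items (%u bytes)\n",
                       left / ITEM_SIZE, left);
        }
        return 0;
}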
@@ -196,20 +196,20 @@ void tipc_named_node_up(unsigned long node)
196 if (!left) { 196 if (!left) {
197 msg_set_link_selector(buf_msg(buf), node); 197 msg_set_link_selector(buf_msg(buf), node);
198 dbg("tipc_named_node_up: sending publish msg to " 198 dbg("tipc_named_node_up: sending publish msg to "
199 "<%u.%u.%u>\n", tipc_zone(node), 199 "<%u.%u.%u>\n", tipc_zone(node),
200 tipc_cluster(node), tipc_node(node)); 200 tipc_cluster(node), tipc_node(node));
201 tipc_link_send(buf, node, node); 201 tipc_link_send(buf, node, node);
202 buf = NULL; 202 buf = NULL;
203 } 203 }
204 } 204 }
205exit: 205exit:
206 read_unlock_bh(&tipc_nametbl_lock); 206 read_unlock_bh(&tipc_nametbl_lock);
207} 207}
208 208
209/** 209/**
210 * node_is_down - remove publication associated with a failed node 210 * node_is_down - remove publication associated with a failed node
211 * 211 *
212 * Invoked for each publication issued by a newly failed node. 212 * Invoked for each publication issued by a newly failed node.
213 * Removes publication structure from name table & deletes it. 213 * Removes publication structure from name table & deletes it.
214 * In rare cases the link may have come back up again when this 214 * In rare cases the link may have come back up again when this
215 * function is called, and we have two items representing the same 215 * function is called, and we have two items representing the same
@@ -221,15 +221,15 @@ static void node_is_down(struct publication *publ)
221{ 221{
222 struct publication *p; 222 struct publication *p;
223 223
224 write_lock_bh(&tipc_nametbl_lock); 224 write_lock_bh(&tipc_nametbl_lock);
225 dbg("node_is_down: withdrawing %u, %u, %u\n", 225 dbg("node_is_down: withdrawing %u, %u, %u\n",
226 publ->type, publ->lower, publ->upper); 226 publ->type, publ->lower, publ->upper);
227 publ->key += 1222345; 227 publ->key += 1222345;
228 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 228 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
229 publ->node, publ->ref, publ->key); 229 publ->node, publ->ref, publ->key);
230 write_unlock_bh(&tipc_nametbl_lock); 230 write_unlock_bh(&tipc_nametbl_lock);
231 231
232 if (p != publ) { 232 if (p != publ) {
233 err("Unable to remove publication from failed node\n" 233 err("Unable to remove publication from failed node\n"
234 "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n", 234 "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
235 publ->type, publ->lower, publ->node, publ->ref, publ->key); 235 publ->type, publ->lower, publ->node, publ->ref, publ->key);
@@ -251,27 +251,27 @@ void tipc_named_recv(struct sk_buff *buf)
251 struct distr_item *item = (struct distr_item *)msg_data(msg); 251 struct distr_item *item = (struct distr_item *)msg_data(msg);
252 u32 count = msg_data_sz(msg) / ITEM_SIZE; 252 u32 count = msg_data_sz(msg) / ITEM_SIZE;
253 253
254 write_lock_bh(&tipc_nametbl_lock); 254 write_lock_bh(&tipc_nametbl_lock);
255 while (count--) { 255 while (count--) {
256 if (msg_type(msg) == PUBLICATION) { 256 if (msg_type(msg) == PUBLICATION) {
257 dbg("tipc_named_recv: got publication for %u, %u, %u\n", 257 dbg("tipc_named_recv: got publication for %u, %u, %u\n",
258 ntohl(item->type), ntohl(item->lower), 258 ntohl(item->type), ntohl(item->lower),
259 ntohl(item->upper)); 259 ntohl(item->upper));
260 publ = tipc_nametbl_insert_publ(ntohl(item->type), 260 publ = tipc_nametbl_insert_publ(ntohl(item->type),
261 ntohl(item->lower), 261 ntohl(item->lower),
262 ntohl(item->upper), 262 ntohl(item->upper),
263 TIPC_CLUSTER_SCOPE, 263 TIPC_CLUSTER_SCOPE,
264 msg_orignode(msg), 264 msg_orignode(msg),
265 ntohl(item->ref), 265 ntohl(item->ref),
266 ntohl(item->key)); 266 ntohl(item->key));
267 if (publ) { 267 if (publ) {
268 tipc_nodesub_subscribe(&publ->subscr, 268 tipc_nodesub_subscribe(&publ->subscr,
269 msg_orignode(msg), 269 msg_orignode(msg),
270 publ, 270 publ,
271 (net_ev_handler)node_is_down); 271 (net_ev_handler)node_is_down);
272 } 272 }
273 } else if (msg_type(msg) == WITHDRAWAL) { 273 } else if (msg_type(msg) == WITHDRAWAL) {
274 dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n", 274 dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n",
275 ntohl(item->type), ntohl(item->lower), 275 ntohl(item->type), ntohl(item->lower),
276 ntohl(item->upper)); 276 ntohl(item->upper));
277 publ = tipc_nametbl_remove_publ(ntohl(item->type), 277 publ = tipc_nametbl_remove_publ(ntohl(item->type),
@@ -282,7 +282,7 @@ void tipc_named_recv(struct sk_buff *buf)
282 282
283 if (publ) { 283 if (publ) {
284 tipc_nodesub_unsubscribe(&publ->subscr); 284 tipc_nodesub_unsubscribe(&publ->subscr);
285 kfree(publ); 285 kfree(publ);
286 } else { 286 } else {
287 err("Unable to remove publication by node 0x%x\n" 287 err("Unable to remove publication by node 0x%x\n"
288 "(type=%u, lower=%u, ref=%u, key=%u)\n", 288 "(type=%u, lower=%u, ref=%u, key=%u)\n",
@@ -295,13 +295,13 @@ void tipc_named_recv(struct sk_buff *buf)
295 } 295 }
296 item++; 296 item++;
297 } 297 }
298 write_unlock_bh(&tipc_nametbl_lock); 298 write_unlock_bh(&tipc_nametbl_lock);
299 buf_discard(buf); 299 buf_discard(buf);
300} 300}
301 301
302/** 302/**
303 * tipc_named_reinit - re-initialize local publication list 303 * tipc_named_reinit - re-initialize local publication list
304 * 304 *
305 * This routine is called whenever TIPC networking is (re)enabled. 305 * This routine is called whenever TIPC networking is (re)enabled.
306 * All existing publications by this node that have "cluster" or "zone" scope 306 * All existing publications by this node that have "cluster" or "zone" scope
307 * are updated to reflect the node's current network address. 307 * are updated to reflect the node's current network address.
@@ -312,11 +312,11 @@ void tipc_named_reinit(void)
312{ 312{
313 struct publication *publ; 313 struct publication *publ;
314 314
315 write_lock_bh(&tipc_nametbl_lock); 315 write_lock_bh(&tipc_nametbl_lock);
316 list_for_each_entry(publ, &publ_root, local_list) { 316 list_for_each_entry(publ, &publ_root, local_list) {
317 if (publ->node == tipc_own_addr) 317 if (publ->node == tipc_own_addr)
318 break; 318 break;
319 publ->node = tipc_own_addr; 319 publ->node = tipc_own_addr;
320 } 320 }
321 write_unlock_bh(&tipc_nametbl_lock); 321 write_unlock_bh(&tipc_nametbl_lock);
322} 322}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 843da0172f4e..1e41bdd4f255 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/name_distr.h: Include file for TIPC name distribution code 2 * net/tipc/name_distr.h: Include file for TIPC name distribution code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 049242ea5c38..9dfc9127acdd 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/name_table.c: TIPC name table code 2 * net/tipc/name_table.c: TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -65,7 +65,7 @@ struct sub_seq {
65 struct publication *zone_list; 65 struct publication *zone_list;
66}; 66};
67 67
68/** 68/**
69 * struct name_seq - container for all published instances of a name type 69 * struct name_seq - container for all published instances of a name type
70 * @type: 32 bit 'type' value for name sequence 70 * @type: 32 bit 'type' value for name sequence
71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type'; 71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
@@ -89,7 +89,7 @@ struct name_seq {
89 89
90/** 90/**
91 * struct name_table - table containing all existing port name publications 91 * struct name_table - table containing all existing port name publications
92 * @types: pointer to fixed-sized array of name sequence lists, 92 * @types: pointer to fixed-sized array of name sequence lists,
93 * accessed via hashing on 'type'; name sequence lists are *not* sorted 93 * accessed via hashing on 'type'; name sequence lists are *not* sorted
94 * @local_publ_count: number of publications issued by this node 94 * @local_publ_count: number of publications issued by this node
95 */ 95 */
@@ -113,8 +113,8 @@ static int hash(int x)
113 * publ_create - create a publication structure 113 * publ_create - create a publication structure
114 */ 114 */
115 115
116static struct publication *publ_create(u32 type, u32 lower, u32 upper, 116static struct publication *publ_create(u32 type, u32 lower, u32 upper,
117 u32 scope, u32 node, u32 port_ref, 117 u32 scope, u32 node, u32 port_ref,
118 u32 key) 118 u32 key)
119{ 119{
120 struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC); 120 struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
@@ -148,7 +148,7 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
148 148
149/** 149/**
150 * tipc_nameseq_create - create a name sequence structure for the specified 'type' 150 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
151 * 151 *
152 * Allocates a single sub-sequence structure and sets it to all 0's. 152 * Allocates a single sub-sequence structure and sets it to all 0's.
153 */ 153 */
154 154
@@ -178,7 +178,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
178 178
179/** 179/**
180 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance 180 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
181 * 181 *
182 * Very time-critical, so binary searches through sub-sequence array. 182 * Very time-critical, so binary searches through sub-sequence array.
183 */ 183 */
184 184
@@ -204,7 +204,7 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
204 204
205/** 205/**
206 * nameseq_locate_subseq - determine position of name instance in sub-sequence 206 * nameseq_locate_subseq - determine position of name instance in sub-sequence
207 * 207 *
208 * Returns index in sub-sequence array of the entry that contains the specified 208 * Returns index in sub-sequence array of the entry that contains the specified
209 * instance value; if no entry contains that value, returns the position 209 * instance value; if no entry contains that value, returns the position
210 * where a new entry for it would be inserted in the array. 210 * where a new entry for it would be inserted in the array.
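As the comments above note, nameseq_find_subseq() binary-searches an array of sub-sequences ordered by their [lower, upper] ranges. A self-contained sketch of that kind of range search (the array contents and bounds are illustrative):

#include <stdio.h>
#include <stdint.h>

struct range { uint32_t lower, upper; };

/* return index of the range containing 'instance', or -1 if none */
static int find_range(const struct range *r, int n, uint32_t instance)
{
        int low = 0, high = n - 1;

        while (low <= high) {
                int mid = (low + high) / 2;
                if (instance < r[mid].lower)
                        high = mid - 1;
                else if (instance > r[mid].upper)
                        low = mid + 1;
                else
                        return mid;        /* lower <= instance <= upper */
        }
        return -1;
}

int main(void)
{
        struct range seq[] = { { 1, 10 }, { 20, 20 }, { 30, 99 } };
        printf("%d %d\n", find_range(seq, 3, 25), find_range(seq, 3, 42));
        return 0;
}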
@@ -232,7 +232,7 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
232} 232}
233 233
234/** 234/**
235 * tipc_nameseq_insert_publ - 235 * tipc_nameseq_insert_publ -
236 */ 236 */
237 237
238static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, 238static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
@@ -343,8 +343,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
343 } 343 }
344 } 344 }
345 345
346 /* 346 /*
347 * Any subscriptions waiting for notification? 347 * Any subscriptions waiting for notification?
348 */ 348 */
349 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 349 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
350 dbg("calling report_overlap()\n"); 350 dbg("calling report_overlap()\n");
@@ -352,7 +352,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
352 publ->lower, 352 publ->lower,
353 publ->upper, 353 publ->upper,
354 TIPC_PUBLISHED, 354 TIPC_PUBLISHED,
355 publ->ref, 355 publ->ref,
356 publ->node, 356 publ->node,
357 created_subseq); 357 created_subseq);
358 } 358 }
@@ -361,7 +361,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
361 361
362/** 362/**
363 * tipc_nameseq_remove_publ - 363 * tipc_nameseq_remove_publ -
364 * 364 *
365 * NOTE: There may be cases where TIPC is asked to remove a publication 365 * NOTE: There may be cases where TIPC is asked to remove a publication
366 * that is not in the name table. For example, if another node issues a 366 * that is not in the name table. For example, if another node issues a
367 * publication for a name sequence that overlaps an existing name sequence 367 * publication for a name sequence that overlaps an existing name sequence
@@ -392,12 +392,12 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
392 392
393 prev = sseq->zone_list; 393 prev = sseq->zone_list;
394 publ = sseq->zone_list->zone_list_next; 394 publ = sseq->zone_list->zone_list_next;
395 while ((publ->key != key) || (publ->ref != ref) || 395 while ((publ->key != key) || (publ->ref != ref) ||
396 (publ->node && (publ->node != node))) { 396 (publ->node && (publ->node != node))) {
397 prev = publ; 397 prev = publ;
398 publ = publ->zone_list_next; 398 publ = publ->zone_list_next;
399 if (prev == sseq->zone_list) { 399 if (prev == sseq->zone_list) {
400 400
401 /* Prevent endless loop if publication not found */ 401 /* Prevent endless loop if publication not found */
402 402
403 return NULL; 403 return NULL;
@@ -426,7 +426,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
426 426
427 err("Unable to de-list cluster publication\n" 427 err("Unable to de-list cluster publication\n"
428 "{%u%u}, node=0x%x, ref=%u, key=%u)\n", 428 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
429 publ->type, publ->lower, publ->node, 429 publ->type, publ->lower, publ->node,
430 publ->ref, publ->key); 430 publ->ref, publ->key);
431 goto end_cluster; 431 goto end_cluster;
432 } 432 }
@@ -456,7 +456,7 @@ end_cluster:
456 456
457 err("Unable to de-list node publication\n" 457 err("Unable to de-list node publication\n"
458 "{%u%u}, node=0x%x, ref=%u, key=%u)\n", 458 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
459 publ->type, publ->lower, publ->node, 459 publ->type, publ->lower, publ->node,
460 publ->ref, publ->key); 460 publ->ref, publ->key);
461 goto end_node; 461 goto end_node;
462 } 462 }
@@ -486,8 +486,8 @@ end_node:
486 tipc_subscr_report_overlap(s, 486 tipc_subscr_report_overlap(s,
487 publ->lower, 487 publ->lower,
488 publ->upper, 488 publ->upper,
489 TIPC_WITHDRAWN, 489 TIPC_WITHDRAWN,
490 publ->ref, 490 publ->ref,
491 publ->node, 491 publ->node,
492 removed_subseq); 492 removed_subseq);
493 } 493 }
@@ -517,8 +517,8 @@ void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
517 int must_report = 1; 517 int must_report = 1;
518 518
519 do { 519 do {
520 tipc_subscr_report_overlap(s, 520 tipc_subscr_report_overlap(s,
521 sseq->lower, 521 sseq->lower,
522 sseq->upper, 522 sseq->upper,
523 TIPC_PUBLISHED, 523 TIPC_PUBLISHED,
524 crs->ref, 524 crs->ref,
@@ -576,7 +576,7 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
576 scope, node, port, key); 576 scope, node, port, key);
577} 577}
578 578
579struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, 579struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
580 u32 node, u32 ref, u32 key) 580 u32 node, u32 ref, u32 key)
581{ 581{
582 struct publication *publ; 582 struct publication *publ;
@@ -676,14 +676,14 @@ not_found:
676 676
677/** 677/**
678 * tipc_nametbl_mc_translate - find multicast destinations 678 * tipc_nametbl_mc_translate - find multicast destinations
679 * 679 *
680 * Creates list of all local ports that overlap the given multicast address; 680 * Creates list of all local ports that overlap the given multicast address;
681 * also determines if any off-node ports overlap. 681 * also determines if any off-node ports overlap.
682 * 682 *
683 * Note: Publications with a scope narrower than 'limit' are ignored. 683 * Note: Publications with a scope narrower than 'limit' are ignored.
684 * (i.e. local node-scope publications mustn't receive messages arriving 684 * (i.e. local node-scope publications mustn't receive messages arriving
685 * from another node, even if the multicast link brought it here) 685 * from another node, even if the multicast link brought it here)
686 * 686 *
687 * Returns non-zero if any off-node ports overlap 687 * Returns non-zero if any off-node ports overlap
688 */ 688 */
689 689
@@ -730,7 +730,7 @@ exit:
730 * tipc_nametbl_publish_rsv - publish port name using a reserved name type 730 * tipc_nametbl_publish_rsv - publish port name using a reserved name type
731 */ 731 */
732 732
733int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope, 733int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
734 struct tipc_name_seq const *seq) 734 struct tipc_name_seq const *seq)
735{ 735{
736 int res; 736 int res;
@@ -745,13 +745,13 @@ int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
745 * tipc_nametbl_publish - add name publication to network name tables 745 * tipc_nametbl_publish - add name publication to network name tables
746 */ 746 */
747 747
748struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 748struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
749 u32 scope, u32 port_ref, u32 key) 749 u32 scope, u32 port_ref, u32 key)
750{ 750{
751 struct publication *publ; 751 struct publication *publ;
752 752
753 if (table.local_publ_count >= tipc_max_publications) { 753 if (table.local_publ_count >= tipc_max_publications) {
754 warn("Publication failed, local publication limit reached (%u)\n", 754 warn("Publication failed, local publication limit reached (%u)\n",
755 tipc_max_publications); 755 tipc_max_publications);
756 return NULL; 756 return NULL;
757 } 757 }
@@ -808,22 +808,22 @@ void tipc_nametbl_subscribe(struct subscription *s)
808 u32 type = s->seq.type; 808 u32 type = s->seq.type;
809 struct name_seq *seq; 809 struct name_seq *seq;
810 810
811 write_lock_bh(&tipc_nametbl_lock); 811 write_lock_bh(&tipc_nametbl_lock);
812 seq = nametbl_find_seq(type); 812 seq = nametbl_find_seq(type);
813 if (!seq) { 813 if (!seq) {
814 seq = tipc_nameseq_create(type, &table.types[hash(type)]); 814 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
815 } 815 }
816 if (seq){ 816 if (seq){
817 spin_lock_bh(&seq->lock); 817 spin_lock_bh(&seq->lock);
818 dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n", 818 dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
819 seq, type, s->seq.lower, s->seq.upper); 819 seq, type, s->seq.lower, s->seq.upper);
820 tipc_nameseq_subscribe(seq, s); 820 tipc_nameseq_subscribe(seq, s);
821 spin_unlock_bh(&seq->lock); 821 spin_unlock_bh(&seq->lock);
822 } else { 822 } else {
823 warn("Failed to create subscription for {%u,%u,%u}\n", 823 warn("Failed to create subscription for {%u,%u,%u}\n",
824 s->seq.type, s->seq.lower, s->seq.upper); 824 s->seq.type, s->seq.lower, s->seq.upper);
825 } 825 }
826 write_unlock_bh(&tipc_nametbl_lock); 826 write_unlock_bh(&tipc_nametbl_lock);
827} 827}
828 828
829/** 829/**
@@ -834,19 +834,19 @@ void tipc_nametbl_unsubscribe(struct subscription *s)
834{ 834{
835 struct name_seq *seq; 835 struct name_seq *seq;
836 836
837 write_lock_bh(&tipc_nametbl_lock); 837 write_lock_bh(&tipc_nametbl_lock);
838 seq = nametbl_find_seq(s->seq.type); 838 seq = nametbl_find_seq(s->seq.type);
839 if (seq != NULL){ 839 if (seq != NULL){
840 spin_lock_bh(&seq->lock); 840 spin_lock_bh(&seq->lock);
841 list_del_init(&s->nameseq_list); 841 list_del_init(&s->nameseq_list);
842 spin_unlock_bh(&seq->lock); 842 spin_unlock_bh(&seq->lock);
843 if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) { 843 if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
844 hlist_del_init(&seq->ns_list); 844 hlist_del_init(&seq->ns_list);
845 kfree(seq->sseqs); 845 kfree(seq->sseqs);
846 kfree(seq); 846 kfree(seq);
847 } 847 }
848 } 848 }
849 write_unlock_bh(&tipc_nametbl_lock); 849 write_unlock_bh(&tipc_nametbl_lock);
850} 850}
851 851
852 852
@@ -952,7 +952,7 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
952 * nametbl_list - print specified name table contents into the given buffer 952 * nametbl_list - print specified name table contents into the given buffer
953 */ 953 */
954 954
955static void nametbl_list(struct print_buf *buf, u32 depth_info, 955static void nametbl_list(struct print_buf *buf, u32 depth_info,
956 u32 type, u32 lowbound, u32 upbound) 956 u32 type, u32 lowbound, u32 upbound)
957{ 957{
958 struct hlist_head *seq_head; 958 struct hlist_head *seq_head;
@@ -976,7 +976,7 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
976 for (i = 0; i < tipc_nametbl_size; i++) { 976 for (i = 0; i < tipc_nametbl_size; i++) {
977 seq_head = &table.types[i]; 977 seq_head = &table.types[i];
978 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { 978 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
979 nameseq_list(seq, buf, depth, seq->type, 979 nameseq_list(seq, buf, depth, seq->type,
980 lowbound, upbound, i); 980 lowbound, upbound, i);
981 } 981 }
982 } 982 }
@@ -991,7 +991,7 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
991 seq_head = &table.types[i]; 991 seq_head = &table.types[i];
992 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { 992 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
993 if (seq->type == type) { 993 if (seq->type == type) {
994 nameseq_list(seq, buf, depth, type, 994 nameseq_list(seq, buf, depth, type,
995 lowbound, upbound, i); 995 lowbound, upbound, i);
996 break; 996 break;
997 } 997 }
@@ -1030,7 +1030,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
1030 tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY); 1030 tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
1031 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); 1031 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
1032 read_lock_bh(&tipc_nametbl_lock); 1032 read_lock_bh(&tipc_nametbl_lock);
1033 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), 1033 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
1034 ntohl(argv->lowbound), ntohl(argv->upbound)); 1034 ntohl(argv->lowbound), ntohl(argv->upbound));
1035 read_unlock_bh(&tipc_nametbl_lock); 1035 read_unlock_bh(&tipc_nametbl_lock);
1036 str_len = tipc_printbuf_validate(&b); 1036 str_len = tipc_printbuf_validate(&b);
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index e8a3d71763ce..b9e7cd336d76 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/name_table.h: Include file for TIPC name table code 2 * net/tipc/name_table.h: Include file for TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -64,7 +64,7 @@ struct port_list;
64 * @node_list: next matching name seq publication with >= node scope 64 * @node_list: next matching name seq publication with >= node scope
65 * @cluster_list: next matching name seq publication with >= cluster scope 65 * @cluster_list: next matching name seq publication with >= cluster scope
66 * @zone_list: next matching name seq publication with >= zone scope 66 * @zone_list: next matching name seq publication with >= zone scope
67 * 67 *
68 * Note that the node list, cluster list, and zone list are circular lists. 68 * Note that the node list, cluster list, and zone list are circular lists.
69 */ 69 */
70 70
@@ -89,16 +89,16 @@ extern rwlock_t tipc_nametbl_lock;
89 89
90struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space); 90struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); 91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
93 struct port_list *dports); 93 struct port_list *dports);
94int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope, 94int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
95 struct tipc_name_seq const *seq); 95 struct tipc_name_seq const *seq);
96struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 96struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
97 u32 scope, u32 port_ref, u32 key); 97 u32 scope, u32 port_ref, u32 key);
98int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); 98int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
99struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, 99struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
100 u32 scope, u32 node, u32 ref, u32 key); 100 u32 scope, u32 node, u32 ref, u32 key);
101struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, 101struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
102 u32 node, u32 ref, u32 key); 102 u32 node, u32 ref, u32 key);
103void tipc_nametbl_subscribe(struct subscription *s); 103void tipc_nametbl_subscribe(struct subscription *s);
104void tipc_nametbl_unsubscribe(struct subscription *s); 104void tipc_nametbl_unsubscribe(struct subscription *s);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a991bf8a7f74..c39c76201e8e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/net.c: TIPC network routing code 2 * net/tipc/net.c: TIPC network routing code
3 * 3 *
4 * Copyright (c) 1995-2006, Ericsson AB 4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -49,63 +49,63 @@
49#include "discover.h" 49#include "discover.h"
50#include "config.h" 50#include "config.h"
51 51
52/* 52/*
53 * The TIPC locking policy is designed to ensure a very fine locking 53 * The TIPC locking policy is designed to ensure a very fine locking
54 * granularity, permitting complete parallel access to individual 54 * granularity, permitting complete parallel access to individual
55 * port and node/link instances. The code consists of three major 55 * port and node/link instances. The code consists of three major
56 * locking domains, each protected with their own disjunct set of locks. 56 * locking domains, each protected with their own disjunct set of locks.
57 * 57 *
58 * 1: The routing hierarchy. 58 * 1: The routing hierarchy.
59 * Comprises the structures 'zone', 'cluster', 'node', 'link' 59 * Comprises the structures 'zone', 'cluster', 'node', 'link'
60 * and 'bearer'. The whole hierarchy is protected by a big 60 * and 'bearer'. The whole hierarchy is protected by a big
61 * read/write lock, tipc_net_lock, to ensure that nothing is added 61 * read/write lock, tipc_net_lock, to ensure that nothing is added
62 * or removed while code is accessing any of these structures. 62 * or removed while code is accessing any of these structures.
63 * This layer must not be called from the two others while they 63 * This layer must not be called from the two others while they
64 * hold any of their own locks. 64 * hold any of their own locks.
65 * Neither must it itself do any upcalls to the other two before 65 * Neither must it itself do any upcalls to the other two before
66 * it has released tipc_net_lock and other protective locks. 66 * it has released tipc_net_lock and other protective locks.
67 * 67 *
68 * Within the tipc_net_lock domain there are two sub-domains;'node' and 68 * Within the tipc_net_lock domain there are two sub-domains;'node' and
69 * 'bearer', where local write operations are permitted, 69 * 'bearer', where local write operations are permitted,
70 * provided that those are protected by individual spin_locks 70 * provided that those are protected by individual spin_locks
71 * per instance. Code holding tipc_net_lock(read) and a node spin_lock 71 * per instance. Code holding tipc_net_lock(read) and a node spin_lock
72 * is permitted to poke around in both the node itself and its 72 * is permitted to poke around in both the node itself and its
73 * subordinate links. I.e, it can update link counters and queues, 73 * subordinate links. I.e, it can update link counters and queues,
74 * change link state, send protocol messages, and alter the 74 * change link state, send protocol messages, and alter the
75 * "active_links" array in the node; but it can _not_ remove a link 75 * "active_links" array in the node; but it can _not_ remove a link
76 * or a node from the overall structure. 76 * or a node from the overall structure.
77 * Correspondingly, individual bearers may change status within a 77 * Correspondingly, individual bearers may change status within a
78 * tipc_net_lock(read), protected by an individual spin_lock per bearer 78 * tipc_net_lock(read), protected by an individual spin_lock per bearer
79 * instance, but it needs tipc_net_lock(write) to remove/add any bearers. 79 * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
80 * 80 *
81 * 81 *
82 * 2: The transport level of the protocol. 82 * 2: The transport level of the protocol.
83 * This consists of the structures port, (and its user level 83 * This consists of the structures port, (and its user level
84 * representations, such as user_port and tipc_sock), reference and 84 * representations, such as user_port and tipc_sock), reference and
85 * tipc_user (port.c, reg.c, socket.c). 85 * tipc_user (port.c, reg.c, socket.c).
86 * 86 *
87 * This layer has four different locks: 87 * This layer has four different locks:
88 * - The tipc_port spin_lock. This is protecting each port instance 88 * - The tipc_port spin_lock. This is protecting each port instance
89 * from parallel data access and removal. Since we can not place 89 * from parallel data access and removal. Since we can not place
90 * this lock in the port itself, it has been placed in the 90 * this lock in the port itself, it has been placed in the
91 * corresponding reference table entry, which has the same life 91 * corresponding reference table entry, which has the same life
92 * cycle as the module. This entry is difficult to access from 92 * cycle as the module. This entry is difficult to access from
93 * outside the TIPC core, however, so a pointer to the lock has 93 * outside the TIPC core, however, so a pointer to the lock has
94 * been added in the port instance, -to be used for unlocking 94 * been added in the port instance, -to be used for unlocking
95 * only. 95 * only.
96 * - A read/write lock to protect the reference table itself (ref.c). 96 * - A read/write lock to protect the reference table itself (ref.c).
97 * (Nobody is using read-only access to this, so it can just as 97 * (Nobody is using read-only access to this, so it can just as
98 * well be changed to a spin_lock) 98 * well be changed to a spin_lock)
99 * - A spin lock to protect the registry of kernel/driver users (reg.c) 99 * - A spin lock to protect the registry of kernel/driver users (reg.c)
100 * - A global spin_lock (tipc_port_lock), whose only task is to ensure 100 * - A global spin_lock (tipc_port_lock), whose only task is to ensure
101 * consistency where more than one port is involved in an operation, 101 * consistency where more than one port is involved in an operation,
102 * i.e., when a port is part of a linked list of ports. 102 * i.e., when a port is part of a linked list of ports.
103 * There are two such lists; 'port_list', which is used for management, 103 * There are two such lists; 'port_list', which is used for management,
104 * and 'wait_list', which is used to queue ports during congestion. 104 * and 'wait_list', which is used to queue ports during congestion.
105 * 105 *
106 * 3: The name table (name_table.c, name_distr.c, subscription.c) 106 * 3: The name table (name_table.c, name_distr.c, subscription.c)
107 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the 107 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
108 * overall name table structure. Nothing must be added/removed to 108 * overall name table structure. Nothing must be added/removed to
109 * this structure without holding write access to it. 109 * this structure without holding write access to it.
110 * - There is one local spin_lock per sub_sequence, which can be seen 110 * - There is one local spin_lock per sub_sequence, which can be seen
111 * as a sub-domain to the tipc_nametbl_lock domain. It is used only 111 * as a sub-domain to the tipc_nametbl_lock domain. It is used only
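Reading aid, not part of the patch above: the locking policy just quoted prescribes taking tipc_net_lock for reading before the per-node spin_lock, and releasing both before making any upcalls. A minimal C sketch of that nesting, using the documented locks but a hypothetical helper name and hypothetical work under the locks:

/*
 * Illustrative only -- example_touch_node_links() is not a real TIPC
 * function; the locks are the ones described in the policy above.
 */
static void example_touch_node_links(struct node *n_ptr)
{
	read_lock_bh(&tipc_net_lock);	/* hierarchy frozen: nothing added/removed */
	spin_lock_bh(&n_ptr->lock);	/* serialize updates to this node */

	/* permitted here: update link counters/queues, active_links[],
	 * send protocol messages -- but never remove a link or a node
	 */

	spin_unlock_bh(&n_ptr->lock);
	read_unlock_bh(&tipc_net_lock);	/* drop locks before any upcalls */
}

Removing or adding a link or node would instead require tipc_net_lock taken for writing, as the comment states.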
@@ -118,7 +118,7 @@
118DEFINE_RWLOCK(tipc_net_lock); 118DEFINE_RWLOCK(tipc_net_lock);
119struct network tipc_net = { NULL }; 119struct network tipc_net = { NULL };
120 120
121struct node *tipc_net_select_remote_node(u32 addr, u32 ref) 121struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
122{ 122{
123 return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref); 123 return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
124} 124}
@@ -224,7 +224,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
224 buf_discard(buf); 224 buf_discard(buf);
225 } else { 225 } else {
226 msg_dbg(msg, "NET>REJ>:"); 226 msg_dbg(msg, "NET>REJ>:");
227 tipc_reject_msg(buf, msg_destport(msg) ? 227 tipc_reject_msg(buf, msg_destport(msg) ?
228 TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME); 228 TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
229 } 229 }
230 return; 230 return;
@@ -236,7 +236,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
236 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg); 236 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
237 if (in_scope(dnode, tipc_own_addr)) { 237 if (in_scope(dnode, tipc_own_addr)) {
238 if (msg_isdata(msg)) { 238 if (msg_isdata(msg)) {
239 if (msg_mcast(msg)) 239 if (msg_mcast(msg))
240 tipc_port_recv_mcast(buf, NULL); 240 tipc_port_recv_mcast(buf, NULL);
241 else if (msg_destport(msg)) 241 else if (msg_destport(msg))
242 tipc_port_recv_msg(buf); 242 tipc_port_recv_msg(buf);
@@ -284,7 +284,7 @@ int tipc_net_start(void)
284 (res = tipc_bclink_init())) { 284 (res = tipc_bclink_init())) {
285 return res; 285 return res;
286 } 286 }
287 tipc_subscr_stop(); 287 tipc_subscr_stop();
288 tipc_cfg_stop(); 288 tipc_cfg_stop();
289 tipc_k_signal((Handler)tipc_subscr_start, 0); 289 tipc_k_signal((Handler)tipc_subscr_start, 0);
290 tipc_k_signal((Handler)tipc_cfg_init, 0); 290 tipc_k_signal((Handler)tipc_cfg_init, 0);
@@ -298,12 +298,12 @@ void tipc_net_stop(void)
298{ 298{
299 if (tipc_mode != TIPC_NET_MODE) 299 if (tipc_mode != TIPC_NET_MODE)
300 return; 300 return;
301 write_lock_bh(&tipc_net_lock); 301 write_lock_bh(&tipc_net_lock);
302 tipc_bearer_stop(); 302 tipc_bearer_stop();
303 tipc_mode = TIPC_NODE_MODE; 303 tipc_mode = TIPC_NODE_MODE;
304 tipc_bclink_stop(); 304 tipc_bclink_stop();
305 net_stop(); 305 net_stop();
306 write_unlock_bh(&tipc_net_lock); 306 write_unlock_bh(&tipc_net_lock);
307 info("Left network mode \n"); 307 info("Left network mode \n");
308} 308}
309 309
diff --git a/net/tipc/net.h b/net/tipc/net.h
index f3e0b85e6475..a6a0e9976ac9 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/net.h: Include file for TIPC network routing code 2 * net/tipc/net.h: Include file for TIPC network routing code
3 * 3 *
4 * Copyright (c) 1995-2006, Ericsson AB 4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -43,7 +43,7 @@ struct _zone;
43 * struct network - TIPC network structure 43 * struct network - TIPC network structure
44 * @zones: array of pointers to all zones within network 44 * @zones: array of pointers to all zones within network
45 */ 45 */
46 46
47struct network { 47struct network {
48 struct _zone **zones; 48 struct _zone **zones;
49}; 49};
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index eb1bb4dce7af..b8e1edc2badc 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/netlink.c: TIPC configuration handling 2 * net/tipc/netlink.c: TIPC configuration handling
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -63,15 +63,15 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
63 genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid); 63 genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
64 } 64 }
65 65
66 return 0; 66 return 0;
67} 67}
68 68
69static struct genl_family family = { 69static struct genl_family family = {
70 .id = GENL_ID_GENERATE, 70 .id = GENL_ID_GENERATE,
71 .name = TIPC_GENL_NAME, 71 .name = TIPC_GENL_NAME,
72 .version = TIPC_GENL_VERSION, 72 .version = TIPC_GENL_VERSION,
73 .hdrsize = TIPC_GENL_HDRLEN, 73 .hdrsize = TIPC_GENL_HDRLEN,
74 .maxattr = 0, 74 .maxattr = 0,
75}; 75};
76 76
77static struct genl_ops ops = { 77static struct genl_ops ops = {
@@ -93,7 +93,7 @@ int tipc_netlink_start(void)
93 if (genl_register_ops(&family, &ops)) 93 if (genl_register_ops(&family, &ops))
94 goto err_unregister; 94 goto err_unregister;
95 95
96 return 0; 96 return 0;
97 97
98 err_unregister: 98 err_unregister:
99 genl_unregister_family(&family); 99 genl_unregister_family(&family);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 4111a31def79..e2e452a62ba1 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/node.c: TIPC node management routines 2 * net/tipc/node.c: TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -58,7 +58,7 @@ struct node *tipc_node_create(u32 addr)
58{ 58{
59 struct cluster *c_ptr; 59 struct cluster *c_ptr;
60 struct node *n_ptr; 60 struct node *n_ptr;
61 struct node **curr_node; 61 struct node **curr_node;
62 62
63 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); 63 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
64 if (!n_ptr) { 64 if (!n_ptr) {
@@ -74,16 +74,16 @@ struct node *tipc_node_create(u32 addr)
74 kfree(n_ptr); 74 kfree(n_ptr);
75 return NULL; 75 return NULL;
76 } 76 }
77 77
78 n_ptr->addr = addr; 78 n_ptr->addr = addr;
79 spin_lock_init(&n_ptr->lock); 79 spin_lock_init(&n_ptr->lock);
80 INIT_LIST_HEAD(&n_ptr->nsub); 80 INIT_LIST_HEAD(&n_ptr->nsub);
81 n_ptr->owner = c_ptr; 81 n_ptr->owner = c_ptr;
82 tipc_cltr_attach_node(c_ptr, n_ptr); 82 tipc_cltr_attach_node(c_ptr, n_ptr);
83 n_ptr->last_router = -1; 83 n_ptr->last_router = -1;
84 84
85 /* Insert node into ordered list */ 85 /* Insert node into ordered list */
86 for (curr_node = &tipc_nodes; *curr_node; 86 for (curr_node = &tipc_nodes; *curr_node;
87 curr_node = &(*curr_node)->next) { 87 curr_node = &(*curr_node)->next) {
88 if (addr < (*curr_node)->addr) { 88 if (addr < (*curr_node)->addr) {
89 n_ptr->next = *curr_node; 89 n_ptr->next = *curr_node;
@@ -116,7 +116,7 @@ void tipc_node_delete(struct node *n_ptr)
116 116
117/** 117/**
118 * tipc_node_link_up - handle addition of link 118 * tipc_node_link_up - handle addition of link
119 * 119 *
120 * Link becomes active (alone or shared) or standby, depending on its priority. 120 * Link becomes active (alone or shared) or standby, depending on its priority.
121 */ 121 */
122 122
@@ -128,19 +128,19 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
128 128
129 info("Established link <%s> on network plane %c\n", 129 info("Established link <%s> on network plane %c\n",
130 l_ptr->name, l_ptr->b_ptr->net_plane); 130 l_ptr->name, l_ptr->b_ptr->net_plane);
131 131
132 if (!active[0]) { 132 if (!active[0]) {
133 dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]); 133 dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
134 active[0] = active[1] = l_ptr; 134 active[0] = active[1] = l_ptr;
135 node_established_contact(n_ptr); 135 node_established_contact(n_ptr);
136 return; 136 return;
137 } 137 }
138 if (l_ptr->priority < active[0]->priority) { 138 if (l_ptr->priority < active[0]->priority) {
139 info("New link <%s> becomes standby\n", l_ptr->name); 139 info("New link <%s> becomes standby\n", l_ptr->name);
140 return; 140 return;
141 } 141 }
142 tipc_link_send_duplicate(active[0], l_ptr); 142 tipc_link_send_duplicate(active[0], l_ptr);
143 if (l_ptr->priority == active[0]->priority) { 143 if (l_ptr->priority == active[0]->priority) {
144 active[0] = l_ptr; 144 active[0] = l_ptr;
145 return; 145 return;
146 } 146 }
@@ -160,17 +160,17 @@ static void node_select_active_links(struct node *n_ptr)
160 u32 i; 160 u32 i;
161 u32 highest_prio = 0; 161 u32 highest_prio = 0;
162 162
163 active[0] = active[1] = NULL; 163 active[0] = active[1] = NULL;
164 164
165 for (i = 0; i < MAX_BEARERS; i++) { 165 for (i = 0; i < MAX_BEARERS; i++) {
166 struct link *l_ptr = n_ptr->links[i]; 166 struct link *l_ptr = n_ptr->links[i];
167 167
168 if (!l_ptr || !tipc_link_is_up(l_ptr) || 168 if (!l_ptr || !tipc_link_is_up(l_ptr) ||
169 (l_ptr->priority < highest_prio)) 169 (l_ptr->priority < highest_prio))
170 continue; 170 continue;
171 171
172 if (l_ptr->priority > highest_prio) { 172 if (l_ptr->priority > highest_prio) {
173 highest_prio = l_ptr->priority; 173 highest_prio = l_ptr->priority;
174 active[0] = active[1] = l_ptr; 174 active[0] = active[1] = l_ptr;
175 } else { 175 } else {
176 active[1] = l_ptr; 176 active[1] = l_ptr;
@@ -203,15 +203,15 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
203 active[1] = active[0]; 203 active[1] = active[0];
204 if (active[0] == l_ptr) 204 if (active[0] == l_ptr)
205 node_select_active_links(n_ptr); 205 node_select_active_links(n_ptr);
206 if (tipc_node_is_up(n_ptr)) 206 if (tipc_node_is_up(n_ptr))
207 tipc_link_changeover(l_ptr); 207 tipc_link_changeover(l_ptr);
208 else 208 else
209 node_lost_contact(n_ptr); 209 node_lost_contact(n_ptr);
210} 210}
211 211
212int tipc_node_has_active_links(struct node *n_ptr) 212int tipc_node_has_active_links(struct node *n_ptr)
213{ 213{
214 return (n_ptr && 214 return (n_ptr &&
215 ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); 215 ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
216} 216}
217 217
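Not part of the patch: the active_links[] pair handled by tipc_node_link_up() and node_select_active_links() above follows a simple convention -- both slots NULL when no link is up, a single working link occupying both slots, and two links of equal top priority sharing them. A hypothetical helper that classifies a link under that convention:

/* Illustrative only; example_link_role() is not a real TIPC function. */
static int example_link_role(struct node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];

	if (!tipc_link_is_up(l_ptr))
		return 0;			/* link is down */
	if ((active[0] == l_ptr) || (active[1] == l_ptr))
		return 1;			/* active (alone or shared) */
	return 2;				/* up, but standby */
}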
@@ -236,28 +236,28 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
236 236
237 if (!n_ptr) 237 if (!n_ptr)
238 n_ptr = tipc_node_create(l_ptr->addr); 238 n_ptr = tipc_node_create(l_ptr->addr);
239 if (n_ptr) { 239 if (n_ptr) {
240 u32 bearer_id = l_ptr->b_ptr->identity; 240 u32 bearer_id = l_ptr->b_ptr->identity;
241 char addr_string[16]; 241 char addr_string[16];
242 242
243 if (n_ptr->link_cnt >= 2) { 243 if (n_ptr->link_cnt >= 2) {
244 char addr_string[16]; 244 char addr_string[16];
245 245
246 err("Attempt to create third link to %s\n", 246 err("Attempt to create third link to %s\n",
247 addr_string_fill(addr_string, n_ptr->addr)); 247 addr_string_fill(addr_string, n_ptr->addr));
248 return NULL; 248 return NULL;
249 } 249 }
250 250
251 if (!n_ptr->links[bearer_id]) { 251 if (!n_ptr->links[bearer_id]) {
252 n_ptr->links[bearer_id] = l_ptr; 252 n_ptr->links[bearer_id] = l_ptr;
253 tipc_net.zones[tipc_zone(l_ptr->addr)]->links++; 253 tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
254 n_ptr->link_cnt++; 254 n_ptr->link_cnt++;
255 return n_ptr; 255 return n_ptr;
256 } 256 }
257 err("Attempt to establish second link on <%s> to %s \n", 257 err("Attempt to establish second link on <%s> to %s \n",
258 l_ptr->b_ptr->publ.name, 258 l_ptr->b_ptr->publ.name,
259 addr_string_fill(addr_string, l_ptr->addr)); 259 addr_string_fill(addr_string, l_ptr->addr));
260 } 260 }
261 return NULL; 261 return NULL;
262} 262}
263 263
@@ -272,17 +272,17 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
272 * Routing table management - five cases to handle: 272 * Routing table management - five cases to handle:
273 * 273 *
274 * 1: A link towards a zone/cluster external node comes up. 274 * 1: A link towards a zone/cluster external node comes up.
275 * => Send a multicast message updating routing tables of all 275 * => Send a multicast message updating routing tables of all
276 * system nodes within own cluster that the new destination 276 * system nodes within own cluster that the new destination
277 * can be reached via this node. 277 * can be reached via this node.
278 * (node.establishedContact()=>cluster.multicastNewRoute()) 278 * (node.establishedContact()=>cluster.multicastNewRoute())
279 * 279 *
280 * 2: A link towards a slave node comes up. 280 * 2: A link towards a slave node comes up.
281 * => Send a multicast message updating routing tables of all 281 * => Send a multicast message updating routing tables of all
282 * system nodes within own cluster that the new destination 282 * system nodes within own cluster that the new destination
283 * can be reached via this node. 283 * can be reached via this node.
284 * (node.establishedContact()=>cluster.multicastNewRoute()) 284 * (node.establishedContact()=>cluster.multicastNewRoute())
285 * => Send a message to the slave node about existence 285 * => Send a message to the slave node about existence
286 * of all system nodes within cluster: 286 * of all system nodes within cluster:
287 * (node.establishedContact()=>cluster.sendLocalRoutes()) 287 * (node.establishedContact()=>cluster.sendLocalRoutes())
288 * 288 *
@@ -292,13 +292,13 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
292 * nodes which can be reached via this node. 292 * nodes which can be reached via this node.
293 * (node.establishedContact()==>network.sendExternalRoutes()) 293 * (node.establishedContact()==>network.sendExternalRoutes())
294 * (node.establishedContact()==>network.sendSlaveRoutes()) 294 * (node.establishedContact()==>network.sendSlaveRoutes())
295 * => Send messages to all directly connected slave nodes 295 * => Send messages to all directly connected slave nodes
296 * containing information about the existence of the new node 296 * containing information about the existence of the new node
297 * (node.establishedContact()=>cluster.multicastNewRoute()) 297 * (node.establishedContact()=>cluster.multicastNewRoute())
298 * 298 *
299 * 4: The link towards a zone/cluster external node or slave 299 * 4: The link towards a zone/cluster external node or slave
300 * node goes down. 300 * node goes down.
301 * => Send a multicast message updating routing tables of all 301 * => Send a multicast message updating routing tables of all
302 * nodes within cluster that the new destination can not any 302 * nodes within cluster that the new destination can not any
303 * longer be reached via this node. 303 * longer be reached via this node.
304 * (node.lostAllLinks()=>cluster.bcastLostRoute()) 304 * (node.lostAllLinks()=>cluster.bcastLostRoute())
@@ -308,7 +308,7 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
308 * routing tables. Note: This is a completely node 308 * routing tables. Note: This is a completely node
309 * local operation. 309 * local operation.
310 * (node.lostAllLinks()=>network.removeAsRouter()) 310 * (node.lostAllLinks()=>network.removeAsRouter())
311 * => Send messages to all directly connected slave nodes 311 * => Send messages to all directly connected slave nodes
312 * containing information about loss of the node 312 * containing information about loss of the node
313 * (node.establishedContact()=>cluster.multicastLostRoute()) 313 * (node.establishedContact()=>cluster.multicastLostRoute())
314 * 314 *
@@ -319,12 +319,12 @@ static void node_established_contact(struct node *n_ptr)
319 struct cluster *c_ptr; 319 struct cluster *c_ptr;
320 320
321 dbg("node_established_contact:-> %x\n", n_ptr->addr); 321 dbg("node_established_contact:-> %x\n", n_ptr->addr);
322 if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) { 322 if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
323 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 323 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
324 } 324 }
325 325
326 /* Synchronize broadcast acks */ 326 /* Synchronize broadcast acks */
327 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 327 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
328 328
329 if (is_slave(tipc_own_addr)) 329 if (is_slave(tipc_own_addr))
330 return; 330 return;
@@ -333,11 +333,11 @@ static void node_established_contact(struct node *n_ptr)
333 c_ptr = tipc_cltr_find(tipc_own_addr); 333 c_ptr = tipc_cltr_find(tipc_own_addr);
334 if (!c_ptr) 334 if (!c_ptr)
335 c_ptr = tipc_cltr_create(tipc_own_addr); 335 c_ptr = tipc_cltr_create(tipc_own_addr);
336 if (c_ptr) 336 if (c_ptr)
337 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, 337 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
338 tipc_max_nodes); 338 tipc_max_nodes);
339 return; 339 return;
340 } 340 }
341 341
342 c_ptr = n_ptr->owner; 342 c_ptr = n_ptr->owner;
343 if (is_slave(n_ptr->addr)) { 343 if (is_slave(n_ptr->addr)) {
@@ -367,26 +367,26 @@ static void node_lost_contact(struct node *n_ptr)
367 char addr_string[16]; 367 char addr_string[16];
368 u32 i; 368 u32 i;
369 369
370 /* Clean up broadcast reception remains */ 370 /* Clean up broadcast reception remains */
371 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0; 371 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
372 while (n_ptr->bclink.deferred_head) { 372 while (n_ptr->bclink.deferred_head) {
373 struct sk_buff* buf = n_ptr->bclink.deferred_head; 373 struct sk_buff* buf = n_ptr->bclink.deferred_head;
374 n_ptr->bclink.deferred_head = buf->next; 374 n_ptr->bclink.deferred_head = buf->next;
375 buf_discard(buf); 375 buf_discard(buf);
376 } 376 }
377 if (n_ptr->bclink.defragm) { 377 if (n_ptr->bclink.defragm) {
378 buf_discard(n_ptr->bclink.defragm); 378 buf_discard(n_ptr->bclink.defragm);
379 n_ptr->bclink.defragm = NULL; 379 n_ptr->bclink.defragm = NULL;
380 } 380 }
381 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) { 381 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
382 tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000)); 382 tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
383 } 383 }
384 384
385 /* Update routing tables */ 385 /* Update routing tables */
386 if (is_slave(tipc_own_addr)) { 386 if (is_slave(tipc_own_addr)) {
387 tipc_net_remove_as_router(n_ptr->addr); 387 tipc_net_remove_as_router(n_ptr->addr);
388 } else { 388 } else {
389 if (!in_own_cluster(n_ptr->addr)) { 389 if (!in_own_cluster(n_ptr->addr)) {
390 /* Case 4 (see above) */ 390 /* Case 4 (see above) */
391 c_ptr = tipc_cltr_find(tipc_own_addr); 391 c_ptr = tipc_cltr_find(tipc_own_addr);
392 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1, 392 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
@@ -399,7 +399,7 @@ static void node_lost_contact(struct node *n_ptr)
399 tipc_max_nodes); 399 tipc_max_nodes);
400 } else { 400 } else {
401 if (n_ptr->bclink.supported) { 401 if (n_ptr->bclink.supported) {
402 tipc_nmap_remove(&tipc_cltr_bcast_nodes, 402 tipc_nmap_remove(&tipc_cltr_bcast_nodes,
403 n_ptr->addr); 403 n_ptr->addr);
404 if (n_ptr->addr < tipc_own_addr) 404 if (n_ptr->addr < tipc_own_addr)
405 tipc_own_tag--; 405 tipc_own_tag--;
@@ -414,13 +414,13 @@ static void node_lost_contact(struct node *n_ptr)
414 if (tipc_node_has_active_routes(n_ptr)) 414 if (tipc_node_has_active_routes(n_ptr))
415 return; 415 return;
416 416
417 info("Lost contact with %s\n", 417 info("Lost contact with %s\n",
418 addr_string_fill(addr_string, n_ptr->addr)); 418 addr_string_fill(addr_string, n_ptr->addr));
419 419
420 /* Abort link changeover */ 420 /* Abort link changeover */
421 for (i = 0; i < MAX_BEARERS; i++) { 421 for (i = 0; i < MAX_BEARERS; i++) {
422 struct link *l_ptr = n_ptr->links[i]; 422 struct link *l_ptr = n_ptr->links[i];
423 if (!l_ptr) 423 if (!l_ptr)
424 continue; 424 continue;
425 l_ptr->reset_checkpoint = l_ptr->next_in_no; 425 l_ptr->reset_checkpoint = l_ptr->next_in_no;
426 l_ptr->exp_msg_count = 0; 426 l_ptr->exp_msg_count = 0;
@@ -429,7 +429,7 @@ static void node_lost_contact(struct node *n_ptr)
429 429
430 /* Notify subscribers */ 430 /* Notify subscribers */
431 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) { 431 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
432 ns->node = NULL; 432 ns->node = NULL;
433 list_del_init(&ns->nodesub_list); 433 list_del_init(&ns->nodesub_list);
434 tipc_k_signal((Handler)ns->handle_node_down, 434 tipc_k_signal((Handler)ns->handle_node_down,
435 (unsigned long)ns->usr_handle); 435 (unsigned long)ns->usr_handle);
@@ -438,7 +438,7 @@ static void node_lost_contact(struct node *n_ptr)
438 438
439/** 439/**
440 * tipc_node_select_next_hop - find the next-hop node for a message 440 * tipc_node_select_next_hop - find the next-hop node for a message
441 * 441 *
442 * Called when cluster local lookup has failed. 442 * Called when cluster local lookup has failed.
443 */ 443 */
444 444
@@ -447,13 +447,13 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
447 struct node *n_ptr; 447 struct node *n_ptr;
448 u32 router_addr; 448 u32 router_addr;
449 449
450 if (!tipc_addr_domain_valid(addr)) 450 if (!tipc_addr_domain_valid(addr))
451 return NULL; 451 return NULL;
452 452
453 /* Look for direct link to destination processor */ 453 /* Look for direct link to destination processor */
454 n_ptr = tipc_node_find(addr); 454 n_ptr = tipc_node_find(addr);
455 if (n_ptr && tipc_node_has_active_links(n_ptr)) 455 if (n_ptr && tipc_node_has_active_links(n_ptr))
456 return n_ptr; 456 return n_ptr;
457 457
458 /* Cluster local system nodes *must* have direct links */ 458 /* Cluster local system nodes *must* have direct links */
459 if (!is_slave(addr) && in_own_cluster(addr)) 459 if (!is_slave(addr) && in_own_cluster(addr))
@@ -461,10 +461,10 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
461 461
462 /* Look for cluster local router with direct link to node */ 462 /* Look for cluster local router with direct link to node */
463 router_addr = tipc_node_select_router(n_ptr, selector); 463 router_addr = tipc_node_select_router(n_ptr, selector);
464 if (router_addr) 464 if (router_addr)
465 return tipc_node_select(router_addr, selector); 465 return tipc_node_select(router_addr, selector);
466 466
467 /* Slave nodes can only be accessed within own cluster via a 467 /* Slave nodes can only be accessed within own cluster via a
468 known router with direct link -- if no router was found, give up */ 468 known router with direct link -- if no router was found, give up */
469 if (is_slave(addr)) 469 if (is_slave(addr))
470 return NULL; 470 return NULL;
@@ -473,20 +473,20 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
473 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 473 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
474 n_ptr = tipc_net_select_remote_node(addr, selector); 474 n_ptr = tipc_net_select_remote_node(addr, selector);
475 if (n_ptr && tipc_node_has_active_links(n_ptr)) 475 if (n_ptr && tipc_node_has_active_links(n_ptr))
476 return n_ptr; 476 return n_ptr;
477 477
478 /* Last resort -- look for any router to anywhere in remote zone */ 478 /* Last resort -- look for any router to anywhere in remote zone */
479 router_addr = tipc_net_select_router(addr, selector); 479 router_addr = tipc_net_select_router(addr, selector);
480 if (router_addr) 480 if (router_addr)
481 return tipc_node_select(router_addr, selector); 481 return tipc_node_select(router_addr, selector);
482 482
483 return NULL; 483 return NULL;
484} 484}
485 485
486/** 486/**
487 * tipc_node_select_router - select router to reach specified node 487 * tipc_node_select_router - select router to reach specified node
488 * 488 *
489 * Uses a deterministic and fair algorithm for selecting router node. 489 * Uses a deterministic and fair algorithm for selecting router node.
490 */ 490 */
491 491
492u32 tipc_node_select_router(struct node *n_ptr, u32 ref) 492u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
@@ -496,8 +496,8 @@ u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
496 u32 start; 496 u32 start;
497 u32 r; 497 u32 r;
498 498
499 if (!n_ptr) 499 if (!n_ptr)
500 return 0; 500 return 0;
501 501
502 if (n_ptr->last_router < 0) 502 if (n_ptr->last_router < 0)
503 return 0; 503 return 0;
@@ -531,10 +531,10 @@ void tipc_node_add_router(struct node *n_ptr, u32 router)
531{ 531{
532 u32 r_num = tipc_node(router); 532 u32 r_num = tipc_node(router);
533 533
534 n_ptr->routers[r_num / 32] = 534 n_ptr->routers[r_num / 32] =
535 ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]); 535 ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
536 n_ptr->last_router = tipc_max_nodes / 32; 536 n_ptr->last_router = tipc_max_nodes / 32;
537 while ((--n_ptr->last_router >= 0) && 537 while ((--n_ptr->last_router >= 0) &&
538 !n_ptr->routers[n_ptr->last_router]); 538 !n_ptr->routers[n_ptr->last_router]);
539} 539}
540 540
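Not part of the patch: routers[] in tipc_node_add_router()/tipc_node_remove_router() above is a plain u32 bitmap with one bit per router node number, e.g. r_num = 37 lands in routers[1], bit 5. A sketch of the same index arithmetic with made-up helper names:

/* Illustrative only -- these helpers are not real TIPC functions. */
static void example_router_set(u32 *routers, u32 r_num)
{
	routers[r_num / 32] |= (1 << (r_num % 32));
}

static void example_router_clear(u32 *routers, u32 r_num)
{
	routers[r_num / 32] &= ~(1 << (r_num % 32));
}

static int example_last_router(u32 *routers, u32 max_nodes)
{
	int last = max_nodes / 32;

	while ((--last >= 0) && !routers[last])
		;
	return last;	/* -1 when no router bits remain set */
}

last_router ending up below zero is exactly the "no usable router" condition that tipc_node_select_router() tests above before scanning the bitmap.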
@@ -548,7 +548,7 @@ void tipc_node_remove_router(struct node *n_ptr, u32 router)
548 n_ptr->routers[r_num / 32] = 548 n_ptr->routers[r_num / 32] =
549 ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32])); 549 ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
550 n_ptr->last_router = tipc_max_nodes / 32; 550 n_ptr->last_router = tipc_max_nodes / 32;
551 while ((--n_ptr->last_router >= 0) && 551 while ((--n_ptr->last_router >= 0) &&
552 !n_ptr->routers[n_ptr->last_router]); 552 !n_ptr->routers[n_ptr->last_router]);
553 553
554 if (!tipc_node_is_up(n_ptr)) 554 if (!tipc_node_is_up(n_ptr))
@@ -562,7 +562,7 @@ void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
562 562
563 tipc_printf(buf, "\n\n%s", str); 563 tipc_printf(buf, "\n\n%s", str);
564 for (i = 0; i < MAX_BEARERS; i++) { 564 for (i = 0; i < MAX_BEARERS; i++) {
565 if (!n_ptr->links[i]) 565 if (!n_ptr->links[i])
566 continue; 566 continue;
567 tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]); 567 tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
568 } 568 }
@@ -590,7 +590,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
590 u32 domain; 590 u32 domain;
591 struct sk_buff *buf; 591 struct sk_buff *buf;
592 struct node *n_ptr; 592 struct node *n_ptr;
593 struct tipc_node_info node_info; 593 struct tipc_node_info node_info;
594 u32 payload_size; 594 u32 payload_size;
595 595
596 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 596 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
@@ -601,10 +601,10 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
601 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 601 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
602 " (network address)"); 602 " (network address)");
603 603
604 if (!tipc_nodes) 604 if (!tipc_nodes)
605 return tipc_cfg_reply_none(); 605 return tipc_cfg_reply_none();
606 606
607 /* For now, get space for all other nodes 607 /* For now, get space for all other nodes
608 (will need to modify this when slave nodes are supported */ 608 (will need to modify this when slave nodes are supported */
609 609
610 payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1); 610 payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
@@ -620,9 +620,9 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
620 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 620 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
621 if (!in_scope(domain, n_ptr->addr)) 621 if (!in_scope(domain, n_ptr->addr))
622 continue; 622 continue;
623 node_info.addr = htonl(n_ptr->addr); 623 node_info.addr = htonl(n_ptr->addr);
624 node_info.up = htonl(tipc_node_is_up(n_ptr)); 624 node_info.up = htonl(tipc_node_is_up(n_ptr));
625 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 625 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
626 &node_info, sizeof(node_info)); 626 &node_info, sizeof(node_info));
627 } 627 }
628 628
@@ -634,7 +634,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
634 u32 domain; 634 u32 domain;
635 struct sk_buff *buf; 635 struct sk_buff *buf;
636 struct node *n_ptr; 636 struct node *n_ptr;
637 struct tipc_link_info link_info; 637 struct tipc_link_info link_info;
638 u32 payload_size; 638 u32 payload_size;
639 639
640 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 640 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
@@ -645,9 +645,9 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
645 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 645 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
646 " (network address)"); 646 " (network address)");
647 647
648 if (tipc_mode != TIPC_NET_MODE) 648 if (tipc_mode != TIPC_NET_MODE)
649 return tipc_cfg_reply_none(); 649 return tipc_cfg_reply_none();
650 650
651 /* Get space for all unicast links + multicast link */ 651 /* Get space for all unicast links + multicast link */
652 652
653 payload_size = TLV_SPACE(sizeof(link_info)) * 653 payload_size = TLV_SPACE(sizeof(link_info)) *
@@ -661,27 +661,27 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
661 661
662 /* Add TLV for broadcast link */ 662 /* Add TLV for broadcast link */
663 663
664 link_info.dest = htonl(tipc_own_addr & 0xfffff00); 664 link_info.dest = htonl(tipc_own_addr & 0xfffff00);
665 link_info.up = htonl(1); 665 link_info.up = htonl(1);
666 sprintf(link_info.str, tipc_bclink_name); 666 sprintf(link_info.str, tipc_bclink_name);
667 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 667 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
668 668
669 /* Add TLVs for any other links in scope */ 669 /* Add TLVs for any other links in scope */
670 670
671 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 671 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
672 u32 i; 672 u32 i;
673 673
674 if (!in_scope(domain, n_ptr->addr)) 674 if (!in_scope(domain, n_ptr->addr))
675 continue; 675 continue;
676 for (i = 0; i < MAX_BEARERS; i++) { 676 for (i = 0; i < MAX_BEARERS; i++) {
677 if (!n_ptr->links[i]) 677 if (!n_ptr->links[i])
678 continue; 678 continue;
679 link_info.dest = htonl(n_ptr->addr); 679 link_info.dest = htonl(n_ptr->addr);
680 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i])); 680 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
681 strcpy(link_info.str, n_ptr->links[i]->name); 681 strcpy(link_info.str, n_ptr->links[i]->name);
682 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 682 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
683 &link_info, sizeof(link_info)); 683 &link_info, sizeof(link_info));
684 } 684 }
685 } 685 }
686 686
687 return buf; 687 return buf;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index a07cc79ea637..cd1882654bbb 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/node.h: Include file for TIPC node management routines 2 * net/tipc/node.h: Include file for TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -67,7 +67,7 @@
67 * @deferred_tail: newest OOS b'cast message received from node 67 * @deferred_tail: newest OOS b'cast message received from node
68 * @defragm: list of partially reassembled b'cast message fragments from node 68 * @defragm: list of partially reassembled b'cast message fragments from node
69 */ 69 */
70 70
71struct node { 71struct node {
72 u32 addr; 72 u32 addr;
73 spinlock_t lock; 73 spinlock_t lock;
@@ -85,8 +85,8 @@ struct node {
85 int supported; 85 int supported;
86 u32 acked; 86 u32 acked;
87 u32 last_in; 87 u32 last_in;
88 u32 gap_after; 88 u32 gap_after;
89 u32 gap_to; 89 u32 gap_to;
90 u32 nack_sync; 90 u32 nack_sync;
91 struct sk_buff *deferred_head; 91 struct sk_buff *deferred_head;
92 struct sk_buff *deferred_tail; 92 struct sk_buff *deferred_tail;
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index cc3fff3dec4f..8ecbd0fb6103 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/node_subscr.c: TIPC "node down" subscription handling 2 * net/tipc/node_subscr.c: TIPC "node down" subscription handling
3 * 3 *
4 * Copyright (c) 1995-2006, Ericsson AB 4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -44,14 +44,14 @@
44 * tipc_nodesub_subscribe - create "node down" subscription for specified node 44 * tipc_nodesub_subscribe - create "node down" subscription for specified node
45 */ 45 */
46 46
47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down) 48 void *usr_handle, net_ev_handler handle_down)
49{ 49{
50 if (addr == tipc_own_addr) { 50 if (addr == tipc_own_addr) {
51 node_sub->node = NULL; 51 node_sub->node = NULL;
52 return; 52 return;
53 } 53 }
54 54
55 node_sub->node = tipc_node_find(addr); 55 node_sub->node = tipc_node_find(addr);
56 if (!node_sub->node) { 56 if (!node_sub->node) {
57 warn("Node subscription rejected, unknown node 0x%x\n", addr); 57 warn("Node subscription rejected, unknown node 0x%x\n", addr);
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 01751c4fbb43..5f3f5859b84c 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling 2 * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
3 * 3 *
4 * Copyright (c) 1995-2006, Ericsson AB 4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
diff --git a/net/tipc/port.c b/net/tipc/port.c
index b7f3199523ca..5f8217d4b452 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/port.c: TIPC port code 2 * net/tipc/port.c: TIPC port code
3 * 3 *
4 * Copyright (c) 1992-2006, Ericsson AB 4 * Copyright (c) 1992-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -126,8 +126,8 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
126 126
127 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper, 127 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
128 TIPC_NODE_SCOPE, &dports); 128 TIPC_NODE_SCOPE, &dports);
129 129
130 /* Send message to destinations (duplicate it only if necessary) */ 130 /* Send message to destinations (duplicate it only if necessary) */
131 131
132 if (ext_targets) { 132 if (ext_targets) {
133 if (dports.count != 0) { 133 if (dports.count != 0) {
@@ -157,7 +157,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
157 157
158/** 158/**
159 * tipc_port_recv_mcast - deliver multicast message to all destination ports 159 * tipc_port_recv_mcast - deliver multicast message to all destination ports
160 * 160 *
161 * If there is no port list, perform a lookup to create one 161 * If there is no port list, perform a lookup to create one
162 */ 162 */
163 163
@@ -213,7 +213,7 @@ exit:
213 213
214/** 214/**
215 * tipc_createport_raw - create a native TIPC port 215 * tipc_createport_raw - create a native TIPC port
216 * 216 *
217 * Returns local port reference 217 * Returns local port reference
218 */ 218 */
219 219
@@ -273,7 +273,7 @@ int tipc_deleteport(u32 ref)
273 273
274 tipc_withdraw(ref, 0, NULL); 274 tipc_withdraw(ref, 0, NULL);
275 p_ptr = tipc_port_lock(ref); 275 p_ptr = tipc_port_lock(ref);
276 if (!p_ptr) 276 if (!p_ptr)
277 return -EINVAL; 277 return -EINVAL;
278 278
279 tipc_ref_discard(ref); 279 tipc_ref_discard(ref);
@@ -302,7 +302,7 @@ int tipc_deleteport(u32 ref)
302 302
303/** 303/**
304 * tipc_get_port() - return port associated with 'ref' 304 * tipc_get_port() - return port associated with 'ref'
305 * 305 *
306 * Note: Port is not locked. 306 * Note: Port is not locked.
307 */ 307 */
308 308
@@ -336,7 +336,7 @@ static int port_unreliable(struct port *p_ptr)
336int tipc_portunreliable(u32 ref, unsigned int *isunreliable) 336int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
337{ 337{
338 struct port *p_ptr; 338 struct port *p_ptr;
339 339
340 p_ptr = tipc_port_lock(ref); 340 p_ptr = tipc_port_lock(ref);
341 if (!p_ptr) 341 if (!p_ptr)
342 return -EINVAL; 342 return -EINVAL;
@@ -348,7 +348,7 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
348int tipc_set_portunreliable(u32 ref, unsigned int isunreliable) 348int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
349{ 349{
350 struct port *p_ptr; 350 struct port *p_ptr;
351 351
352 p_ptr = tipc_port_lock(ref); 352 p_ptr = tipc_port_lock(ref);
353 if (!p_ptr) 353 if (!p_ptr)
354 return -EINVAL; 354 return -EINVAL;
@@ -365,7 +365,7 @@ static int port_unreturnable(struct port *p_ptr)
365int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable) 365int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
366{ 366{
367 struct port *p_ptr; 367 struct port *p_ptr;
368 368
369 p_ptr = tipc_port_lock(ref); 369 p_ptr = tipc_port_lock(ref);
370 if (!p_ptr) 370 if (!p_ptr)
371 return -EINVAL; 371 return -EINVAL;
@@ -377,7 +377,7 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
377int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable) 377int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
378{ 378{
379 struct port *p_ptr; 379 struct port *p_ptr;
380 380
381 p_ptr = tipc_port_lock(ref); 381 p_ptr = tipc_port_lock(ref);
382 if (!p_ptr) 382 if (!p_ptr)
383 return -EINVAL; 383 return -EINVAL;
@@ -386,19 +386,19 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
386 return TIPC_OK; 386 return TIPC_OK;
387} 387}
388 388
389/* 389/*
390 * port_build_proto_msg(): build a port level protocol 390 * port_build_proto_msg(): build a port level protocol
391 * or a connection abortion message. Called with 391 * or a connection abortion message. Called with
392 * tipc_port lock on. 392 * tipc_port lock on.
393 */ 393 */
394static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode, 394static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
395 u32 origport, u32 orignode, 395 u32 origport, u32 orignode,
396 u32 usr, u32 type, u32 err, 396 u32 usr, u32 type, u32 err,
397 u32 seqno, u32 ack) 397 u32 seqno, u32 ack)
398{ 398{
399 struct sk_buff *buf; 399 struct sk_buff *buf;
400 struct tipc_msg *msg; 400 struct tipc_msg *msg;
401 401
402 buf = buf_acquire(LONG_H_SIZE); 402 buf = buf_acquire(LONG_H_SIZE);
403 if (buf) { 403 if (buf) {
404 msg = buf_msg(buf); 404 msg = buf_msg(buf);
@@ -461,7 +461,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
461 msg_set_orignode(rmsg, tipc_own_addr); 461 msg_set_orignode(rmsg, tipc_own_addr);
462 else 462 else
463 msg_set_orignode(rmsg, msg_destnode(msg)); 463 msg_set_orignode(rmsg, msg_destnode(msg));
464 msg_set_size(rmsg, data_sz + hdr_sz); 464 msg_set_size(rmsg, data_sz + hdr_sz);
465 msg_set_nametype(rmsg, msg_nametype(msg)); 465 msg_set_nametype(rmsg, msg_nametype(msg));
466 msg_set_nameinst(rmsg, msg_nameinst(msg)); 466 msg_set_nameinst(rmsg, msg_nameinst(msg));
467 memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz); 467 memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);
@@ -492,7 +492,7 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
492 struct sk_buff *buf; 492 struct sk_buff *buf;
493 int res; 493 int res;
494 494
495 res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, 495 res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
496 !p_ptr->user_port, &buf); 496 !p_ptr->user_port, &buf);
497 if (!buf) 497 if (!buf)
498 return res; 498 return res;
@@ -523,7 +523,7 @@ static void port_timeout(unsigned long ref)
523 tipc_own_addr, 523 tipc_own_addr,
524 CONN_MANAGER, 524 CONN_MANAGER,
525 CONN_PROBE, 525 CONN_PROBE,
526 TIPC_OK, 526 TIPC_OK,
527 port_out_seqno(p_ptr), 527 port_out_seqno(p_ptr),
528 0); 528 0);
529 port_incr_out_seqno(p_ptr); 529 port_incr_out_seqno(p_ptr);
@@ -562,7 +562,7 @@ static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
562 port_peernode(p_ptr), 562 port_peernode(p_ptr),
563 imp, 563 imp,
564 TIPC_CONN_MSG, 564 TIPC_CONN_MSG,
565 err, 565 err,
566 p_ptr->last_in_seqno + 1, 566 p_ptr->last_in_seqno + 1,
567 0); 567 0);
568} 568}
@@ -582,7 +582,7 @@ static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
582 tipc_own_addr, 582 tipc_own_addr,
583 imp, 583 imp,
584 TIPC_CONN_MSG, 584 TIPC_CONN_MSG,
585 err, 585 err,
586 port_out_seqno(p_ptr), 586 port_out_seqno(p_ptr),
587 0); 587 0);
588} 588}
@@ -613,7 +613,7 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
613 } 613 }
614 } 614 }
615 if (msg_type(msg) == CONN_ACK) { 615 if (msg_type(msg) == CONN_ACK) {
616 int wakeup = tipc_port_congested(p_ptr) && 616 int wakeup = tipc_port_congested(p_ptr) &&
617 p_ptr->publ.congested && 617 p_ptr->publ.congested &&
618 p_ptr->wakeup; 618 p_ptr->wakeup;
619 p_ptr->acked += msg_msgcnt(msg); 619 p_ptr->acked += msg_msgcnt(msg);
@@ -630,8 +630,8 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
630 } 630 }
631 if (err) { 631 if (err) {
632 r_buf = port_build_proto_msg(msg_origport(msg), 632 r_buf = port_build_proto_msg(msg_origport(msg),
633 msg_orignode(msg), 633 msg_orignode(msg),
634 msg_destport(msg), 634 msg_destport(msg),
635 tipc_own_addr, 635 tipc_own_addr,
636 DATA_HIGH, 636 DATA_HIGH,
637 TIPC_CONN_MSG, 637 TIPC_CONN_MSG,
@@ -643,10 +643,10 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
643 643
644 /* All is fine */ 644 /* All is fine */
645 if (msg_type(msg) == CONN_PROBE) { 645 if (msg_type(msg) == CONN_PROBE) {
646 r_buf = port_build_proto_msg(msg_origport(msg), 646 r_buf = port_build_proto_msg(msg_origport(msg),
647 msg_orignode(msg), 647 msg_orignode(msg),
648 msg_destport(msg), 648 msg_destport(msg),
649 tipc_own_addr, 649 tipc_own_addr,
650 CONN_MANAGER, 650 CONN_MANAGER,
651 CONN_PROBE_REPLY, 651 CONN_PROBE_REPLY,
652 TIPC_OK, 652 TIPC_OK,
@@ -665,39 +665,39 @@ exit:
665 665
666static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id) 666static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
667{ 667{
668 struct publication *publ; 668 struct publication *publ;
669 669
670 if (full_id) 670 if (full_id)
671 tipc_printf(buf, "<%u.%u.%u:%u>:", 671 tipc_printf(buf, "<%u.%u.%u:%u>:",
672 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 672 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
673 tipc_node(tipc_own_addr), p_ptr->publ.ref); 673 tipc_node(tipc_own_addr), p_ptr->publ.ref);
674 else 674 else
675 tipc_printf(buf, "%-10u:", p_ptr->publ.ref); 675 tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
676 676
677 if (p_ptr->publ.connected) { 677 if (p_ptr->publ.connected) {
678 u32 dport = port_peerport(p_ptr); 678 u32 dport = port_peerport(p_ptr);
679 u32 destnode = port_peernode(p_ptr); 679 u32 destnode = port_peernode(p_ptr);
680 680
681 tipc_printf(buf, " connected to <%u.%u.%u:%u>", 681 tipc_printf(buf, " connected to <%u.%u.%u:%u>",
682 tipc_zone(destnode), tipc_cluster(destnode), 682 tipc_zone(destnode), tipc_cluster(destnode),
683 tipc_node(destnode), dport); 683 tipc_node(destnode), dport);
684 if (p_ptr->publ.conn_type != 0) 684 if (p_ptr->publ.conn_type != 0)
685 tipc_printf(buf, " via {%u,%u}", 685 tipc_printf(buf, " via {%u,%u}",
686 p_ptr->publ.conn_type, 686 p_ptr->publ.conn_type,
687 p_ptr->publ.conn_instance); 687 p_ptr->publ.conn_instance);
688 } 688 }
689 else if (p_ptr->publ.published) { 689 else if (p_ptr->publ.published) {
690 tipc_printf(buf, " bound to"); 690 tipc_printf(buf, " bound to");
691 list_for_each_entry(publ, &p_ptr->publications, pport_list) { 691 list_for_each_entry(publ, &p_ptr->publications, pport_list) {
692 if (publ->lower == publ->upper) 692 if (publ->lower == publ->upper)
693 tipc_printf(buf, " {%u,%u}", publ->type, 693 tipc_printf(buf, " {%u,%u}", publ->type,
694 publ->lower); 694 publ->lower);
695 else 695 else
696 tipc_printf(buf, " {%u,%u,%u}", publ->type, 696 tipc_printf(buf, " {%u,%u,%u}", publ->type,
697 publ->lower, publ->upper); 697 publ->lower, publ->upper);
698 } 698 }
699 } 699 }
700 tipc_printf(buf, "\n"); 700 tipc_printf(buf, "\n");
701} 701}
702 702
703#define MAX_PORT_QUERY 32768 703#define MAX_PORT_QUERY 32768
@@ -818,7 +818,7 @@ static void port_dispatcher_sigh(void *dummy)
818 struct sk_buff *next = buf->next; 818 struct sk_buff *next = buf->next;
819 struct tipc_msg *msg = buf_msg(buf); 819 struct tipc_msg *msg = buf_msg(buf);
820 u32 dref = msg_destport(msg); 820 u32 dref = msg_destport(msg);
821 821
822 message_type = msg_type(msg); 822 message_type = msg_type(msg);
823 if (message_type > TIPC_DIRECT_MSG) 823 if (message_type > TIPC_DIRECT_MSG)
824 goto reject; /* Unsupported message type */ 824 goto reject; /* Unsupported message type */
@@ -838,7 +838,7 @@ static void port_dispatcher_sigh(void *dummy)
838 goto err; 838 goto err;
839 839
840 switch (message_type) { 840 switch (message_type) {
841 841
842 case TIPC_CONN_MSG:{ 842 case TIPC_CONN_MSG:{
843 tipc_conn_msg_event cb = up_ptr->conn_msg_cb; 843 tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
844 u32 peer_port = port_peerport(p_ptr); 844 u32 peer_port = port_peerport(p_ptr);
@@ -856,9 +856,9 @@ static void port_dispatcher_sigh(void *dummy)
856 goto reject; 856 goto reject;
857 if (unlikely(!cb)) 857 if (unlikely(!cb))
858 goto reject; 858 goto reject;
859 if (unlikely(++p_ptr->publ.conn_unacked >= 859 if (unlikely(++p_ptr->publ.conn_unacked >=
860 TIPC_FLOW_CONTROL_WIN)) 860 TIPC_FLOW_CONTROL_WIN))
861 tipc_acknowledge(dref, 861 tipc_acknowledge(dref,
862 p_ptr->publ.conn_unacked); 862 p_ptr->publ.conn_unacked);
863 skb_pull(buf, msg_hdr_sz(msg)); 863 skb_pull(buf, msg_hdr_sz(msg));
864 cb(usr_handle, dref, &buf, msg_data(msg), 864 cb(usr_handle, dref, &buf, msg_data(msg),
@@ -874,7 +874,7 @@ static void port_dispatcher_sigh(void *dummy)
874 if (unlikely(!cb)) 874 if (unlikely(!cb))
875 goto reject; 875 goto reject;
876 skb_pull(buf, msg_hdr_sz(msg)); 876 skb_pull(buf, msg_hdr_sz(msg));
877 cb(usr_handle, dref, &buf, msg_data(msg), 877 cb(usr_handle, dref, &buf, msg_data(msg),
878 msg_data_sz(msg), msg_importance(msg), 878 msg_data_sz(msg), msg_importance(msg),
879 &orig); 879 &orig);
880 break; 880 break;
@@ -895,7 +895,7 @@ static void port_dispatcher_sigh(void *dummy)
895 dseq.upper = (message_type == TIPC_NAMED_MSG) 895 dseq.upper = (message_type == TIPC_NAMED_MSG)
896 ? dseq.lower : msg_nameupper(msg); 896 ? dseq.lower : msg_nameupper(msg);
897 skb_pull(buf, msg_hdr_sz(msg)); 897 skb_pull(buf, msg_hdr_sz(msg));
898 cb(usr_handle, dref, &buf, msg_data(msg), 898 cb(usr_handle, dref, &buf, msg_data(msg),
899 msg_data_sz(msg), msg_importance(msg), 899 msg_data_sz(msg), msg_importance(msg),
900 &orig, &dseq); 900 &orig, &dseq);
901 break; 901 break;
@@ -907,9 +907,9 @@ static void port_dispatcher_sigh(void *dummy)
907 continue; 907 continue;
908err: 908err:
909 switch (message_type) { 909 switch (message_type) {
910 910
911 case TIPC_CONN_MSG:{ 911 case TIPC_CONN_MSG:{
912 tipc_conn_shutdown_event cb = 912 tipc_conn_shutdown_event cb =
913 up_ptr->conn_err_cb; 913 up_ptr->conn_err_cb;
914 u32 peer_port = port_peerport(p_ptr); 914 u32 peer_port = port_peerport(p_ptr);
915 u32 peer_node = port_peernode(p_ptr); 915 u32 peer_node = port_peernode(p_ptr);
@@ -940,7 +940,7 @@ err:
940 } 940 }
941 case TIPC_MCAST_MSG: 941 case TIPC_MCAST_MSG:
942 case TIPC_NAMED_MSG:{ 942 case TIPC_NAMED_MSG:{
943 tipc_named_msg_err_event cb = 943 tipc_named_msg_err_event cb =
944 up_ptr->named_err_cb; 944 up_ptr->named_err_cb;
945 945
946 spin_unlock_bh(p_ptr->publ.lock); 946 spin_unlock_bh(p_ptr->publ.lock);
@@ -951,7 +951,7 @@ err:
951 dseq.upper = (message_type == TIPC_NAMED_MSG) 951 dseq.upper = (message_type == TIPC_NAMED_MSG)
952 ? dseq.lower : msg_nameupper(msg); 952 ? dseq.lower : msg_nameupper(msg);
953 skb_pull(buf, msg_hdr_sz(msg)); 953 skb_pull(buf, msg_hdr_sz(msg));
954 cb(usr_handle, dref, &buf, msg_data(msg), 954 cb(usr_handle, dref, &buf, msg_data(msg),
955 msg_data_sz(msg), msg_errcode(msg), &dseq); 955 msg_data_sz(msg), msg_errcode(msg), &dseq);
956 break; 956 break;
957 } 957 }
@@ -986,9 +986,9 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
986 return TIPC_OK; 986 return TIPC_OK;
987} 987}
988 988
989/* 989/*
990 * Wake up port after congestion: Called with port locked, 990 * Wake up port after congestion: Called with port locked,
991 * 991 *
992 */ 992 */
993 993
994static void port_wakeup_sh(unsigned long ref) 994static void port_wakeup_sh(unsigned long ref)
@@ -1033,7 +1033,7 @@ void tipc_acknowledge(u32 ref, u32 ack)
1033 tipc_own_addr, 1033 tipc_own_addr,
1034 CONN_MANAGER, 1034 CONN_MANAGER,
1035 CONN_ACK, 1035 CONN_ACK,
1036 TIPC_OK, 1036 TIPC_OK,
1037 port_out_seqno(p_ptr), 1037 port_out_seqno(p_ptr),
1038 ack); 1038 ack);
1039 } 1039 }
@@ -1046,20 +1046,20 @@ void tipc_acknowledge(u32 ref, u32 ack)
1046 * registry if non-zero user_ref. 1046 * registry if non-zero user_ref.
1047 */ 1047 */
1048 1048
1049int tipc_createport(u32 user_ref, 1049int tipc_createport(u32 user_ref,
1050 void *usr_handle, 1050 void *usr_handle,
1051 unsigned int importance, 1051 unsigned int importance,
1052 tipc_msg_err_event error_cb, 1052 tipc_msg_err_event error_cb,
1053 tipc_named_msg_err_event named_error_cb, 1053 tipc_named_msg_err_event named_error_cb,
1054 tipc_conn_shutdown_event conn_error_cb, 1054 tipc_conn_shutdown_event conn_error_cb,
1055 tipc_msg_event msg_cb, 1055 tipc_msg_event msg_cb,
1056 tipc_named_msg_event named_msg_cb, 1056 tipc_named_msg_event named_msg_cb,
1057 tipc_conn_msg_event conn_msg_cb, 1057 tipc_conn_msg_event conn_msg_cb,
1058 tipc_continue_event continue_event_cb,/* May be zero */ 1058 tipc_continue_event continue_event_cb,/* May be zero */
1059 u32 *portref) 1059 u32 *portref)
1060{ 1060{
1061 struct user_port *up_ptr; 1061 struct user_port *up_ptr;
1062 struct port *p_ptr; 1062 struct port *p_ptr;
1063 u32 ref; 1063 u32 ref;
1064 1064
1065 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1065 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
@@ -1088,7 +1088,7 @@ int tipc_createport(u32 user_ref,
1088 INIT_LIST_HEAD(&up_ptr->uport_list); 1088 INIT_LIST_HEAD(&up_ptr->uport_list);
1089 tipc_reg_add_port(up_ptr); 1089 tipc_reg_add_port(up_ptr);
1090 *portref = p_ptr->publ.ref; 1090 *portref = p_ptr->publ.ref;
1091 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref); 1091 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1092 tipc_port_unlock(p_ptr); 1092 tipc_port_unlock(p_ptr);
1093 return TIPC_OK; 1093 return TIPC_OK;
1094} 1094}
@@ -1103,7 +1103,7 @@ int tipc_ownidentity(u32 ref, struct tipc_portid *id)
1103int tipc_portimportance(u32 ref, unsigned int *importance) 1103int tipc_portimportance(u32 ref, unsigned int *importance)
1104{ 1104{
1105 struct port *p_ptr; 1105 struct port *p_ptr;
1106 1106
1107 p_ptr = tipc_port_lock(ref); 1107 p_ptr = tipc_port_lock(ref);
1108 if (!p_ptr) 1108 if (!p_ptr)
1109 return -EINVAL; 1109 return -EINVAL;
@@ -1172,19 +1172,19 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1172 struct publication *publ; 1172 struct publication *publ;
1173 struct publication *tpubl; 1173 struct publication *tpubl;
1174 int res = -EINVAL; 1174 int res = -EINVAL;
1175 1175
1176 p_ptr = tipc_port_lock(ref); 1176 p_ptr = tipc_port_lock(ref);
1177 if (!p_ptr) 1177 if (!p_ptr)
1178 return -EINVAL; 1178 return -EINVAL;
1179 if (!seq) { 1179 if (!seq) {
1180 list_for_each_entry_safe(publ, tpubl, 1180 list_for_each_entry_safe(publ, tpubl,
1181 &p_ptr->publications, pport_list) { 1181 &p_ptr->publications, pport_list) {
1182 tipc_nametbl_withdraw(publ->type, publ->lower, 1182 tipc_nametbl_withdraw(publ->type, publ->lower,
1183 publ->ref, publ->key); 1183 publ->ref, publ->key);
1184 } 1184 }
1185 res = TIPC_OK; 1185 res = TIPC_OK;
1186 } else { 1186 } else {
1187 list_for_each_entry_safe(publ, tpubl, 1187 list_for_each_entry_safe(publ, tpubl,
1188 &p_ptr->publications, pport_list) { 1188 &p_ptr->publications, pport_list) {
1189 if (publ->scope != scope) 1189 if (publ->scope != scope)
1190 continue; 1190 continue;
@@ -1194,7 +1194,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1194 continue; 1194 continue;
1195 if (publ->upper != seq->upper) 1195 if (publ->upper != seq->upper)
1196 break; 1196 break;
1197 tipc_nametbl_withdraw(publ->type, publ->lower, 1197 tipc_nametbl_withdraw(publ->type, publ->lower,
1198 publ->ref, publ->key); 1198 publ->ref, publ->key);
1199 res = TIPC_OK; 1199 res = TIPC_OK;
1200 break; 1200 break;
@@ -1292,7 +1292,7 @@ int tipc_shutdown(u32 ref)
1292 tipc_own_addr, 1292 tipc_own_addr,
1293 imp, 1293 imp,
1294 TIPC_CONN_MSG, 1294 TIPC_CONN_MSG,
1295 TIPC_CONN_SHUTDOWN, 1295 TIPC_CONN_SHUTDOWN,
1296 port_out_seqno(p_ptr), 1296 port_out_seqno(p_ptr),
1297 0); 1297 0);
1298 } 1298 }
@@ -1304,7 +1304,7 @@ int tipc_shutdown(u32 ref)
1304int tipc_isconnected(u32 ref, int *isconnected) 1304int tipc_isconnected(u32 ref, int *isconnected)
1305{ 1305{
1306 struct port *p_ptr; 1306 struct port *p_ptr;
1307 1307
1308 p_ptr = tipc_port_lock(ref); 1308 p_ptr = tipc_port_lock(ref);
1309 if (!p_ptr) 1309 if (!p_ptr)
1310 return -EINVAL; 1310 return -EINVAL;
@@ -1317,7 +1317,7 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
1317{ 1317{
1318 struct port *p_ptr; 1318 struct port *p_ptr;
1319 int res; 1319 int res;
1320 1320
1321 p_ptr = tipc_port_lock(ref); 1321 p_ptr = tipc_port_lock(ref);
1322 if (!p_ptr) 1322 if (!p_ptr)
1323 return -EINVAL; 1323 return -EINVAL;
@@ -1348,7 +1348,7 @@ int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
1348{ 1348{
1349 struct sk_buff *buf; 1349 struct sk_buff *buf;
1350 int res; 1350 int res;
1351 1351
1352 res = msg_build(&sender->publ.phdr, msg_sect, num_sect, 1352 res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
1353 MAX_MSG_SIZE, !sender->user_port, &buf); 1353 MAX_MSG_SIZE, !sender->user_port, &buf);
1354 if (likely(buf)) 1354 if (likely(buf))
@@ -1394,7 +1394,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1394 return -ELINKCONG; 1394 return -ELINKCONG;
1395} 1395}
1396 1396
1397/** 1397/**
1398 * tipc_send_buf - send message buffer on connection 1398 * tipc_send_buf - send message buffer on connection
1399 */ 1399 */
1400 1400
@@ -1406,7 +1406,7 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1406 u32 hsz; 1406 u32 hsz;
1407 u32 sz; 1407 u32 sz;
1408 u32 res; 1408 u32 res;
1409 1409
1410 p_ptr = tipc_port_deref(ref); 1410 p_ptr = tipc_port_deref(ref);
1411 if (!p_ptr || !p_ptr->publ.connected) 1411 if (!p_ptr || !p_ptr->publ.connected)
1412 return -EINVAL; 1412 return -EINVAL;
@@ -1447,12 +1447,12 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1447 * tipc_forward2name - forward message sections to port name 1447 * tipc_forward2name - forward message sections to port name
1448 */ 1448 */
1449 1449
1450int tipc_forward2name(u32 ref, 1450int tipc_forward2name(u32 ref,
1451 struct tipc_name const *name, 1451 struct tipc_name const *name,
1452 u32 domain, 1452 u32 domain,
1453 u32 num_sect, 1453 u32 num_sect,
1454 struct iovec const *msg_sect, 1454 struct iovec const *msg_sect,
1455 struct tipc_portid const *orig, 1455 struct tipc_portid const *orig,
1456 unsigned int importance) 1456 unsigned int importance)
1457{ 1457{
1458 struct port *p_ptr; 1458 struct port *p_ptr;
@@ -1483,7 +1483,7 @@ int tipc_forward2name(u32 ref,
1483 p_ptr->sent++; 1483 p_ptr->sent++;
1484 if (likely(destnode == tipc_own_addr)) 1484 if (likely(destnode == tipc_own_addr))
1485 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1485 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1486 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1486 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1487 destnode); 1487 destnode);
1488 if (likely(res != -ELINKCONG)) 1488 if (likely(res != -ELINKCONG))
1489 return res; 1489 return res;
@@ -1493,7 +1493,7 @@ int tipc_forward2name(u32 ref,
1493 } 1493 }
1494 return -ELINKCONG; 1494 return -ELINKCONG;
1495 } 1495 }
1496 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, 1496 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1497 TIPC_ERR_NO_NAME); 1497 TIPC_ERR_NO_NAME);
1498} 1498}
1499 1499
@@ -1501,10 +1501,10 @@ int tipc_forward2name(u32 ref,
1501 * tipc_send2name - send message sections to port name 1501 * tipc_send2name - send message sections to port name
1502 */ 1502 */
1503 1503
1504int tipc_send2name(u32 ref, 1504int tipc_send2name(u32 ref,
1505 struct tipc_name const *name, 1505 struct tipc_name const *name,
1506 unsigned int domain, 1506 unsigned int domain,
1507 unsigned int num_sect, 1507 unsigned int num_sect,
1508 struct iovec const *msg_sect) 1508 struct iovec const *msg_sect)
1509{ 1509{
1510 struct tipc_portid orig; 1510 struct tipc_portid orig;
@@ -1515,7 +1515,7 @@ int tipc_send2name(u32 ref,
1515 TIPC_PORT_IMPORTANCE); 1515 TIPC_PORT_IMPORTANCE);
1516} 1516}
1517 1517
1518/** 1518/**
1519 * tipc_forward_buf2name - forward message buffer to port name 1519 * tipc_forward_buf2name - forward message buffer to port name
1520 */ 1520 */
1521 1521
@@ -1571,14 +1571,14 @@ int tipc_forward_buf2name(u32 ref,
1571 return tipc_reject_msg(buf, TIPC_ERR_NO_NAME); 1571 return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
1572} 1572}
1573 1573
1574/** 1574/**
1575 * tipc_send_buf2name - send message buffer to port name 1575 * tipc_send_buf2name - send message buffer to port name
1576 */ 1576 */
1577 1577
1578int tipc_send_buf2name(u32 ref, 1578int tipc_send_buf2name(u32 ref,
1579 struct tipc_name const *dest, 1579 struct tipc_name const *dest,
1580 u32 domain, 1580 u32 domain,
1581 struct sk_buff *buf, 1581 struct sk_buff *buf,
1582 unsigned int dsz) 1582 unsigned int dsz)
1583{ 1583{
1584 struct tipc_portid orig; 1584 struct tipc_portid orig;
@@ -1589,15 +1589,15 @@ int tipc_send_buf2name(u32 ref,
1589 TIPC_PORT_IMPORTANCE); 1589 TIPC_PORT_IMPORTANCE);
1590} 1590}
1591 1591
1592/** 1592/**
1593 * tipc_forward2port - forward message sections to port identity 1593 * tipc_forward2port - forward message sections to port identity
1594 */ 1594 */
1595 1595
1596int tipc_forward2port(u32 ref, 1596int tipc_forward2port(u32 ref,
1597 struct tipc_portid const *dest, 1597 struct tipc_portid const *dest,
1598 unsigned int num_sect, 1598 unsigned int num_sect,
1599 struct iovec const *msg_sect, 1599 struct iovec const *msg_sect,
1600 struct tipc_portid const *orig, 1600 struct tipc_portid const *orig,
1601 unsigned int importance) 1601 unsigned int importance)
1602{ 1602{
1603 struct port *p_ptr; 1603 struct port *p_ptr;
@@ -1630,24 +1630,24 @@ int tipc_forward2port(u32 ref,
1630 return -ELINKCONG; 1630 return -ELINKCONG;
1631} 1631}
1632 1632
1633/** 1633/**
1634 * tipc_send2port - send message sections to port identity 1634 * tipc_send2port - send message sections to port identity
1635 */ 1635 */
1636 1636
1637int tipc_send2port(u32 ref, 1637int tipc_send2port(u32 ref,
1638 struct tipc_portid const *dest, 1638 struct tipc_portid const *dest,
1639 unsigned int num_sect, 1639 unsigned int num_sect,
1640 struct iovec const *msg_sect) 1640 struct iovec const *msg_sect)
1641{ 1641{
1642 struct tipc_portid orig; 1642 struct tipc_portid orig;
1643 1643
1644 orig.ref = ref; 1644 orig.ref = ref;
1645 orig.node = tipc_own_addr; 1645 orig.node = tipc_own_addr;
1646 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig, 1646 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
1647 TIPC_PORT_IMPORTANCE); 1647 TIPC_PORT_IMPORTANCE);
1648} 1648}
1649 1649
1650/** 1650/**
1651 * tipc_forward_buf2port - forward message buffer to port identity 1651 * tipc_forward_buf2port - forward message buffer to port identity
1652 */ 1652 */
1653int tipc_forward_buf2port(u32 ref, 1653int tipc_forward_buf2port(u32 ref,
@@ -1692,20 +1692,20 @@ int tipc_forward_buf2port(u32 ref,
1692 return -ELINKCONG; 1692 return -ELINKCONG;
1693} 1693}
1694 1694
1695/** 1695/**
1696 * tipc_send_buf2port - send message buffer to port identity 1696 * tipc_send_buf2port - send message buffer to port identity
1697 */ 1697 */
1698 1698
1699int tipc_send_buf2port(u32 ref, 1699int tipc_send_buf2port(u32 ref,
1700 struct tipc_portid const *dest, 1700 struct tipc_portid const *dest,
1701 struct sk_buff *buf, 1701 struct sk_buff *buf,
1702 unsigned int dsz) 1702 unsigned int dsz)
1703{ 1703{
1704 struct tipc_portid orig; 1704 struct tipc_portid orig;
1705 1705
1706 orig.ref = ref; 1706 orig.ref = ref;
1707 orig.node = tipc_own_addr; 1707 orig.node = tipc_own_addr;
1708 return tipc_forward_buf2port(ref, dest, buf, dsz, &orig, 1708 return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
1709 TIPC_PORT_IMPORTANCE); 1709 TIPC_PORT_IMPORTANCE);
1710} 1710}
1711 1711
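
The port.c hunks above show the pattern shared by the native send API: each tipc_send*() routine only fills in the sending port's own identity (and a default importance) before delegating to the matching tipc_forward*() routine, which takes origin and importance explicitly. A toy sketch of that wrapper pattern follows; the types and names are stand-ins, not the kernel's.

/* Toy illustration of the send/forward wrapper pattern seen above.
 * toy_forward() takes the origin and importance explicitly;
 * toy_send() is the common-case wrapper that supplies the caller's
 * own identity and a default importance. */
#include <stdio.h>

struct toy_portid {
        unsigned int ref;
        unsigned int node;
};

static const unsigned int toy_own_node = 0x01001001u; /* stand-in for tipc_own_addr */

/* "forward": origin and importance are supplied by the caller. */
static int toy_forward(unsigned int ref, const struct toy_portid *dest,
                       const struct toy_portid *orig, unsigned int importance)
{
        printf("msg from port %u -> <%u.%u> from <%u.%u> (imp %u)\n",
               ref, dest->node, dest->ref, orig->node, orig->ref, importance);
        return 0;
}

/* "send": the common case, origin defaults to the sending port itself. */
static int toy_send(unsigned int ref, const struct toy_portid *dest)
{
        struct toy_portid orig = { .ref = ref, .node = toy_own_node };

        return toy_forward(ref, dest, &orig, 0 /* default importance */);
}

int main(void)
{
        struct toy_portid dest = { .ref = 99, .node = 0x01001002u };

        return toy_send(7, &dest);
}
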
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 839f100da646..7ef4d64b32f7 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/port.h: Include file for TIPC port code 2 * net/tipc/port.h: Include file for TIPC port code
3 * 3 *
4 * Copyright (c) 1994-2006, Ericsson AB 4 * Copyright (c) 1994-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -52,17 +52,17 @@
52 * <various callback routines> 52 * <various callback routines>
53 * @uport_list: adjacent user ports in list of ports held by user 53 * @uport_list: adjacent user ports in list of ports held by user
54 */ 54 */
55 55
56struct user_port { 56struct user_port {
57 u32 user_ref; 57 u32 user_ref;
58 void *usr_handle; 58 void *usr_handle;
59 u32 ref; 59 u32 ref;
60 tipc_msg_err_event err_cb; 60 tipc_msg_err_event err_cb;
61 tipc_named_msg_err_event named_err_cb; 61 tipc_named_msg_err_event named_err_cb;
62 tipc_conn_shutdown_event conn_err_cb; 62 tipc_conn_shutdown_event conn_err_cb;
63 tipc_msg_event msg_cb; 63 tipc_msg_event msg_cb;
64 tipc_named_msg_event named_msg_cb; 64 tipc_named_msg_event named_msg_cb;
65 tipc_conn_msg_event conn_msg_cb; 65 tipc_conn_msg_event conn_msg_cb;
66 tipc_continue_event continue_event_cb; 66 tipc_continue_event continue_event_cb;
67 struct list_head uport_list; 67 struct list_head uport_list;
68}; 68};
@@ -113,7 +113,7 @@ struct port {
113extern spinlock_t tipc_port_list_lock; 113extern spinlock_t tipc_port_list_lock;
114struct port_list; 114struct port_list;
115 115
116int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect, 116int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect,
117 struct iovec const *msg_sect); 117 struct iovec const *msg_sect);
118int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, 118int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
119 struct iovec const *msg_sect, u32 num_sect, 119 struct iovec const *msg_sect, u32 num_sect,
@@ -133,9 +133,9 @@ static inline struct port *tipc_port_lock(u32 ref)
133 return (struct port *)tipc_ref_lock(ref); 133 return (struct port *)tipc_ref_lock(ref);
134} 134}
135 135
136/** 136/**
137 * tipc_port_unlock - unlock a port instance 137 * tipc_port_unlock - unlock a port instance
138 * 138 *
139 * Can use pointer instead of tipc_ref_unlock() since port is already locked. 139 * Can use pointer instead of tipc_ref_unlock() since port is already locked.
140 */ 140 */
141 141
@@ -164,7 +164,7 @@ static inline int tipc_port_congested(struct port *p_ptr)
164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2)); 164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
165} 165}
166 166
167/** 167/**
168 * tipc_port_recv_msg - receive message from lower layer and deliver to port user 168 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
169 */ 169 */
170 170
@@ -175,7 +175,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
175 u32 destport = msg_destport(msg); 175 u32 destport = msg_destport(msg);
176 u32 dsz = msg_data_sz(msg); 176 u32 dsz = msg_data_sz(msg);
177 u32 err; 177 u32 err;
178 178
179 /* forward unresolved named message */ 179 /* forward unresolved named message */
180 if (unlikely(!destport)) { 180 if (unlikely(!destport)) {
181 tipc_net_route_msg(buf); 181 tipc_net_route_msg(buf);
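
The port.h hunk above includes the inline tipc_port_congested() helper, which declares a port congested once (sent - acked) reaches twice TIPC_FLOW_CONTROL_WIN; senders that hit -ELINKCONG back off until this clears. A minimal standalone sketch of that window test follows; the structure, names, and window size are illustrative stand-ins, not the kernel's definitions.

/* Toy model of the sent/acked flow-control window used by
 * tipc_port_congested(); the window size here is arbitrary. */
#include <stdio.h>

#define FLOW_CONTROL_WIN 50u   /* stand-in for TIPC_FLOW_CONTROL_WIN */

struct toy_port {
        unsigned int sent;     /* messages handed to the link layer */
        unsigned int acked;    /* messages acknowledged by the peer */
};

/* Congested once the peer is two windows behind; unsigned
 * subtraction keeps the outstanding count correct even if
 * the counters wrap around. */
static int toy_port_congested(const struct toy_port *p)
{
        return (p->sent - p->acked) >= (FLOW_CONTROL_WIN * 2);
}

int main(void)
{
        struct toy_port p = { .sent = 150, .acked = 30 };

        printf("congested: %d\n", toy_port_congested(&p)); /* 1 */
        p.acked = 100;
        printf("congested: %d\n", toy_port_congested(&p)); /* 0 */
        return 0;
}
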
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index e6d6ae22ea49..6704a58c7851 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/ref.c: TIPC object registry code 2 * net/tipc/ref.c: TIPC object registry code
3 * 3 *
4 * Copyright (c) 1991-2006, Ericsson AB 4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -50,11 +50,11 @@
50 * Object reference table consists of 2**N entries. 50 * Object reference table consists of 2**N entries.
51 * 51 *
52 * A used entry has object ptr != 0, reference == XXXX|own index 52 * A used entry has object ptr != 0, reference == XXXX|own index
53 * (XXXX changes each time entry is acquired) 53 * (XXXX changes each time entry is acquired)
54 * A free entry has object ptr == 0, reference == YYYY|next free index 54 * A free entry has object ptr == 0, reference == YYYY|next free index
55 * (YYYY is one more than last used XXXX) 55 * (YYYY is one more than last used XXXX)
56 * 56 *
57 * Free list is initially chained from entry (2**N)-1 to entry 1. 57 * Free list is initially chained from entry (2**N)-1 to entry 1.
58 * Entry 0 is not used to allow index 0 to indicate the end of the free list. 58 * Entry 0 is not used to allow index 0 to indicate the end of the free list.
59 * 59 *
60 * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0 60 * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
@@ -113,9 +113,9 @@ void tipc_ref_table_stop(void)
113 113
114/** 114/**
115 * tipc_ref_acquire - create reference to an object 115 * tipc_ref_acquire - create reference to an object
116 * 116 *
117 * Return a unique reference value which can be translated back to the pointer 117 * Return a unique reference value which can be translated back to the pointer
118 * 'object' at a later time. Also, pass back a pointer to the lock protecting 118 * 'object' at a later time. Also, pass back a pointer to the lock protecting
119 * the object, but without locking it. 119 * the object, but without locking it.
120 */ 120 */
121 121
@@ -141,15 +141,15 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
141 index = tipc_ref_table.first_free; 141 index = tipc_ref_table.first_free;
142 entry = &(tipc_ref_table.entries[index]); 142 entry = &(tipc_ref_table.entries[index]);
143 index_mask = tipc_ref_table.index_mask; 143 index_mask = tipc_ref_table.index_mask;
144 /* take lock in case a previous user of entry still holds it */ 144 /* take lock in case a previous user of entry still holds it */
145 spin_lock_bh(&entry->lock); 145 spin_lock_bh(&entry->lock);
146 next_plus_upper = entry->data.next_plus_upper; 146 next_plus_upper = entry->data.next_plus_upper;
147 tipc_ref_table.first_free = next_plus_upper & index_mask; 147 tipc_ref_table.first_free = next_plus_upper & index_mask;
148 reference = (next_plus_upper & ~index_mask) + index; 148 reference = (next_plus_upper & ~index_mask) + index;
149 entry->data.reference = reference; 149 entry->data.reference = reference;
150 entry->object = object; 150 entry->object = object;
151 if (lock != 0) 151 if (lock != 0)
152 *lock = &entry->lock; 152 *lock = &entry->lock;
153 spin_unlock_bh(&entry->lock); 153 spin_unlock_bh(&entry->lock);
154 } 154 }
155 write_unlock_bh(&ref_table_lock); 155 write_unlock_bh(&ref_table_lock);
@@ -158,7 +158,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
158 158
159/** 159/**
160 * tipc_ref_discard - invalidate references to an object 160 * tipc_ref_discard - invalidate references to an object
161 * 161 *
162 * Disallow future references to an object and free up the entry for re-use. 162 * Disallow future references to an object and free up the entry for re-use.
163 * Note: The entry's spin_lock may still be busy after discard 163 * Note: The entry's spin_lock may still be busy after discard
164 */ 164 */
@@ -166,7 +166,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
166void tipc_ref_discard(u32 ref) 166void tipc_ref_discard(u32 ref)
167{ 167{
168 struct reference *entry; 168 struct reference *entry;
169 u32 index; 169 u32 index;
170 u32 index_mask; 170 u32 index_mask;
171 171
172 if (!ref) { 172 if (!ref) {
@@ -198,7 +198,7 @@ void tipc_ref_discard(u32 ref)
198 tipc_ref_table.first_free = index; 198 tipc_ref_table.first_free = index;
199 else 199 else
200 /* next_plus_upper is always XXXX|0--0 for last free entry */ 200 /* next_plus_upper is always XXXX|0--0 for last free entry */
201 tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper 201 tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
202 |= index; 202 |= index;
203 tipc_ref_table.last_free = index; 203 tipc_ref_table.last_free = index;
204 204
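
The ref.c comment block above describes how a reference packs a slot index into its low bits and a per-slot generation into the upper bits, which is what lets tipc_ref_deref() (in ref.h, next) reject stale references with a single compare. A self-contained userspace sketch of that encoding, with illustrative table size and names:

/* Minimal sketch of the reference scheme documented above: the low
 * bits of a reference select the table slot, the upper bits carry a
 * generation that changes each time the slot is reused, so a stale
 * reference fails the comparison instead of reaching the new object. */
#include <stdio.h>
#include <stdint.h>

#define TBL_BITS   4u
#define TBL_SIZE   (1u << TBL_BITS)            /* 2**N entries */
#define INDEX_MASK (TBL_SIZE - 1u)

struct toy_ref_entry {
        void     *object;      /* NULL when the slot is free */
        uint32_t  reference;   /* generation | own index when in use */
};

static struct toy_ref_entry table[TBL_SIZE];

static uint32_t toy_acquire(uint32_t index, uint32_t generation, void *obj)
{
        uint32_t ref = (generation << TBL_BITS) | index;

        table[index].object = obj;
        table[index].reference = ref;
        return ref;
}

static void *toy_deref(uint32_t ref)
{
        struct toy_ref_entry *e = &table[ref & INDEX_MASK];

        return (e->reference == ref) ? e->object : NULL;
}

int main(void)
{
        int obj = 42;
        uint32_t ref = toy_acquire(3, 7, &obj);   /* slot 3, generation 7 */

        printf("live lookup:  %p\n", toy_deref(ref));
        toy_acquire(3, 8, &obj);                  /* slot reused, new generation */
        printf("stale lookup: %p\n", toy_deref(ref)); /* stale: prints (nil) */
        return 0;
}
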
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
index 6d20006be45b..38f3a7f4a78d 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/ref.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/ref.h: Include file for TIPC object registry code 2 * net/tipc/ref.h: Include file for TIPC object registry code
3 * 3 *
4 * Copyright (c) 1991-2006, Ericsson AB 4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -43,7 +43,7 @@
43 * @lock: spinlock controlling access to object 43 * @lock: spinlock controlling access to object
44 * @data: reference value associated with object (or link to next unused entry) 44 * @data: reference value associated with object (or link to next unused entry)
45 */ 45 */
46 46
47struct reference { 47struct reference {
48 void *object; 48 void *object;
49 spinlock_t lock; 49 spinlock_t lock;
@@ -96,7 +96,7 @@ static inline void *tipc_ref_lock(u32 ref)
96} 96}
97 97
98/** 98/**
99 * tipc_ref_unlock - unlock referenced object 99 * tipc_ref_unlock - unlock referenced object
100 */ 100 */
101 101
102static inline void tipc_ref_unlock(u32 ref) 102static inline void tipc_ref_unlock(u32 ref)
@@ -119,7 +119,7 @@ static inline void tipc_ref_unlock(u32 ref)
119static inline void *tipc_ref_deref(u32 ref) 119static inline void *tipc_ref_deref(u32 ref)
120{ 120{
121 if (likely(tipc_ref_table.entries)) { 121 if (likely(tipc_ref_table.entries)) {
122 struct reference *r = 122 struct reference *r =
123 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask]; 123 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
124 124
125 if (likely(r->data.reference == ref)) 125 if (likely(r->data.reference == ref))
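
The socket.c diff that follows covers the AF_TIPC socket entry points (tipc_create, bind, send_msg, recv_msg and friends). For orientation, here is a minimal userspace sketch of the SOCK_RDM datagram path those routines serve; it assumes a TIPC-enabled kernel with <linux/tipc.h> available, and the name type/instance values are arbitrary examples.

/* One SOCK_RDM socket binds a TIPC name sequence, another sends a
 * datagram to that name; this exercises the bind()/send_msg()/
 * recv_msg() paths changed below. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

int main(void)
{
        int rx = socket(AF_TIPC, SOCK_RDM, 0);
        int tx = socket(AF_TIPC, SOCK_RDM, 0);
        struct sockaddr_tipc addr;
        char buf[64];

        if (rx < 0 || tx < 0) {
                perror("socket(AF_TIPC)");
                return 1;
        }

        /* Bind the name sequence {type 18888, instances 17..17} on rx. */
        memset(&addr, 0, sizeof(addr));
        addr.family = AF_TIPC;
        addr.addrtype = TIPC_ADDR_NAMESEQ;
        addr.scope = TIPC_NODE_SCOPE;
        addr.addr.nameseq.type = 18888;
        addr.addr.nameseq.lower = 17;
        addr.addr.nameseq.upper = 17;
        if (bind(rx, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("bind");
                return 1;
        }

        /* Send one datagram to the name {18888, 17}. */
        memset(&addr, 0, sizeof(addr));
        addr.family = AF_TIPC;
        addr.addrtype = TIPC_ADDR_NAME;
        addr.addr.name.name.type = 18888;
        addr.addr.name.name.instance = 17;
        addr.addr.name.domain = 0;      /* look the name up in own zone */
        if (sendto(tx, "hello", 5, 0,
                   (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("sendto");
                return 1;
        }

        if (recv(rx, buf, sizeof(buf), 0) > 0)
                printf("received: %.5s\n", buf);

        close(tx);
        close(rx);
        return 0;
}
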
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 2a6a5a6b4c12..b71739fbe2c6 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2006, Ericsson AB 4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -82,29 +82,29 @@ static int sockets_enabled = 0;
82static atomic_t tipc_queue_size = ATOMIC_INIT(0); 82static atomic_t tipc_queue_size = ATOMIC_INIT(0);
83 83
84 84
85/* 85/*
86 * sock_lock(): Lock a port/socket pair. lock_sock() can 86 * sock_lock(): Lock a port/socket pair. lock_sock() can
87 * not be used here, since the same lock must protect ports 87 * not be used here, since the same lock must protect ports
88 * with non-socket interfaces. 88 * with non-socket interfaces.
89 * See net.c for description of locking policy. 89 * See net.c for description of locking policy.
90 */ 90 */
91static void sock_lock(struct tipc_sock* tsock) 91static void sock_lock(struct tipc_sock* tsock)
92{ 92{
93 spin_lock_bh(tsock->p->lock); 93 spin_lock_bh(tsock->p->lock);
94} 94}
95 95
96/* 96/*
97 * sock_unlock(): Unlock a port/socket pair 97 * sock_unlock(): Unlock a port/socket pair
98 */ 98 */
99static void sock_unlock(struct tipc_sock* tsock) 99static void sock_unlock(struct tipc_sock* tsock)
100{ 100{
101 spin_unlock_bh(tsock->p->lock); 101 spin_unlock_bh(tsock->p->lock);
102} 102}
103 103
104/** 104/**
105 * pollmask - determine the current set of poll() events for a socket 105 * pollmask - determine the current set of poll() events for a socket
106 * @sock: socket structure 106 * @sock: socket structure
107 * 107 *
108 * TIPC sets the returned events as follows: 108 * TIPC sets the returned events as follows:
109 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty 109 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
110 * or if a connection-oriented socket does not have an active connection 110 * or if a connection-oriented socket does not have an active connection
@@ -115,7 +115,7 @@ static void sock_unlock(struct tipc_sock* tsock)
115 * 115 *
116 * IMPORTANT: The fact that a read or write operation will not block does NOT 116 * IMPORTANT: The fact that a read or write operation will not block does NOT
117 * imply that the operation will succeed! 117 * imply that the operation will succeed!
118 * 118 *
119 * Returns pollmask value 119 * Returns pollmask value
120 */ 120 */
121 121
@@ -130,7 +130,7 @@ static u32 pollmask(struct socket *sock)
130 else 130 else
131 mask = 0; 131 mask = 0;
132 132
133 if (sock->state == SS_DISCONNECTING) 133 if (sock->state == SS_DISCONNECTING)
134 mask |= POLLHUP; 134 mask |= POLLHUP;
135 else 135 else
136 mask |= POLLOUT; 136 mask |= POLLOUT;
@@ -146,9 +146,9 @@ static u32 pollmask(struct socket *sock)
146 146
147static void advance_queue(struct tipc_sock *tsock) 147static void advance_queue(struct tipc_sock *tsock)
148{ 148{
149 sock_lock(tsock); 149 sock_lock(tsock);
150 buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue)); 150 buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue));
151 sock_unlock(tsock); 151 sock_unlock(tsock);
152 atomic_dec(&tipc_queue_size); 152 atomic_dec(&tipc_queue_size);
153} 153}
154 154
@@ -156,7 +156,7 @@ static void advance_queue(struct tipc_sock *tsock)
156 * tipc_create - create a TIPC socket 156 * tipc_create - create a TIPC socket
157 * @sock: pre-allocated socket structure 157 * @sock: pre-allocated socket structure
158 * @protocol: protocol indicator (must be 0) 158 * @protocol: protocol indicator (must be 0)
159 * 159 *
160 * This routine creates and attaches a 'struct sock' to the 'struct socket', 160 * This routine creates and attaches a 'struct sock' to the 'struct socket',
161 * then creates and attaches a TIPC port to the 'struct sock' part. 161 * then creates and attaches a TIPC port to the 'struct sock' part.
162 * 162 *
@@ -167,7 +167,7 @@ static int tipc_create(struct socket *sock, int protocol)
167 struct tipc_sock *tsock; 167 struct tipc_sock *tsock;
168 struct tipc_port *port; 168 struct tipc_port *port;
169 struct sock *sk; 169 struct sock *sk;
170 u32 ref; 170 u32 ref;
171 171
172 if (unlikely(protocol != 0)) 172 if (unlikely(protocol != 0))
173 return -EPROTONOSUPPORT; 173 return -EPROTONOSUPPORT;
@@ -232,7 +232,7 @@ static int tipc_create(struct socket *sock, int protocol)
232 * For SEQPACKET and STREAM socket types, the first message is rejected 232 * For SEQPACKET and STREAM socket types, the first message is rejected
233 * and any others are discarded. (If the first message on a STREAM socket 233 * and any others are discarded. (If the first message on a STREAM socket
234 * is partially-read, it is discarded and the next one is rejected instead.) 234 * is partially-read, it is discarded and the next one is rejected instead.)
235 * 235 *
236 * NOTE: Rejected messages are not necessarily returned to the sender! They 236 * NOTE: Rejected messages are not necessarily returned to the sender! They
237 * are returned or discarded according to the "destination droppable" setting 237 * are returned or discarded according to the "destination droppable" setting
238 * specified for the message by the sender. 238 * specified for the message by the sender.
@@ -247,7 +247,7 @@ static int release(struct socket *sock)
247 int res = TIPC_OK; 247 int res = TIPC_OK;
248 struct sk_buff *buf; 248 struct sk_buff *buf;
249 249
250 dbg("sock_delete: %x\n",tsock); 250 dbg("sock_delete: %x\n",tsock);
251 if (!tsock) 251 if (!tsock)
252 return 0; 252 return 0;
253 down_interruptible(&tsock->sem); 253 down_interruptible(&tsock->sem);
@@ -255,7 +255,7 @@ static int release(struct socket *sock)
255 up(&tsock->sem); 255 up(&tsock->sem);
256 return 0; 256 return 0;
257 } 257 }
258 258
259 /* Reject unreceived messages, unless no longer connected */ 259 /* Reject unreceived messages, unless no longer connected */
260 260
261 while (sock->state != SS_DISCONNECTING) { 261 while (sock->state != SS_DISCONNECTING) {
@@ -289,7 +289,7 @@ static int release(struct socket *sock)
289 289
290 sock_put(sk); 290 sock_put(sk);
291 291
292 atomic_dec(&tipc_user_count); 292 atomic_dec(&tipc_user_count);
293 return res; 293 return res;
294} 294}
295 295
@@ -298,11 +298,11 @@ static int release(struct socket *sock)
298 * @sock: socket structure 298 * @sock: socket structure
299 * @uaddr: socket address describing name(s) and desired operation 299 * @uaddr: socket address describing name(s) and desired operation
300 * @uaddr_len: size of socket address data structure 300 * @uaddr_len: size of socket address data structure
301 * 301 *
302 * Name and name sequence binding is indicated using a positive scope value; 302 * Name and name sequence binding is indicated using a positive scope value;
303 * a negative scope value unbinds the specified name. Specifying no name 303 * a negative scope value unbinds the specified name. Specifying no name
304 * (i.e. a socket address length of 0) unbinds all names from the socket. 304 * (i.e. a socket address length of 0) unbinds all names from the socket.
305 * 305 *
306 * Returns 0 on success, errno otherwise 306 * Returns 0 on success, errno otherwise
307 */ 307 */
308 308
@@ -314,7 +314,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
314 314
315 if (down_interruptible(&tsock->sem)) 315 if (down_interruptible(&tsock->sem))
316 return -ERESTARTSYS; 316 return -ERESTARTSYS;
317 317
318 if (unlikely(!uaddr_len)) { 318 if (unlikely(!uaddr_len)) {
319 res = tipc_withdraw(tsock->p->ref, 0, NULL); 319 res = tipc_withdraw(tsock->p->ref, 0, NULL);
320 goto exit; 320 goto exit;
@@ -335,8 +335,8 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
335 res = -EAFNOSUPPORT; 335 res = -EAFNOSUPPORT;
336 goto exit; 336 goto exit;
337 } 337 }
338 338
339 if (addr->scope > 0) 339 if (addr->scope > 0)
340 res = tipc_publish(tsock->p->ref, addr->scope, 340 res = tipc_publish(tsock->p->ref, addr->scope,
341 &addr->addr.nameseq); 341 &addr->addr.nameseq);
342 else 342 else
@@ -347,17 +347,17 @@ exit:
347 return res; 347 return res;
348} 348}
349 349
350/** 350/**
351 * get_name - get port ID of socket or peer socket 351 * get_name - get port ID of socket or peer socket
352 * @sock: socket structure 352 * @sock: socket structure
353 * @uaddr: area for returned socket address 353 * @uaddr: area for returned socket address
354 * @uaddr_len: area for returned length of socket address 354 * @uaddr_len: area for returned length of socket address
355 * @peer: 0 to obtain socket name, 1 to obtain peer socket name 355 * @peer: 0 to obtain socket name, 1 to obtain peer socket name
356 * 356 *
357 * Returns 0 on success, errno otherwise 357 * Returns 0 on success, errno otherwise
358 */ 358 */
359 359
360static int get_name(struct socket *sock, struct sockaddr *uaddr, 360static int get_name(struct socket *sock, struct sockaddr *uaddr,
361 int *uaddr_len, int peer) 361 int *uaddr_len, int peer)
362{ 362{
363 struct tipc_sock *tsock = tipc_sk(sock->sk); 363 struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -390,7 +390,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
390 * Returns the pollmask 390 * Returns the pollmask
391 */ 391 */
392 392
393static unsigned int poll(struct file *file, struct socket *sock, 393static unsigned int poll(struct file *file, struct socket *sock,
394 poll_table *wait) 394 poll_table *wait)
395{ 395{
396 poll_wait(file, sock->sk->sk_sleep, wait); 396 poll_wait(file, sock->sk->sk_sleep, wait);
@@ -398,14 +398,14 @@ static unsigned int poll(struct file *file, struct socket *sock,
398 return pollmask(sock); 398 return pollmask(sock);
399} 399}
400 400
401/** 401/**
402 * dest_name_check - verify user is permitted to send to specified port name 402 * dest_name_check - verify user is permitted to send to specified port name
403 * @dest: destination address 403 * @dest: destination address
404 * @m: descriptor for message to be sent 404 * @m: descriptor for message to be sent
405 * 405 *
406 * Prevents restricted configuration commands from being issued by 406 * Prevents restricted configuration commands from being issued by
407 * unauthorized users. 407 * unauthorized users.
408 * 408 *
409 * Returns 0 if permission is granted, otherwise errno 409 * Returns 0 if permission is granted, otherwise errno
410 */ 410 */
411 411
@@ -413,19 +413,19 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
413{ 413{
414 struct tipc_cfg_msg_hdr hdr; 414 struct tipc_cfg_msg_hdr hdr;
415 415
416 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES)) 416 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
417 return 0; 417 return 0;
418 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV)) 418 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
419 return 0; 419 return 0;
420 420
421 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV)) 421 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
422 return -EACCES; 422 return -EACCES;
423 423
424 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr))) 424 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
425 return -EFAULT; 425 return -EFAULT;
426 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN))) 426 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
427 return -EACCES; 427 return -EACCES;
428 428
429 return 0; 429 return 0;
430} 430}
431 431
@@ -435,12 +435,12 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
435 * @sock: socket structure 435 * @sock: socket structure
436 * @m: message to send 436 * @m: message to send
437 * @total_len: length of message 437 * @total_len: length of message
438 * 438 *
439 * Message must have a destination specified explicitly. 439 * Message must have a destination specified explicitly.
440 * Used for SOCK_RDM and SOCK_DGRAM messages, 440 * Used for SOCK_RDM and SOCK_DGRAM messages,
441 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. 441 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
442 * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 442 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
443 * 443 *
444 * Returns the number of bytes sent on success, or errno otherwise 444 * Returns the number of bytes sent on success, or errno otherwise
445 */ 445 */
446 446
@@ -448,7 +448,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
448 struct msghdr *m, size_t total_len) 448 struct msghdr *m, size_t total_len)
449{ 449{
450 struct tipc_sock *tsock = tipc_sk(sock->sk); 450 struct tipc_sock *tsock = tipc_sk(sock->sk);
451 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 451 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
452 struct sk_buff *buf; 452 struct sk_buff *buf;
453 int needs_conn; 453 int needs_conn;
454 int res = -EINVAL; 454 int res = -EINVAL;
@@ -489,61 +489,61 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
489 sock->state = SS_CONNECTING; 489 sock->state = SS_CONNECTING;
490 } 490 }
491 491
492 do { 492 do {
493 if (dest->addrtype == TIPC_ADDR_NAME) { 493 if (dest->addrtype == TIPC_ADDR_NAME) {
494 if ((res = dest_name_check(dest, m))) 494 if ((res = dest_name_check(dest, m)))
495 goto exit; 495 goto exit;
496 res = tipc_send2name(tsock->p->ref, 496 res = tipc_send2name(tsock->p->ref,
497 &dest->addr.name.name, 497 &dest->addr.name.name,
498 dest->addr.name.domain, 498 dest->addr.name.domain,
499 m->msg_iovlen, 499 m->msg_iovlen,
500 m->msg_iov); 500 m->msg_iov);
501 } 501 }
502 else if (dest->addrtype == TIPC_ADDR_ID) { 502 else if (dest->addrtype == TIPC_ADDR_ID) {
503 res = tipc_send2port(tsock->p->ref, 503 res = tipc_send2port(tsock->p->ref,
504 &dest->addr.id, 504 &dest->addr.id,
505 m->msg_iovlen, 505 m->msg_iovlen,
506 m->msg_iov); 506 m->msg_iov);
507 } 507 }
508 else if (dest->addrtype == TIPC_ADDR_MCAST) { 508 else if (dest->addrtype == TIPC_ADDR_MCAST) {
509 if (needs_conn) { 509 if (needs_conn) {
510 res = -EOPNOTSUPP; 510 res = -EOPNOTSUPP;
511 goto exit; 511 goto exit;
512 } 512 }
513 if ((res = dest_name_check(dest, m))) 513 if ((res = dest_name_check(dest, m)))
514 goto exit; 514 goto exit;
515 res = tipc_multicast(tsock->p->ref, 515 res = tipc_multicast(tsock->p->ref,
516 &dest->addr.nameseq, 516 &dest->addr.nameseq,
517 0, 517 0,
518 m->msg_iovlen, 518 m->msg_iovlen,
519 m->msg_iov); 519 m->msg_iov);
520 } 520 }
521 if (likely(res != -ELINKCONG)) { 521 if (likely(res != -ELINKCONG)) {
522exit: 522exit:
523 up(&tsock->sem); 523 up(&tsock->sem);
524 return res; 524 return res;
525 } 525 }
526 if (m->msg_flags & MSG_DONTWAIT) { 526 if (m->msg_flags & MSG_DONTWAIT) {
527 res = -EWOULDBLOCK; 527 res = -EWOULDBLOCK;
528 goto exit; 528 goto exit;
529 } 529 }
530 if (wait_event_interruptible(*sock->sk->sk_sleep, 530 if (wait_event_interruptible(*sock->sk->sk_sleep,
531 !tsock->p->congested)) { 531 !tsock->p->congested)) {
532 res = -ERESTARTSYS; 532 res = -ERESTARTSYS;
533 goto exit; 533 goto exit;
534 } 534 }
535 } while (1); 535 } while (1);
536} 536}
537 537
538/** 538/**
539 * send_packet - send a connection-oriented message 539 * send_packet - send a connection-oriented message
540 * @iocb: (unused) 540 * @iocb: (unused)
541 * @sock: socket structure 541 * @sock: socket structure
542 * @m: message to send 542 * @m: message to send
543 * @total_len: length of message 543 * @total_len: length of message
544 * 544 *
545 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data. 545 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
546 * 546 *
547 * Returns the number of bytes sent on success, or errno otherwise 547 * Returns the number of bytes sent on success, or errno otherwise
548 */ 548 */
549 549
@@ -551,7 +551,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
551 struct msghdr *m, size_t total_len) 551 struct msghdr *m, size_t total_len)
552{ 552{
553 struct tipc_sock *tsock = tipc_sk(sock->sk); 553 struct tipc_sock *tsock = tipc_sk(sock->sk);
554 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 554 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
555 int res; 555 int res;
556 556
557 /* Handle implied connection establishment */ 557 /* Handle implied connection establishment */
@@ -561,45 +561,45 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
561 561
562 if (down_interruptible(&tsock->sem)) { 562 if (down_interruptible(&tsock->sem)) {
563 return -ERESTARTSYS; 563 return -ERESTARTSYS;
564 } 564 }
565 565
566 do { 566 do {
567 if (unlikely(sock->state != SS_CONNECTED)) { 567 if (unlikely(sock->state != SS_CONNECTED)) {
568 if (sock->state == SS_DISCONNECTING) 568 if (sock->state == SS_DISCONNECTING)
569 res = -EPIPE; 569 res = -EPIPE;
570 else 570 else
571 res = -ENOTCONN; 571 res = -ENOTCONN;
572 goto exit; 572 goto exit;
573 } 573 }
574 574
575 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); 575 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
576 if (likely(res != -ELINKCONG)) { 576 if (likely(res != -ELINKCONG)) {
577exit: 577exit:
578 up(&tsock->sem); 578 up(&tsock->sem);
579 return res; 579 return res;
580 } 580 }
581 if (m->msg_flags & MSG_DONTWAIT) { 581 if (m->msg_flags & MSG_DONTWAIT) {
582 res = -EWOULDBLOCK; 582 res = -EWOULDBLOCK;
583 goto exit; 583 goto exit;
584 } 584 }
585 if (wait_event_interruptible(*sock->sk->sk_sleep, 585 if (wait_event_interruptible(*sock->sk->sk_sleep,
586 !tsock->p->congested)) { 586 !tsock->p->congested)) {
587 res = -ERESTARTSYS; 587 res = -ERESTARTSYS;
588 goto exit; 588 goto exit;
589 } 589 }
590 } while (1); 590 } while (1);
591} 591}
592 592
593/** 593/**
594 * send_stream - send stream-oriented data 594 * send_stream - send stream-oriented data
595 * @iocb: (unused) 595 * @iocb: (unused)
596 * @sock: socket structure 596 * @sock: socket structure
597 * @m: data to send 597 * @m: data to send
598 * @total_len: total length of data to be sent 598 * @total_len: total length of data to be sent
599 * 599 *
600 * Used for SOCK_STREAM data. 600 * Used for SOCK_STREAM data.
601 * 601 *
602 * Returns the number of bytes sent on success (or partial success), 602 * Returns the number of bytes sent on success (or partial success),
603 * or errno if no data sent 603 * or errno if no data sent
604 */ 604 */
605 605
@@ -616,26 +616,26 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
616 int bytes_to_send; 616 int bytes_to_send;
617 int bytes_sent; 617 int bytes_sent;
618 int res; 618 int res;
619 619
620 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE)) 620 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
621 return send_packet(iocb, sock, m, total_len); 621 return send_packet(iocb, sock, m, total_len);
622 622
623 /* Can only send large data streams if already connected */ 623 /* Can only send large data streams if already connected */
624 624
625 if (unlikely(sock->state != SS_CONNECTED)) { 625 if (unlikely(sock->state != SS_CONNECTED)) {
626 if (sock->state == SS_DISCONNECTING) 626 if (sock->state == SS_DISCONNECTING)
627 return -EPIPE; 627 return -EPIPE;
628 else 628 else
629 return -ENOTCONN; 629 return -ENOTCONN;
630 } 630 }
631 631
632 if (unlikely(m->msg_name)) 632 if (unlikely(m->msg_name))
633 return -EISCONN; 633 return -EISCONN;
634 634
635 /* 635 /*
636 * Send each iovec entry using one or more messages 636 * Send each iovec entry using one or more messages
637 * 637 *
638 * Note: This algorithm is good for the most likely case 638 * Note: This algorithm is good for the most likely case
639 * (i.e. one large iovec entry), but could be improved to pass sets 639 * (i.e. one large iovec entry), but could be improved to pass sets
640 * of small iovec entries into send_packet(). 640 * of small iovec entries into send_packet().
641 */ 641 */
@@ -657,7 +657,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
657 ? curr_left : TIPC_MAX_USER_MSG_SIZE; 657 ? curr_left : TIPC_MAX_USER_MSG_SIZE;
658 my_iov.iov_base = curr_start; 658 my_iov.iov_base = curr_start;
659 my_iov.iov_len = bytes_to_send; 659 my_iov.iov_len = bytes_to_send;
660 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) { 660 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) {
661 return bytes_sent ? bytes_sent : res; 661 return bytes_sent ? bytes_sent : res;
662 } 662 }
663 curr_left -= bytes_to_send; 663 curr_left -= bytes_to_send;
@@ -676,11 +676,11 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
676 * @sock: socket structure 676 * @sock: socket structure
677 * @tsock: TIPC-specific socket structure 677 * @tsock: TIPC-specific socket structure
678 * @msg: peer's response message 678 * @msg: peer's response message
679 * 679 *
680 * Returns 0 on success, errno otherwise 680 * Returns 0 on success, errno otherwise
681 */ 681 */
682 682
683static int auto_connect(struct socket *sock, struct tipc_sock *tsock, 683static int auto_connect(struct socket *sock, struct tipc_sock *tsock,
684 struct tipc_msg *msg) 684 struct tipc_msg *msg)
685{ 685{
686 struct tipc_portid peer; 686 struct tipc_portid peer;
@@ -702,15 +702,15 @@ static int auto_connect(struct socket *sock, struct tipc_sock *tsock,
702 * set_orig_addr - capture sender's address for received message 702 * set_orig_addr - capture sender's address for received message
703 * @m: descriptor for message info 703 * @m: descriptor for message info
704 * @msg: received message header 704 * @msg: received message header
705 * 705 *
706 * Note: Address is not captured if not requested by receiver. 706 * Note: Address is not captured if not requested by receiver.
707 */ 707 */
708 708
709static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) 709static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
710{ 710{
711 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name; 711 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
712 712
713 if (addr) { 713 if (addr) {
714 addr->family = AF_TIPC; 714 addr->family = AF_TIPC;
715 addr->addrtype = TIPC_ADDR_ID; 715 addr->addrtype = TIPC_ADDR_ID;
716 addr->addr.id.ref = msg_origport(msg); 716 addr->addr.id.ref = msg_origport(msg);
@@ -722,13 +722,13 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
722} 722}
723 723
724/** 724/**
725 * anc_data_recv - optionally capture ancillary data for received message 725 * anc_data_recv - optionally capture ancillary data for received message
726 * @m: descriptor for message info 726 * @m: descriptor for message info
727 * @msg: received message header 727 * @msg: received message header
728 * @tport: TIPC port associated with message 728 * @tport: TIPC port associated with message
729 * 729 *
730 * Note: Ancillary data is not captured if not requested by receiver. 730 * Note: Ancillary data is not captured if not requested by receiver.
731 * 731 *
732 * Returns 0 if successful, otherwise errno 732 * Returns 0 if successful, otherwise errno
733 */ 733 */
734 734
@@ -753,7 +753,7 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
753 if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data))) 753 if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
754 return res; 754 return res;
755 if (anc_data[1] && 755 if (anc_data[1] &&
756 (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 756 (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
757 msg_data(msg)))) 757 msg_data(msg))))
758 return res; 758 return res;
759 } 759 }
@@ -790,13 +790,13 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
790 return 0; 790 return 0;
791} 791}
792 792
793/** 793/**
794 * recv_msg - receive packet-oriented message 794 * recv_msg - receive packet-oriented message
795 * @iocb: (unused) 795 * @iocb: (unused)
796 * @m: descriptor for message info 796 * @m: descriptor for message info
797 * @buf_len: total size of user buffer area 797 * @buf_len: total size of user buffer area
798 * @flags: receive flags 798 * @flags: receive flags
799 * 799 *
800 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. 800 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
801 * If the complete message doesn't fit in user area, truncate it. 801 * If the complete message doesn't fit in user area, truncate it.
802 * 802 *
@@ -827,9 +827,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
827 if (sock->type == SOCK_SEQPACKET) { 827 if (sock->type == SOCK_SEQPACKET) {
828 if (unlikely(sock->state == SS_UNCONNECTED)) 828 if (unlikely(sock->state == SS_UNCONNECTED))
829 return -ENOTCONN; 829 return -ENOTCONN;
830 if (unlikely((sock->state == SS_DISCONNECTING) && 830 if (unlikely((sock->state == SS_DISCONNECTING) &&
831 (skb_queue_len(&sock->sk->sk_receive_queue) == 0))) 831 (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
832 return -ENOTCONN; 832 return -ENOTCONN;
833 } 833 }
834 834
835 /* Look for a message in receive queue; wait if necessary */ 835 /* Look for a message in receive queue; wait if necessary */
@@ -845,7 +845,7 @@ restart:
845 } 845 }
846 846
847 if ((res = wait_event_interruptible( 847 if ((res = wait_event_interruptible(
848 *sock->sk->sk_sleep, 848 *sock->sk->sk_sleep,
849 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) || 849 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
850 (sock->state == SS_DISCONNECTING))) )) { 850 (sock->state == SS_DISCONNECTING))) )) {
851 goto exit; 851 goto exit;
@@ -890,7 +890,7 @@ restart:
890 goto exit; 890 goto exit;
891 891
892 /* Capture message data (if valid) & compute return value (always) */ 892 /* Capture message data (if valid) & compute return value (always) */
893 893
894 if (!err) { 894 if (!err) {
895 if (unlikely(buf_len < sz)) { 895 if (unlikely(buf_len < sz)) {
896 sz = buf_len; 896 sz = buf_len;
@@ -913,23 +913,23 @@ restart:
913 /* Consume received message (optional) */ 913 /* Consume received message (optional) */
914 914
915 if (likely(!(flags & MSG_PEEK))) { 915 if (likely(!(flags & MSG_PEEK))) {
916 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 916 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
917 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked); 917 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
918 advance_queue(tsock); 918 advance_queue(tsock);
919 } 919 }
920exit: 920exit:
921 up(&tsock->sem); 921 up(&tsock->sem);
922 return res; 922 return res;
923} 923}
924 924
925/** 925/**
926 * recv_stream - receive stream-oriented data 926 * recv_stream - receive stream-oriented data
927 * @iocb: (unused) 927 * @iocb: (unused)
928 * @m: descriptor for message info 928 * @m: descriptor for message info
929 * @buf_len: total size of user buffer area 929 * @buf_len: total size of user buffer area
930 * @flags: receive flags 930 * @flags: receive flags
931 * 931 *
932 * Used for SOCK_STREAM messages only. If not enough data is available 932 * Used for SOCK_STREAM messages only. If not enough data is available
933 * will optionally wait for more; never truncates data. 933 * will optionally wait for more; never truncates data.
934 * 934 *
935 * Returns size of returned message data, errno otherwise 935 * Returns size of returned message data, errno otherwise
@@ -980,7 +980,7 @@ restart:
980 } 980 }
981 981
982 if ((res = wait_event_interruptible( 982 if ((res = wait_event_interruptible(
983 *sock->sk->sk_sleep, 983 *sock->sk->sk_sleep,
984 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) || 984 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
985 (sock->state == SS_DISCONNECTING))) )) { 985 (sock->state == SS_DISCONNECTING))) )) {
986 goto exit; 986 goto exit;
@@ -1017,7 +1017,7 @@ restart:
1017 } 1017 }
1018 1018
1019 /* Capture message data (if valid) & compute return value (always) */ 1019 /* Capture message data (if valid) & compute return value (always) */
1020 1020
1021 if (!err) { 1021 if (!err) {
1022 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle); 1022 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
1023 sz = buf->tail - buf_crs; 1023 sz = buf->tail - buf_crs;
@@ -1050,14 +1050,14 @@ restart:
1050 /* Consume received message (optional) */ 1050 /* Consume received message (optional) */
1051 1051
1052 if (likely(!(flags & MSG_PEEK))) { 1052 if (likely(!(flags & MSG_PEEK))) {
1053 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1053 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1054 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked); 1054 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
1055 advance_queue(tsock); 1055 advance_queue(tsock);
1056 } 1056 }
1057 1057
1058 /* Loop around if more data is required */ 1058 /* Loop around if more data is required */
1059 1059
1060 if ((sz_copied < buf_len) /* didn't get all requested data */ 1060 if ((sz_copied < buf_len) /* didn't get all requested data */
1061 && (flags & MSG_WAITALL) /* ... and need to wait for more */ 1061 && (flags & MSG_WAITALL) /* ... and need to wait for more */
1062 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */ 1062 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
1063 && (!err) /* ... and haven't reached a FIN */ 1063 && (!err) /* ... and haven't reached a FIN */
@@ -1074,7 +1074,7 @@ exit:
1074 * @queue_size: current size of queue 1074 * @queue_size: current size of queue
1075 * @base: nominal maximum size of queue 1075 * @base: nominal maximum size of queue
1076 * @msg: message to be added to queue 1076 * @msg: message to be added to queue
1077 * 1077 *
1078 * Returns 1 if queue is currently overloaded, 0 otherwise 1078 * Returns 1 if queue is currently overloaded, 0 otherwise
1079 */ 1079 */
1080 1080
@@ -1098,7 +1098,7 @@ static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
1098 return (queue_size > threshold); 1098 return (queue_size > threshold);
1099} 1099}
1100 1100
1101/** 1101/**
1102 * async_disconnect - wrapper function used to disconnect port 1102 * async_disconnect - wrapper function used to disconnect port
1103 * @portref: TIPC port reference (passed as pointer-sized value) 1103 * @portref: TIPC port reference (passed as pointer-sized value)
1104 */ 1104 */
@@ -1108,13 +1108,13 @@ static void async_disconnect(unsigned long portref)
1108 tipc_disconnect((u32)portref); 1108 tipc_disconnect((u32)portref);
1109} 1109}
1110 1110
1111/** 1111/**
1112 * dispatch - handle arriving message 1112 * dispatch - handle arriving message
1113 * @tport: TIPC port that received message 1113 * @tport: TIPC port that received message
1114 * @buf: message 1114 * @buf: message
1115 * 1115 *
1116 * Called with port locked. Must not take socket lock to avoid deadlock risk. 1116 * Called with port locked. Must not take socket lock to avoid deadlock risk.
1117 * 1117 *
1118 * Returns TIPC error status code (TIPC_OK if message is not to be rejected) 1118 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1119 */ 1119 */
1120 1120
@@ -1159,13 +1159,13 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1159 msg_dbg(msg, "dispatch filter 4\n"); 1159 msg_dbg(msg, "dispatch filter 4\n");
1160 return TIPC_ERR_NO_PORT; 1160 return TIPC_ERR_NO_PORT;
1161 } 1161 }
1162 } 1162 }
1163 else if (sock->state == SS_LISTENING) { 1163 else if (sock->state == SS_LISTENING) {
1164 if (msg_connected(msg) || msg_errcode(msg)) { 1164 if (msg_connected(msg) || msg_errcode(msg)) {
1165 msg_dbg(msg, "dispatch filter 5\n"); 1165 msg_dbg(msg, "dispatch filter 5\n");
1166 return TIPC_ERR_NO_PORT; 1166 return TIPC_ERR_NO_PORT;
1167 } 1167 }
1168 } 1168 }
1169 else if (sock->state == SS_DISCONNECTING) { 1169 else if (sock->state == SS_DISCONNECTING) {
1170 msg_dbg(msg, "dispatch filter 6\n"); 1170 msg_dbg(msg, "dispatch filter 6\n");
1171 return TIPC_ERR_NO_PORT; 1171 return TIPC_ERR_NO_PORT;
@@ -1180,18 +1180,18 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1180 1180
1181 /* Reject message if there isn't room to queue it */ 1181 /* Reject message if there isn't room to queue it */
1182 1182
1183 if (unlikely((u32)atomic_read(&tipc_queue_size) > 1183 if (unlikely((u32)atomic_read(&tipc_queue_size) >
1184 OVERLOAD_LIMIT_BASE)) { 1184 OVERLOAD_LIMIT_BASE)) {
1185 if (queue_overloaded(atomic_read(&tipc_queue_size), 1185 if (queue_overloaded(atomic_read(&tipc_queue_size),
1186 OVERLOAD_LIMIT_BASE, msg)) 1186 OVERLOAD_LIMIT_BASE, msg))
1187 return TIPC_ERR_OVERLOAD; 1187 return TIPC_ERR_OVERLOAD;
1188 } 1188 }
1189 recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue); 1189 recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
1190 if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) { 1190 if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
1191 if (queue_overloaded(recv_q_len, 1191 if (queue_overloaded(recv_q_len,
1192 OVERLOAD_LIMIT_BASE / 2, msg)) 1192 OVERLOAD_LIMIT_BASE / 2, msg))
1193 return TIPC_ERR_OVERLOAD; 1193 return TIPC_ERR_OVERLOAD;
1194 } 1194 }
1195 1195
1196 /* Initiate connection termination for an incoming 'FIN' */ 1196 /* Initiate connection termination for an incoming 'FIN' */
1197 1197
@@ -1213,10 +1213,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1213 return TIPC_OK; 1213 return TIPC_OK;
1214} 1214}
1215 1215
1216/** 1216/**
1217 * wakeupdispatch - wake up port after congestion 1217 * wakeupdispatch - wake up port after congestion
1218 * @tport: port to wakeup 1218 * @tport: port to wakeup
1219 * 1219 *
1220 * Called with port lock on. 1220 * Called with port lock on.
1221 */ 1221 */
1222 1222
@@ -1238,7 +1238,7 @@ static void wakeupdispatch(struct tipc_port *tport)
1238 * Returns 0 on success, errno otherwise 1238 * Returns 0 on success, errno otherwise
1239 */ 1239 */
1240 1240
1241static int connect(struct socket *sock, struct sockaddr *dest, int destlen, 1241static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1242 int flags) 1242 int flags)
1243{ 1243{
1244 struct tipc_sock *tsock = tipc_sk(sock->sk); 1244 struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -1260,7 +1260,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1260 if (sock->state == SS_CONNECTING) 1260 if (sock->state == SS_CONNECTING)
1261 return -EALREADY; 1261 return -EALREADY;
1262 if (sock->state != SS_UNCONNECTED) 1262 if (sock->state != SS_UNCONNECTED)
1263 return -EISCONN; 1263 return -EISCONN;
1264 1264
1265 /* 1265 /*
1266 * Reject connection attempt using multicast address 1266 * Reject connection attempt using multicast address
@@ -1270,7 +1270,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1270 */ 1270 */
1271 1271
1272 if (dst->addrtype == TIPC_ADDR_MCAST) 1272 if (dst->addrtype == TIPC_ADDR_MCAST)
1273 return -EINVAL; 1273 return -EINVAL;
1274 1274
1275 /* Send a 'SYN-' to destination */ 1275 /* Send a 'SYN-' to destination */
1276 1276
@@ -1281,19 +1281,19 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1281 return res; 1281 return res;
1282 } 1282 }
1283 1283
1284 if (down_interruptible(&tsock->sem)) 1284 if (down_interruptible(&tsock->sem))
1285 return -ERESTARTSYS; 1285 return -ERESTARTSYS;
1286 1286
1287 /* Wait for destination's 'ACK' response */ 1287 /* Wait for destination's 'ACK' response */
1288 1288
1289 res = wait_event_interruptible_timeout(*sock->sk->sk_sleep, 1289 res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
1290 skb_queue_len(&sock->sk->sk_receive_queue), 1290 skb_queue_len(&sock->sk->sk_receive_queue),
1291 sock->sk->sk_rcvtimeo); 1291 sock->sk->sk_rcvtimeo);
1292 buf = skb_peek(&sock->sk->sk_receive_queue); 1292 buf = skb_peek(&sock->sk->sk_receive_queue);
1293 if (res > 0) { 1293 if (res > 0) {
1294 msg = buf_msg(buf); 1294 msg = buf_msg(buf);
1295 res = auto_connect(sock, tsock, msg); 1295 res = auto_connect(sock, tsock, msg);
1296 if (!res) { 1296 if (!res) {
1297 if (!msg_data_sz(msg)) 1297 if (!msg_data_sz(msg))
1298 advance_queue(tsock); 1298 advance_queue(tsock);
1299 } 1299 }
@@ -1301,7 +1301,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1301 if (res == 0) { 1301 if (res == 0) {
1302 res = -ETIMEDOUT; 1302 res = -ETIMEDOUT;
1303 } else 1303 } else
1304 { /* leave "res" unchanged */ } 1304 { /* leave "res" unchanged */ }
1305 sock->state = SS_DISCONNECTING; 1305 sock->state = SS_DISCONNECTING;
1306 } 1306 }
1307 1307
@@ -1309,11 +1309,11 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1309 return res; 1309 return res;
1310} 1310}
1311 1311
1312/** 1312/**
1313 * listen - allow socket to listen for incoming connections 1313 * listen - allow socket to listen for incoming connections
1314 * @sock: socket structure 1314 * @sock: socket structure
1315 * @len: (unused) 1315 * @len: (unused)
1316 * 1316 *
1317 * Returns 0 on success, errno otherwise 1317 * Returns 0 on success, errno otherwise
1318 */ 1318 */
1319 1319
@@ -1326,15 +1326,15 @@ static int listen(struct socket *sock, int len)
1326 if (sock->state != SS_UNCONNECTED) 1326 if (sock->state != SS_UNCONNECTED)
1327 return -EINVAL; 1327 return -EINVAL;
1328 sock->state = SS_LISTENING; 1328 sock->state = SS_LISTENING;
1329 return 0; 1329 return 0;
1330} 1330}
1331 1331
1332/** 1332/**
1333 * accept - wait for connection request 1333 * accept - wait for connection request
1334 * @sock: listening socket 1334 * @sock: listening socket
1335 * @newsock: new socket that is to be connected 1335 * @newsock: new socket that is to be connected
1336 * @flags: file-related flags associated with socket 1336 * @flags: file-related flags associated with socket
1337 * 1337 *
1338 * Returns 0 on success, errno otherwise 1338 * Returns 0 on success, errno otherwise
1339 */ 1339 */
1340 1340
@@ -1348,15 +1348,15 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
1348 return -EOPNOTSUPP; 1348 return -EOPNOTSUPP;
1349 if (sock->state != SS_LISTENING) 1349 if (sock->state != SS_LISTENING)
1350 return -EINVAL; 1350 return -EINVAL;
1351 1351
1352 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) && 1352 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
1353 (flags & O_NONBLOCK))) 1353 (flags & O_NONBLOCK)))
1354 return -EWOULDBLOCK; 1354 return -EWOULDBLOCK;
1355 1355
1356 if (down_interruptible(&tsock->sem)) 1356 if (down_interruptible(&tsock->sem))
1357 return -ERESTARTSYS; 1357 return -ERESTARTSYS;
1358 1358
1359 if (wait_event_interruptible(*sock->sk->sk_sleep, 1359 if (wait_event_interruptible(*sock->sk->sk_sleep,
1360 skb_queue_len(&sock->sk->sk_receive_queue))) { 1360 skb_queue_len(&sock->sk->sk_receive_queue))) {
1361 res = -ERESTARTSYS; 1361 res = -ERESTARTSYS;
1362 goto exit; 1362 goto exit;
@@ -1381,18 +1381,18 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
1381 new_tsock->p->conn_instance = msg_nameinst(msg); 1381 new_tsock->p->conn_instance = msg_nameinst(msg);
1382 } 1382 }
1383 1383
1384 /* 1384 /*
1385 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 1385 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1386 * Respond to 'SYN+' by queuing it on new socket. 1386 * Respond to 'SYN+' by queuing it on new socket.
1387 */ 1387 */
1388 1388
1389 msg_dbg(msg,"<ACC<: "); 1389 msg_dbg(msg,"<ACC<: ");
1390 if (!msg_data_sz(msg)) { 1390 if (!msg_data_sz(msg)) {
1391 struct msghdr m = {NULL,}; 1391 struct msghdr m = {NULL,};
1392 1392
1393 send_packet(NULL, newsock, &m, 0); 1393 send_packet(NULL, newsock, &m, 0);
1394 advance_queue(tsock); 1394 advance_queue(tsock);
1395 } else { 1395 } else {
1396 sock_lock(tsock); 1396 sock_lock(tsock);
1397 skb_dequeue(&sock->sk->sk_receive_queue); 1397 skb_dequeue(&sock->sk->sk_receive_queue);
1398 sock_unlock(tsock); 1398 sock_unlock(tsock);
@@ -1410,7 +1410,7 @@ exit:
1410 * @how: direction to close (unused; always treated as read + write)
1411 *
1412 * Terminates connection (if necessary), then purges socket's receive queue.
1413 *
1414 * Returns 0 on success, errno otherwise
1415 */
1416
@@ -1483,32 +1483,32 @@ restart:
1483 * @opt: option identifier
1484 * @ov: pointer to new option value
1485 * @ol: length of option value
1486 *
1487 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1488 * (to ease compatibility).
1489 *
1490 * Returns 0 on success, errno otherwise
1491 */
1492
1493static int setsockopt(struct socket *sock,
1494 		int lvl, int opt, char __user *ov, int ol)
1495{
1496 	struct tipc_sock *tsock = tipc_sk(sock->sk);
1497 	u32 value;
1498 	int res;
1499
1500 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1501 		return 0;
1502 	if (lvl != SOL_TIPC)
1503 		return -ENOPROTOOPT;
1504 	if (ol < sizeof(value))
1505 		return -EINVAL;
1506 	if ((res = get_user(value, (u32 __user *)ov)))
1507 		return res;
1508
1509 	if (down_interruptible(&tsock->sem))
1510 		return -ERESTARTSYS;
1511
1512 	switch (opt) {
1513 	case TIPC_IMPORTANCE:
1514 		res = tipc_set_portimportance(tsock->p->ref, value);
@@ -1516,7 +1516,7 @@ static int setsockopt(struct socket *sock,
1516 	case TIPC_SRC_DROPPABLE:
1517 		if (sock->type != SOCK_STREAM)
1518 			res = tipc_set_portunreliable(tsock->p->ref, value);
1519 		else
1520 			res = -ENOPROTOOPT;
1521 		break;
1522 	case TIPC_DEST_DROPPABLE:
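
As a rough user-space illustration of how this option parsing is driven (not part of the patch; the header name and the TIPC_HIGH_IMPORTANCE constant are assumptions taken from linux/tipc.h):

	#include <sys/socket.h>
	#include <linux/tipc.h>

	int sd = socket(AF_TIPC, SOCK_RDM, 0);
	__u32 imp = TIPC_HIGH_IMPORTANCE;

	/* reaches the TIPC_IMPORTANCE case in the switch above */
	if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)) < 0)
		perror("setsockopt");

	/* per the comment above, any IPPROTO_TCP option on a SOCK_STREAM
	 * TIPC socket is accepted and silently ignored */
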
@@ -1540,29 +1540,29 @@ static int setsockopt(struct socket *sock,
1540 * @opt: option identifier
1541 * @ov: receptacle for option value
1542 * @ol: receptacle for length of option value
1543 *
1544 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1545 * (to ease compatibility).
1546 *
1547 * Returns 0 on success, errno otherwise
1548 */
1549
1550static int getsockopt(struct socket *sock,
1551 		int lvl, int opt, char __user *ov, int __user *ol)
1552{
1553 	struct tipc_sock *tsock = tipc_sk(sock->sk);
1554 	int len;
1555 	u32 value;
1556 	int res;
1557
1558 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1559 		return put_user(0, ol);
1560 	if (lvl != SOL_TIPC)
1561 		return -ENOPROTOOPT;
1562 	if ((res = get_user(len, ol)))
1563 		return res;
1564
1565 	if (down_interruptible(&tsock->sem))
1566 		return -ERESTARTSYS;
1567
1568 	switch (opt) {
@@ -1595,30 +1595,30 @@ static int getsockopt(struct socket *sock,
1595 res = put_user(sizeof(value), ol); 1595 res = put_user(sizeof(value), ol);
1596 } 1596 }
1597 1597
1598 up(&tsock->sem); 1598 up(&tsock->sem);
1599 return res; 1599 return res;
1600} 1600}
1601 1601
1602/** 1602/**
1603 * Placeholders for non-implemented functionality 1603 * Placeholders for non-implemented functionality
1604 * 1604 *
1605 * Returns error code (POSIX-compliant where defined) 1605 * Returns error code (POSIX-compliant where defined)
1606 */ 1606 */
1607 1607
1608static int ioctl(struct socket *s, u32 cmd, unsigned long arg) 1608static int ioctl(struct socket *s, u32 cmd, unsigned long arg)
1609{ 1609{
1610 return -EINVAL; 1610 return -EINVAL;
1611} 1611}
1612 1612
1613static int no_mmap(struct file *file, struct socket *sock, 1613static int no_mmap(struct file *file, struct socket *sock,
1614 struct vm_area_struct *vma) 1614 struct vm_area_struct *vma)
1615{ 1615{
1616 return -EINVAL; 1616 return -EINVAL;
1617} 1617}
1618static ssize_t no_sendpage(struct socket *sock, struct page *page, 1618static ssize_t no_sendpage(struct socket *sock, struct page *page,
1619 int offset, size_t size, int flags) 1619 int offset, size_t size, int flags)
1620{ 1620{
1621 return -EINVAL; 1621 return -EINVAL;
1622} 1622}
1623 1623
1624static int no_skpair(struct socket *s1, struct socket *s2) 1624static int no_skpair(struct socket *s1, struct socket *s2)
@@ -1647,8 +1647,8 @@ static struct proto_ops msg_ops = {
1647 .getsockopt = getsockopt, 1647 .getsockopt = getsockopt,
1648 .sendmsg = send_msg, 1648 .sendmsg = send_msg,
1649 .recvmsg = recv_msg, 1649 .recvmsg = recv_msg,
1650 .mmap = no_mmap, 1650 .mmap = no_mmap,
1651 .sendpage = no_sendpage 1651 .sendpage = no_sendpage
1652}; 1652};
1653 1653
1654static struct proto_ops packet_ops = { 1654static struct proto_ops packet_ops = {
@@ -1668,8 +1668,8 @@ static struct proto_ops packet_ops = {
1668 .getsockopt = getsockopt, 1668 .getsockopt = getsockopt,
1669 .sendmsg = send_packet, 1669 .sendmsg = send_packet,
1670 .recvmsg = recv_msg, 1670 .recvmsg = recv_msg,
1671 .mmap = no_mmap, 1671 .mmap = no_mmap,
1672 .sendpage = no_sendpage 1672 .sendpage = no_sendpage
1673}; 1673};
1674 1674
1675static struct proto_ops stream_ops = { 1675static struct proto_ops stream_ops = {
@@ -1689,8 +1689,8 @@ static struct proto_ops stream_ops = {
1689 .getsockopt = getsockopt, 1689 .getsockopt = getsockopt,
1690 .sendmsg = send_stream, 1690 .sendmsg = send_stream,
1691 .recvmsg = recv_stream, 1691 .recvmsg = recv_stream,
1692 .mmap = no_mmap, 1692 .mmap = no_mmap,
1693 .sendpage = no_sendpage 1693 .sendpage = no_sendpage
1694}; 1694};
1695 1695
1696static struct net_proto_family tipc_family_ops = { 1696static struct net_proto_family tipc_family_ops = {
@@ -1707,14 +1707,14 @@ static struct proto tipc_proto = {
1707 1707
1708/** 1708/**
1709 * tipc_socket_init - initialize TIPC socket interface 1709 * tipc_socket_init - initialize TIPC socket interface
1710 * 1710 *
1711 * Returns 0 on success, errno otherwise 1711 * Returns 0 on success, errno otherwise
1712 */ 1712 */
1713int tipc_socket_init(void) 1713int tipc_socket_init(void)
1714{ 1714{
1715 int res; 1715 int res;
1716 1716
1717 res = proto_register(&tipc_proto, 1); 1717 res = proto_register(&tipc_proto, 1);
1718 if (res) { 1718 if (res) {
1719 err("Failed to register TIPC protocol type\n"); 1719 err("Failed to register TIPC protocol type\n");
1720 goto out; 1720 goto out;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ddade7388aa0..8c01ccd3626c 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/subscr.c: TIPC subscription service 2 * net/tipc/subscr.c: TIPC subscription service
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -49,10 +49,10 @@
49 * @port_ref: object reference to port used to communicate with subscriber 49 * @port_ref: object reference to port used to communicate with subscriber
50 * @swap: indicates if subscriber uses opposite endianness in its messages 50 * @swap: indicates if subscriber uses opposite endianness in its messages
51 */ 51 */
52 52
53struct subscriber { 53struct subscriber {
54 u32 ref; 54 u32 ref;
55 spinlock_t *lock; 55 spinlock_t *lock;
56 struct list_head subscriber_list; 56 struct list_head subscriber_list;
57 struct list_head subscription_list; 57 struct list_head subscription_list;
58 u32 port_ref; 58 u32 port_ref;
@@ -82,7 +82,7 @@ static struct top_srv topsrv = { 0 };
82 * htohl - convert value to endianness used by destination
83 * @in: value to convert
84 * @swap: non-zero if endianness must be reversed
85 *
86 * Returns converted value
87 */
88
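
The body of htohl() is outside this hunk; given the kernel-doc above it amounts to a conditional byte swap, roughly like the following sketch (illustrative only, not the code in this patch, using the kernel's swab32() helper):

	static u32 htohl(u32 in, int swap)
	{
		/* reverse the byte order only when the subscriber's
		 * endianness differs from ours */
		return swap ? swab32(in) : in;
	}
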
@@ -97,11 +97,11 @@ static u32 htohl(u32 in, int swap)
97 * subscr_send_event - send a message containing a tipc_event to the subscriber 97 * subscr_send_event - send a message containing a tipc_event to the subscriber
98 */ 98 */
99 99
100static void subscr_send_event(struct subscription *sub, 100static void subscr_send_event(struct subscription *sub,
101 u32 found_lower, 101 u32 found_lower,
102 u32 found_upper, 102 u32 found_upper,
103 u32 event, 103 u32 event,
104 u32 port_ref, 104 u32 port_ref,
105 u32 node) 105 u32 node)
106{ 106{
107 struct iovec msg_sect; 107 struct iovec msg_sect;
@@ -123,8 +123,8 @@ static void subscr_send_event(struct subscription *sub,
123 * Returns 1 if there is overlap, otherwise 0.
124 */
125
126int tipc_subscr_overlap(struct subscription *sub,
127 			u32 found_lower,
128 			u32 found_upper)
129
130{
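
The overlap test itself is not shown in this hunk; conceptually it clamps the reported range to the subscribed name sequence and reports overlap only if something is left, along these lines (a sketch of the idea, not the patched lines):

	/* clamp [found_lower, found_upper] to [sub->seq.lower, sub->seq.upper] */
	if (found_lower < sub->seq.lower)
		found_lower = sub->seq.lower;
	if (found_upper > sub->seq.upper)
		found_upper = sub->seq.upper;
	return found_lower <= found_upper;
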
@@ -139,15 +139,15 @@ int tipc_subscr_overlap(struct subscription *sub,
139 139
140/** 140/**
141 * tipc_subscr_report_overlap - issue event if there is subscription overlap 141 * tipc_subscr_report_overlap - issue event if there is subscription overlap
142 * 142 *
143 * Protected by nameseq.lock in name_table.c 143 * Protected by nameseq.lock in name_table.c
144 */ 144 */
145 145
146void tipc_subscr_report_overlap(struct subscription *sub, 146void tipc_subscr_report_overlap(struct subscription *sub,
147 u32 found_lower, 147 u32 found_lower,
148 u32 found_upper, 148 u32 found_upper,
149 u32 event, 149 u32 event,
150 u32 port_ref, 150 u32 port_ref,
151 u32 node, 151 u32 node,
152 int must) 152 int must)
153{ 153{
@@ -189,11 +189,11 @@ static void subscr_timeout(struct subscription *sub)
189 189
190 /* Notify subscriber of timeout, then unlink subscription */ 190 /* Notify subscriber of timeout, then unlink subscription */
191 191
192 subscr_send_event(sub, 192 subscr_send_event(sub,
193 sub->evt.s.seq.lower, 193 sub->evt.s.seq.lower,
194 sub->evt.s.seq.upper, 194 sub->evt.s.seq.upper,
195 TIPC_SUBSCR_TIMEOUT, 195 TIPC_SUBSCR_TIMEOUT,
196 0, 196 0,
197 0); 197 0);
198 list_del(&sub->subscription_list); 198 list_del(&sub->subscription_list);
199 199
@@ -221,11 +221,11 @@ static void subscr_del(struct subscription *sub)
221
222/**
223 * subscr_terminate - terminate communication with a subscriber
224 *
225 * Called with subscriber locked. Routine must temporarily release this lock
226 * to enable subscription timeout routine(s) to finish without deadlocking;
227 * the lock is then reclaimed to allow caller to release it upon return.
228 * (This should work even in the unlikely event some other thread creates
229 * a new object reference in the interim that uses this lock; this routine will
230 * simply wait for it to be released, then claim it.)
231 */
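
The lock dance described above reduces to a release/re-acquire bracket around the teardown work, roughly (simplified sketch; only the spin_unlock_bh() call is visible in the hunk below):

	spin_unlock_bh(subscriber->lock);	/* let pending subscr_timeout() calls finish */
	/* ... cancel timers and delete this subscriber's subscriptions ... */
	spin_lock_bh(subscriber->lock);		/* reclaim so the caller can release it */
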
@@ -241,7 +241,7 @@ static void subscr_terminate(struct subscriber *subscriber)
241 spin_unlock_bh(subscriber->lock); 241 spin_unlock_bh(subscriber->lock);
242 242
243 /* Destroy any existing subscriptions for subscriber */ 243 /* Destroy any existing subscriptions for subscriber */
244 244
245 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 245 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
246 subscription_list) { 246 subscription_list) {
247 if (sub->timeout != TIPC_WAIT_FOREVER) { 247 if (sub->timeout != TIPC_WAIT_FOREVER) {
@@ -315,7 +315,7 @@ static void subscr_cancel(struct tipc_subscr *s,
315 315
316/** 316/**
317 * subscr_subscribe - create subscription for subscriber 317 * subscr_subscribe - create subscription for subscriber
318 * 318 *
319 * Called with subscriber locked 319 * Called with subscriber locked
320 */ 320 */
321 321
@@ -431,7 +431,7 @@ static void subscr_conn_msg_event(void *usr_handle,
431 subscr_terminate(subscriber); 431 subscr_terminate(subscriber);
432 else 432 else
433 subscr_subscribe((struct tipc_subscr *)data, subscriber); 433 subscr_subscribe((struct tipc_subscr *)data, subscriber);
434 434
435 spin_unlock_bh(subscriber_lock); 435 spin_unlock_bh(subscriber_lock);
436} 436}
437 437
@@ -444,7 +444,7 @@ static void subscr_named_msg_event(void *usr_handle,
444 struct sk_buff **buf, 444 struct sk_buff **buf,
445 const unchar *data, 445 const unchar *data,
446 u32 size, 446 u32 size,
447 u32 importance, 447 u32 importance,
448 struct tipc_portid const *orig, 448 struct tipc_portid const *orig,
449 struct tipc_name_seq const *dest) 449 struct tipc_name_seq const *dest)
450{ 450{
@@ -534,22 +534,22 @@ int tipc_subscr_start(void)
534 return res; 534 return res;
535 } 535 }
536 536
537 res = tipc_createport(topsrv.user_ref, 537 res = tipc_createport(topsrv.user_ref,
538 NULL, 538 NULL,
539 TIPC_CRITICAL_IMPORTANCE, 539 TIPC_CRITICAL_IMPORTANCE,
540 NULL, 540 NULL,
541 NULL, 541 NULL,
542 NULL, 542 NULL,
543 NULL, 543 NULL,
544 subscr_named_msg_event, 544 subscr_named_msg_event,
545 NULL, 545 NULL,
546 NULL, 546 NULL,
547 &topsrv.setup_port); 547 &topsrv.setup_port);
548 if (res) 548 if (res)
549 goto failed; 549 goto failed;
550 550
551 res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq); 551 res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
552 if (res) 552 if (res)
553 goto failed; 553 goto failed;
554 554
555 spin_unlock_bh(&topsrv.lock); 555 spin_unlock_bh(&topsrv.lock);
@@ -571,7 +571,7 @@ void tipc_subscr_stop(void)
571 571
572 if (topsrv.user_ref) { 572 if (topsrv.user_ref) {
573 tipc_deleteport(topsrv.setup_port); 573 tipc_deleteport(topsrv.setup_port);
574 list_for_each_entry_safe(subscriber, subscriber_temp, 574 list_for_each_entry_safe(subscriber, subscriber_temp,
575 &topsrv.subscriber_list, 575 &topsrv.subscriber_list,
576 subscriber_list) { 576 subscriber_list) {
577 tipc_ref_lock(subscriber->ref); 577 tipc_ref_lock(subscriber->ref);
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 1e5090465d2e..93a8e674fac1 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/subscr.h: Include file for TIPC subscription service 2 * net/tipc/subscr.h: Include file for TIPC subscription service
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -48,7 +48,7 @@
48 * @timer_ref: reference to timer governing subscription duration (may be NULL) 48 * @timer_ref: reference to timer governing subscription duration (may be NULL)
49 * @owner: pointer to subscriber object associated with this subscription 49 * @owner: pointer to subscriber object associated with this subscription
50 */ 50 */
51 51
52struct subscription { 52struct subscription {
53 struct tipc_name_seq seq; 53 struct tipc_name_seq seq;
54 u32 timeout; 54 u32 timeout;
@@ -60,15 +60,15 @@ struct subscription {
60 struct subscriber *owner; 60 struct subscriber *owner;
61}; 61};
62 62
63int tipc_subscr_overlap(struct subscription * sub, 63int tipc_subscr_overlap(struct subscription * sub,
64 u32 found_lower, 64 u32 found_lower,
65 u32 found_upper); 65 u32 found_upper);
66 66
67void tipc_subscr_report_overlap(struct subscription * sub, 67void tipc_subscr_report_overlap(struct subscription * sub,
68 u32 found_lower, 68 u32 found_lower,
69 u32 found_upper, 69 u32 found_upper,
70 u32 event, 70 u32 event,
71 u32 port_ref, 71 u32 port_ref,
72 u32 node, 72 u32 node,
73 int must_report); 73 int must_report);
74 74
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 04d1b9be9c51..4146c40cd20b 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/user_reg.c: TIPC user registry code 2 * net/tipc/user_reg.c: TIPC user registry code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -40,7 +40,7 @@
40/*
41 * TIPC user registry keeps track of users of the tipc_port interface.
42 *
43 * The registry utilizes an array of "TIPC user" entries;
44 * a user's ID is the index of their associated array entry.
45 * Array entry 0 is not used, so userid 0 is not valid;
46 * TIPC sometimes uses this value to denote an anonymous user.
@@ -51,7 +51,7 @@
51 * struct tipc_user - registered TIPC user info
52 * @next: index of next free registry entry (or -1 for an allocated entry)
53 * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
54 * @usr_handle: user-defined value passed to callback routine
55 * @ports: list of user ports owned by the user
56 */
57
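
The @next field strings the unused array entries into an index-based free list; the allocation step, condensed from the tipc_attach() hunk further down in this file, is essentially:

	/* pop the head of the free list (done under reg_lock) */
	user_ptr = &users[next_free_user];
	*userid = next_free_user;
	next_free_user = user_ptr->next;	/* advance the free list */
	user_ptr->next = -1;			/* mark this entry as allocated */
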
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(reg_lock);
71 71
72/** 72/**
73 * reg_init - create TIPC user registry (but don't activate it) 73 * reg_init - create TIPC user registry (but don't activate it)
74 * 74 *
75 * If registry has been pre-initialized it is left "as is". 75 * If registry has been pre-initialized it is left "as is".
76 * NOTE: This routine may be called when TIPC is inactive. 76 * NOTE: This routine may be called when TIPC is inactive.
77 */ 77 */
@@ -79,7 +79,7 @@ static DEFINE_SPINLOCK(reg_lock);
79static int reg_init(void) 79static int reg_init(void)
80{ 80{
81 u32 i; 81 u32 i;
82 82
83 spin_lock_bh(&reg_lock); 83 spin_lock_bh(&reg_lock);
84 if (!users) { 84 if (!users) {
85 users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC); 85 users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
@@ -137,7 +137,7 @@ int tipc_reg_start(void)
137 */ 137 */
138 138
139void tipc_reg_stop(void) 139void tipc_reg_stop(void)
140{ 140{
141 int id; 141 int id;
142 142
143 if (!users) 143 if (!users)
@@ -174,14 +174,14 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
174 user_ptr = &users[next_free_user]; 174 user_ptr = &users[next_free_user];
175 *userid = next_free_user; 175 *userid = next_free_user;
176 next_free_user = user_ptr->next; 176 next_free_user = user_ptr->next;
177 user_ptr->next = -1; 177 user_ptr->next = -1;
178 spin_unlock_bh(&reg_lock); 178 spin_unlock_bh(&reg_lock);
179 179
180 user_ptr->callback = cb; 180 user_ptr->callback = cb;
181 user_ptr->usr_handle = usr_handle; 181 user_ptr->usr_handle = usr_handle;
182 INIT_LIST_HEAD(&user_ptr->ports); 182 INIT_LIST_HEAD(&user_ptr->ports);
183 atomic_inc(&tipc_user_count); 183 atomic_inc(&tipc_user_count);
184 184
185 if (cb && (tipc_mode != TIPC_NOT_RUNNING)) 185 if (cb && (tipc_mode != TIPC_NOT_RUNNING))
186 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr); 186 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
187 return TIPC_OK; 187 return TIPC_OK;
@@ -207,16 +207,16 @@ void tipc_detach(u32 userid)
207 } 207 }
208 208
209 user_ptr = &users[userid]; 209 user_ptr = &users[userid];
210 user_ptr->callback = NULL; 210 user_ptr->callback = NULL;
211 INIT_LIST_HEAD(&ports_temp); 211 INIT_LIST_HEAD(&ports_temp);
212 list_splice(&user_ptr->ports, &ports_temp); 212 list_splice(&user_ptr->ports, &ports_temp);
213 user_ptr->next = next_free_user; 213 user_ptr->next = next_free_user;
214 next_free_user = userid; 214 next_free_user = userid;
215 spin_unlock_bh(&reg_lock); 215 spin_unlock_bh(&reg_lock);
216 216
217 atomic_dec(&tipc_user_count); 217 atomic_dec(&tipc_user_count);
218 218
219 list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) { 219 list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
220 tipc_deleteport(up_ptr->ref); 220 tipc_deleteport(up_ptr->ref);
221 } 221 }
222} 222}
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
index d0e88794ed1b..81dc12e2882f 100644
--- a/net/tipc/user_reg.h
+++ b/net/tipc/user_reg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/user_reg.h: Include file for TIPC user registry code 2 * net/tipc/user_reg.h: Include file for TIPC user registry code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index f5b00ea2d5ac..114e173f11a5 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/zone.c: TIPC zone management routines 2 * net/tipc/zone.c: TIPC zone management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -92,7 +92,7 @@ void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
92 92
93 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { 93 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
94 if (z_ptr->clusters[c_num]) { 94 if (z_ptr->clusters[c_num]) {
95 tipc_cltr_remove_as_router(z_ptr->clusters[c_num], 95 tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
96 router); 96 router);
97 } 97 }
98 } 98 }
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
index 5ab3d08602e2..6e7a08df8af5 100644
--- a/net/tipc/zone.h
+++ b/net/tipc/zone.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/tipc/zone.h: Include file for TIPC zone management routines 2 * net/tipc/zone.h: Include file for TIPC zone management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
@@ -47,7 +47,7 @@
47 * @clusters: array of pointers to all clusters within zone 47 * @clusters: array of pointers to all clusters within zone
48 * @links: number of (unicast) links to zone 48 * @links: number of (unicast) links to zone
49 */ 49 */
50 50
51struct _zone { 51struct _zone {
52 u32 addr; 52 u32 addr;
53 struct cluster *clusters[2]; /* currently limited to just 1 cluster */ 53 struct cluster *clusters[2]; /* currently limited to just 1 cluster */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2f208c7f4d43..ac9478d0ca8b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -195,7 +195,7 @@ static inline void unix_release_addr(struct unix_address *addr)
195 * - if started by not zero, should be NULL terminated (FS object)
196 * - if started by zero, it is abstract name.
197 */
198
199static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
200{
201 	if (len <= sizeof(short) || len > sizeof(*sunaddr))
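
For reference, the two address forms the comment distinguishes look like this from user space (an illustration, not part of the patch):

	struct sockaddr_un a;

	/* filesystem object: sun_path is a NUL-terminated pathname */
	a.sun_family = AF_UNIX;
	strcpy(a.sun_path, "/tmp/mysock");

	/* abstract name: first byte is 0, the rest is the (unterminated) name;
	 * addrlen = offsetof(struct sockaddr_un, sun_path) + 1 + namelen */
	a.sun_family = AF_UNIX;
	a.sun_path[0] = '\0';
	memcpy(a.sun_path + 1, "myname", 6);
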
@@ -432,7 +432,7 @@ static int unix_release_sock (struct sock *sk, int embrion)
432 */ 432 */
433 433
434 if (atomic_read(&unix_tot_inflight)) 434 if (atomic_read(&unix_tot_inflight))
435 unix_gc(); /* Garbage collect fds */ 435 unix_gc(); /* Garbage collect fds */
436 436
437 return 0; 437 return 0;
438} 438}
@@ -698,7 +698,7 @@ static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
698 struct sock *u; 698 struct sock *u;
699 struct nameidata nd; 699 struct nameidata nd;
700 int err = 0; 700 int err = 0;
701 701
702 if (sunname->sun_path[0]) { 702 if (sunname->sun_path[0]) {
703 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd); 703 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
704 if (err) 704 if (err)
@@ -915,7 +915,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
915 unix_peer(sk)=other; 915 unix_peer(sk)=other;
916 unix_state_wunlock(sk); 916 unix_state_wunlock(sk);
917 } 917 }
918 return 0; 918 return 0;
919 919
920out_unlock: 920out_unlock:
921 unix_state_wunlock(sk); 921 unix_state_wunlock(sk);
@@ -1021,7 +1021,7 @@ restart:
1021 goto out; 1021 goto out;
1022 sock_put(other); 1022 sock_put(other);
1023 goto restart; 1023 goto restart;
1024 } 1024 }
1025 1025
1026 /* Latch our state. 1026 /* Latch our state.
1027 1027
@@ -1415,7 +1415,7 @@ out:
1415 return err; 1415 return err;
1416} 1416}
1417 1417
1418 1418
1419static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, 1419static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1420 struct msghdr *msg, size_t len) 1420 struct msghdr *msg, size_t len)
1421{ 1421{
@@ -1467,11 +1467,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1467 1467
1468 if (size > SKB_MAX_ALLOC) 1468 if (size > SKB_MAX_ALLOC)
1469 size = SKB_MAX_ALLOC; 1469 size = SKB_MAX_ALLOC;
1470 1470
1471 /* 1471 /*
1472 * Grab a buffer 1472 * Grab a buffer
1473 */ 1473 */
1474 1474
1475 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err); 1475 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1476 1476
1477 if (skb==NULL) 1477 if (skb==NULL)
@@ -1530,7 +1530,7 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1530{ 1530{
1531 int err; 1531 int err;
1532 struct sock *sk = sock->sk; 1532 struct sock *sk = sock->sk;
1533 1533
1534 err = sock_error(sk); 1534 err = sock_error(sk);
1535 if (err) 1535 if (err)
1536 return err; 1536 return err;
@@ -1543,7 +1543,7 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1543 1543
1544 return unix_dgram_sendmsg(kiocb, sock, msg, len); 1544 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1545} 1545}
1546 1546
1547static void unix_copy_addr(struct msghdr *msg, struct sock *sk) 1547static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1548{ 1548{
1549 struct unix_sock *u = unix_sk(sk); 1549 struct unix_sock *u = unix_sk(sk);
@@ -1605,7 +1605,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1605 if (UNIXCB(skb).fp) 1605 if (UNIXCB(skb).fp)
1606 unix_detach_fds(siocb->scm, skb); 1606 unix_detach_fds(siocb->scm, skb);
1607 } 1607 }
1608 else 1608 else
1609 { 1609 {
1610 /* It is questionable: on PEEK we could: 1610 /* It is questionable: on PEEK we could:
1611 - do not return fds - good, but too simple 8) 1611 - do not return fds - good, but too simple 8)
@@ -1613,11 +1613,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1613 apparently wrong) 1613 apparently wrong)
1614 - clone fds (I chose it for now, it is the most universal 1614 - clone fds (I chose it for now, it is the most universal
1615 solution) 1615 solution)
1616 1616
1617 POSIX 1003.1g does not actually define this clearly 1617 POSIX 1003.1g does not actually define this clearly
1618 at all. POSIX 1003.1g doesn't define a lot of things 1618 at all. POSIX 1003.1g doesn't define a lot of things
1619 clearly however! 1619 clearly however!
1620 1620
1621 */ 1621 */
1622 if (UNIXCB(skb).fp) 1622 if (UNIXCB(skb).fp)
1623 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); 1623 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
@@ -1637,7 +1637,7 @@ out:
1637/* 1637/*
1638 * Sleep until data has arrive. But check for races.. 1638 * Sleep until data has arrive. But check for races..
1639 */ 1639 */
1640 1640
1641static long unix_stream_data_wait(struct sock * sk, long timeo) 1641static long unix_stream_data_wait(struct sock * sk, long timeo)
1642{ 1642{
1643 DEFINE_WAIT(wait); 1643 DEFINE_WAIT(wait);
@@ -1721,7 +1721,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1721 /* 1721 /*
1722 * POSIX 1003.1g mandates this order. 1722 * POSIX 1003.1g mandates this order.
1723 */ 1723 */
1724 1724
1725 if ((err = sock_error(sk)) != 0) 1725 if ((err = sock_error(sk)) != 0)
1726 break; 1726 break;
1727 if (sk->sk_shutdown & RCV_SHUTDOWN) 1727 if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -1937,7 +1937,7 @@ static struct sock *unix_seq_idx(int *iter, loff_t pos)
1937 struct sock *s; 1937 struct sock *s;
1938 1938
1939 for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) { 1939 for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
1940 if (off == pos) 1940 if (off == pos)
1941 return s; 1941 return s;
1942 ++off; 1942 ++off;
1943 } 1943 }
@@ -1955,7 +1955,7 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1955{ 1955{
1956 ++*pos; 1956 ++*pos;
1957 1957
1958 if (v == (void *)1) 1958 if (v == (void *)1)
1959 return first_unix_socket(seq->private); 1959 return first_unix_socket(seq->private);
1960 return next_unix_socket(seq->private, v); 1960 return next_unix_socket(seq->private, v);
1961} 1961}
@@ -1967,7 +1967,7 @@ static void unix_seq_stop(struct seq_file *seq, void *v)
1967 1967
1968static int unix_seq_show(struct seq_file *seq, void *v) 1968static int unix_seq_show(struct seq_file *seq, void *v)
1969{ 1969{
1970 1970
1971 if (v == (void *)1) 1971 if (v == (void *)1)
1972 seq_puts(seq, "Num RefCount Protocol Flags Type St " 1972 seq_puts(seq, "Num RefCount Protocol Flags Type St "
1973 "Inode Path\n"); 1973 "Inode Path\n");
@@ -2064,8 +2064,8 @@ static int __init af_unix_init(void)
2064 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)); 2064 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2065 2065
2066 rc = proto_register(&unix_proto, 1); 2066 rc = proto_register(&unix_proto, 1);
2067 if (rc != 0) { 2067 if (rc != 0) {
2068 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n", 2068 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2069 __FUNCTION__); 2069 __FUNCTION__);
2070 goto out; 2070 goto out;
2071 } 2071 }
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index f14ad6635fcc..f8bcf5d114d9 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -63,7 +63,7 @@
63 * Damn. Added missing check for ->dead in listen queues scanning. 63 * Damn. Added missing check for ->dead in listen queues scanning.
64 * 64 *
65 */ 65 */
66 66
67#include <linux/kernel.h> 67#include <linux/kernel.h>
68#include <linux/sched.h> 68#include <linux/sched.h>
69#include <linux/string.h> 69#include <linux/string.h>
@@ -118,7 +118,7 @@ static struct sock *unix_get_socket(struct file *filp)
118 * Keep the number of times in flight count for the file 118 * Keep the number of times in flight count for the file
119 * descriptor if it is for an AF_UNIX socket. 119 * descriptor if it is for an AF_UNIX socket.
120 */ 120 */
121 121
122void unix_inflight(struct file *fp) 122void unix_inflight(struct file *fp)
123{ 123{
124 struct sock *s = unix_get_socket(fp); 124 struct sock *s = unix_get_socket(fp);
@@ -190,7 +190,7 @@ void unix_gc(void)
190 unix_sk(s)->gc_tree = GC_ORPHAN; 190 unix_sk(s)->gc_tree = GC_ORPHAN;
191 } 191 }
192 /* 192 /*
193 * Everything is now marked 193 * Everything is now marked
194 */ 194 */
195 195
196 /* Invariant to be maintained: 196 /* Invariant to be maintained:
@@ -227,7 +227,7 @@ void unix_gc(void)
227 } 227 }
228 228
229 /* 229 /*
230 * Mark phase 230 * Mark phase
231 */ 231 */
232 232
233 while (!empty_stack()) 233 while (!empty_stack())
@@ -237,11 +237,11 @@ void unix_gc(void)
237 237
238 spin_lock(&x->sk_receive_queue.lock); 238 spin_lock(&x->sk_receive_queue.lock);
239 skb = skb_peek(&x->sk_receive_queue); 239 skb = skb_peek(&x->sk_receive_queue);
240 240
241 /* 241 /*
242 * Loop through all but first born 242 * Loop through all but first born
243 */ 243 */
244 244
245 while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) { 245 while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
246 /* 246 /*
247 * Do we have file descriptors ? 247 * Do we have file descriptors ?
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index c2059733e15a..41d7e32be70d 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -11,11 +11,11 @@
11* 2 of the License, or (at your option) any later version. 11* 2 of the License, or (at your option) any later version.
12* ============================================================================ 12* ============================================================================
13* Due Credit: 13* Due Credit:
14* Wanpipe socket layer is based on Packet and 14* Wanpipe socket layer is based on Packet and
15* the X25 socket layers. The above sockets were 15* the X25 socket layers. The above sockets were
16* used for the specific use of Sangoma Technologies 16* used for the specific use of Sangoma Technologies
17* API programs. 17* API programs.
18* Packet socket Authors: Ross Biro, Fred N. van Kempen and 18* Packet socket Authors: Ross Biro, Fred N. van Kempen and
19* Alan Cox. 19* Alan Cox.
20* X25 socket Author: Jonathan Naylor. 20* X25 socket Author: Jonathan Naylor.
21* ============================================================================ 21* ============================================================================
@@ -28,7 +28,7 @@
28* Feb 29, 2000 Nenad Corbic o Added support for PVC protocols, such as 28* Feb 29, 2000 Nenad Corbic o Added support for PVC protocols, such as
29* CHDLC, Frame Relay and HDLC API. 29* CHDLC, Frame Relay and HDLC API.
30* Jan 17, 2000 Nenad Corbic o Initial version, based on AF_PACKET socket. 30* Jan 17, 2000 Nenad Corbic o Initial version, based on AF_PACKET socket.
31* X25API support only. 31* X25API support only.
32* 32*
33******************************************************************************/ 33******************************************************************************/
34 34
@@ -71,33 +71,33 @@
71 	#define DBG_PRINTK(format, a...) printk(format, ## a)
72#else
73 	#define DBG_PRINTK(format, a...)
74#endif
75
76
77/* SECURE SOCKET IMPLEMENTATION
78 *
79 * TRANSMIT:
80 *
81 * 	When the user sends a packet via send() system call
82 * 	the wanpipe_sendmsg() function is executed.
83 *
84 * 	Each packet is enqueud into sk->sk_write_queue transmit
85 * 	queue. When the packet is enqueued, a delayed transmit
86 * 	timer is triggerd which acts as a Bottom Half hander.
87 *
88 * 	wanpipe_delay_transmit() function (BH), dequeues packets
89 * 	from the sk->sk_write_queue transmit queue and sends it
90 * 	to the deriver via dev->hard_start_xmit(skb, dev) function.
91 * 	Note, this function is actual a function pointer of if_send()
92 * 	routine in the wanpipe driver.
93 *
94 * X25API GUARANTEED DELIVERY:
95 *
96 * 	In order to provide 100% guaranteed packet delivery,
97 * 	an atomic 'packet_sent' counter is implemented. Counter
98 * 	is incremented for each packet enqueued
99 * 	into sk->sk_write_queue. Counter is decremented each
100 * 	time wanpipe_delayed_transmit() function successfuly
101 * 	passes the packet to the driver. Before each send(), a poll
102 * 	routine checks the sock resources The maximum value of
103 * 	packet sent counter is 1, thus if one packet is queued, the
@@ -110,11 +110,11 @@
110 * 	function, wanpipe_rcv() to queue the incoming packets
111 * 	into an AF_WANPIPE socket queue. Based on wanpipe_rcv()
112 * 	return code, the driver knows whether the packet was
113 * 	successfully queued. If the socket queue is full,
114 * 	protocol flow control is used by the driver, if any,
115 * 	to slow down the traffic until the sock queue is free.
116 *
117 * 	Every time a packet arrives into a socket queue the
118 * 	socket wakes up processes which are waiting to receive
119 * 	data.
120 *
@@ -122,12 +122,12 @@
122 * 	bit which signals the socket to kick the wanpipe driver
123 * 	bottom half hander when the socket queue is partialy
124 * 	empty. wanpipe_recvmsg() function performs this action.
125 *
126 * 	In case of x25api, packets will never be dropped, since
127 * 	flow control is available.
128 *
129 * 	In case of streaming protocols like CHDLC, packets will
130 * 	be dropped but the statistics will be generated.
131 */
132
133
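
The transmit path described above boils down to a queue-then-timer pattern; condensed from the wanpipe_sendmsg() and wanpipe_delayed_transmit() hunks further down (simplified, error handling omitted):

	/* process context: enqueue, then arm the timer that acts as the BH */
	skb_queue_tail(&sk->sk_write_queue, skb);
	atomic_inc(&wp->packet_sent);
	if (!test_and_set_bit(0, &wp->timer))
		mod_timer(&wp->tx_timer, jiffies + 1);

	/* timer handler: hand one packet to the driver, re-queue on failure */
	if ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
		if (dev->hard_start_xmit(skb, dev) != 0)
			skb_queue_head(&sk->sk_write_queue, skb);
		else
			atomic_dec(&wp->packet_sent);
	}
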
@@ -170,11 +170,11 @@ struct wanpipe_opt
170 struct net_device *dev; /* Bounded device */ 170 struct net_device *dev; /* Bounded device */
171 unsigned short lcn; /* Binded LCN */ 171 unsigned short lcn; /* Binded LCN */
172 unsigned char svc; /* 0=pvc, 1=svc */ 172 unsigned char svc; /* 0=pvc, 1=svc */
173 unsigned char timer; /* flag for delayed transmit*/ 173 unsigned char timer; /* flag for delayed transmit*/
174 struct timer_list tx_timer; 174 struct timer_list tx_timer;
175 unsigned poll_cnt; 175 unsigned poll_cnt;
176 unsigned char force; /* Used to force sock release */ 176 unsigned char force; /* Used to force sock release */
177 atomic_t packet_sent; 177 atomic_t packet_sent;
178}; 178};
179#endif 179#endif
180 180
@@ -215,8 +215,8 @@ static int check_driver_busy (struct sock *);
215 *
216 * 	Wanpipe socket bottom half handler. This function
217 * 	is called by the WANPIPE device drivers to queue a
218 * 	incoming packet into the socket receive queue.
219 * 	Once the packet is queued, all processes waiting to
220 * 	read are woken up.
221 *
222 * 	During socket bind, this function is bounded into
@@ -245,13 +245,13 @@ static int wanpipe_rcv(struct sk_buff *skb, struct net_device *dev,
245 if (dev->hard_header_parse) 245 if (dev->hard_header_parse)
246 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr); 246 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
247 247
248 /* 248 /*
249 * WAN_PACKET_DATA : Data which should be passed up the receive queue. 249 * WAN_PACKET_DATA : Data which should be passed up the receive queue.
250 * WAN_PACKET_ASYC : Asynchronous data like place call, which should 250 * WAN_PACKET_ASYC : Asynchronous data like place call, which should
251 * be passed up the listening sock. 251 * be passed up the listening sock.
252 * WAN_PACKET_ERR : Asynchronous data like clear call or restart 252 * WAN_PACKET_ERR : Asynchronous data like clear call or restart
253 * which should go into an error queue. 253 * which should go into an error queue.
254 */ 254 */
255 switch (skb->pkt_type){ 255 switch (skb->pkt_type){
256 256
257 case WAN_PACKET_DATA: 257 case WAN_PACKET_DATA:
@@ -261,10 +261,10 @@ static int wanpipe_rcv(struct sk_buff *skb, struct net_device *dev,
261 break; 261 break;
262 case WAN_PACKET_CMD: 262 case WAN_PACKET_CMD:
263 sk->sk_state = chan->state; 263 sk->sk_state = chan->state;
264 /* Bug fix: update Mar6. 264 /* Bug fix: update Mar6.
265 * Do not set the sock lcn number here, since 265 * Do not set the sock lcn number here, since
266 * cmd is not guaranteed to be executed on the 266 * cmd is not guaranteed to be executed on the
267 * board, thus Lcn could be wrong */ 267 * board, thus Lcn could be wrong */
268 sk->sk_data_ready(sk, skb->len); 268 sk->sk_data_ready(sk, skb->len);
269 kfree_skb(skb); 269 kfree_skb(skb);
270 break; 270 break;
@@ -276,7 +276,7 @@ static int wanpipe_rcv(struct sk_buff *skb, struct net_device *dev,
276 break; 276 break;
277 default: 277 default:
278 printk(KERN_INFO "wansock: BH Illegal Packet Type Dropping\n"); 278 printk(KERN_INFO "wansock: BH Illegal Packet Type Dropping\n");
279 kfree_skb(skb); 279 kfree_skb(skb);
280 break; 280 break;
281 } 281 }
282 282
@@ -297,20 +297,20 @@ static int wanpipe_rcv(struct sk_buff *skb, struct net_device *dev,
297 *
298 * 	Wanpipe LISTEN socket bottom half handler. This function
299 * 	is called by the WANPIPE device drivers to queue an
300 * 	incoming call into the socket listening queue.
301 * 	Once the packet is queued, the waiting accept() process
302 * 	is woken up.
303 *
304 * 	During socket bind, this function is bounded into
305 * 	WANPIPE driver private.
306 *
307 * 	IMPORTANT NOTE:
308 * 	The accept call() is waiting for an skb packet
309 * 	which contains a pointer to a device structure.
310 *
311 * 	When we do a bind to a device structre, we
312 * 	bind a newly created socket into "chan->sk". Thus,
313 * 	when accept receives the skb packet, it will know
314 * 	from which dev it came form, and in turn it will know
315 * 	the address of the new sock.
316 *
@@ -322,31 +322,31 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
322 wanpipe_opt *wp = wp_sk(sk), *newwp; 322 wanpipe_opt *wp = wp_sk(sk), *newwp;
323 struct wan_sockaddr_ll *sll = (struct wan_sockaddr_ll*)skb->cb; 323 struct wan_sockaddr_ll *sll = (struct wan_sockaddr_ll*)skb->cb;
324 struct sock *newsk; 324 struct sock *newsk;
325 struct net_device *dev; 325 struct net_device *dev;
326 sdla_t *card; 326 sdla_t *card;
327 mbox_cmd_t *mbox_ptr; 327 mbox_cmd_t *mbox_ptr;
328 wanpipe_common_t *chan; 328 wanpipe_common_t *chan;
329 329
330 /* Find a free device, if none found, all svc's are busy 330 /* Find a free device, if none found, all svc's are busy
331 */ 331 */
332 332
333 card = (sdla_t*)wp->card; 333 card = (sdla_t*)wp->card;
334 if (!card){ 334 if (!card){
335 printk(KERN_INFO "wansock: LISTEN ERROR, No Card\n"); 335 printk(KERN_INFO "wansock: LISTEN ERROR, No Card\n");
336 return -ENODEV; 336 return -ENODEV;
337 } 337 }
338 338
339 dev = wanpipe_find_free_dev(card); 339 dev = wanpipe_find_free_dev(card);
340 if (!dev){ 340 if (!dev){
341 printk(KERN_INFO "wansock: LISTEN ERROR, No Free Device\n"); 341 printk(KERN_INFO "wansock: LISTEN ERROR, No Free Device\n");
342 return -ENODEV; 342 return -ENODEV;
343 } 343 }
344 344
345 chan=dev->priv; 345 chan=dev->priv;
346 chan->state = WANSOCK_CONNECTING; 346 chan->state = WANSOCK_CONNECTING;
347 347
348 /* Allocate a new sock, which accept will bind 348 /* Allocate a new sock, which accept will bind
349 * and pass up to the user 349 * and pass up to the user
350 */ 350 */
351 if ((newsk = wanpipe_make_new(sk)) == NULL){ 351 if ((newsk = wanpipe_make_new(sk)) == NULL){
352 release_device(dev); 352 release_device(dev);
@@ -354,33 +354,33 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
354 } 354 }
355 355
356 356
357 /* Initialize the new sock structure 357 /* Initialize the new sock structure
358 */ 358 */
359 newsk->sk_bound_dev_if = dev->ifindex; 359 newsk->sk_bound_dev_if = dev->ifindex;
360 newwp = wp_sk(newsk); 360 newwp = wp_sk(newsk);
361 newwp->card = wp->card; 361 newwp->card = wp->card;
362 362
363 /* Insert the sock into the main wanpipe 363 /* Insert the sock into the main wanpipe
364 * sock list. 364 * sock list.
365 */ 365 */
366 atomic_inc(&wanpipe_socks_nr); 366 atomic_inc(&wanpipe_socks_nr);
367 367
368 /* Allocate and fill in the new Mail Box. Then 368 /* Allocate and fill in the new Mail Box. Then
369 * bind the mail box to the sock. It will be 369 * bind the mail box to the sock. It will be
370 * used by the ioctl call to read call information 370 * used by the ioctl call to read call information
371 * and to execute commands. 371 * and to execute commands.
372 */ 372 */
373 if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { 373 if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) {
374 wanpipe_kill_sock_irq (newsk); 374 wanpipe_kill_sock_irq (newsk);
375 release_device(dev); 375 release_device(dev);
376 return -ENOMEM; 376 return -ENOMEM;
377 } 377 }
378 memcpy(mbox_ptr,skb->data,skb->len); 378 memcpy(mbox_ptr,skb->data,skb->len);
379 379
380 /* Register the lcn on which incoming call came 380 /* Register the lcn on which incoming call came
381 * from. Thus, if we have to clear it, we know 381 * from. Thus, if we have to clear it, we know
382 * which lcn to clear 382 * which lcn to clear
383 */ 383 */
384 384
385 newwp->lcn = mbox_ptr->cmd.lcn; 385 newwp->lcn = mbox_ptr->cmd.lcn;
386 newwp->mbox = (void *)mbox_ptr; 386 newwp->mbox = (void *)mbox_ptr;
@@ -416,20 +416,20 @@ static int wanpipe_listen_rcv (struct sk_buff *skb, struct sock *sk)
416 416
417 /* We must do this manually, since the sock_queue_rcv_skb() 417 /* We must do this manually, since the sock_queue_rcv_skb()
418 * function sets the skb->dev to NULL. However, we use 418 * function sets the skb->dev to NULL. However, we use
419 * the dev field in the accept function.*/ 419 * the dev field in the accept function.*/
420 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 420 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
421 (unsigned)sk->sk_rcvbuf) { 421 (unsigned)sk->sk_rcvbuf) {
422 422
423 wanpipe_unlink_driver(newsk); 423 wanpipe_unlink_driver(newsk);
424 wanpipe_kill_sock_irq (newsk); 424 wanpipe_kill_sock_irq (newsk);
425 --sk->sk_ack_backlog; 425 --sk->sk_ack_backlog;
426 return -ENOMEM; 426 return -ENOMEM;
427 } 427 }
428 428
429 skb_set_owner_r(skb, sk); 429 skb_set_owner_r(skb, sk);
430 skb_queue_tail(&sk->sk_receive_queue, skb); 430 skb_queue_tail(&sk->sk_receive_queue, skb);
431 sk->sk_data_ready(sk, skb->len); 431 sk->sk_data_ready(sk, skb->len);
432 432
433 return 0; 433 return 0;
434} 434}
435 435
@@ -474,7 +474,7 @@ static struct sock *wanpipe_make_new(struct sock *osk)
474 return sk; 474 return sk;
475} 475}
476 476
477/* 477/*
478 * FIXME: wanpipe_opt has to include a sock in its definition and stop using 478 * FIXME: wanpipe_opt has to include a sock in its definition and stop using
479 * sk_protinfo, but this code is not even compilable now, so lets leave it for 479 * sk_protinfo, but this code is not even compilable now, so lets leave it for
480 * later. 480 * later.
@@ -489,12 +489,12 @@ static struct proto wanpipe_proto = {
489 * wanpipe_make_new
490 *
491 * 	Allocate memory for the a new sock, and sock
492 * 	private data.
493 *
494 * 	Increment the module use count.
495 *
496 * 	This function is used by wanpipe_create() and
497 * 	wanpipe_make_new() functions.
498 *
499 *===========================================================*/
500
@@ -514,7 +514,7 @@ static struct sock *wanpipe_alloc_socket(void)
514 wp_sk(sk) = wan_opt; 514 wp_sk(sk) = wan_opt;
515 515
516 /* Use timer to send data to the driver. This will act 516 /* Use timer to send data to the driver. This will act
517 * as a BH handler for sendmsg functions */ 517 * as a BH handler for sendmsg functions */
518 init_timer(&wan_opt->tx_timer); 518 init_timer(&wan_opt->tx_timer);
519 wan_opt->tx_timer.data = (unsigned long)sk; 519 wan_opt->tx_timer.data = (unsigned long)sk;
520 wan_opt->tx_timer.function = wanpipe_delayed_transmit; 520 wan_opt->tx_timer.function = wanpipe_delayed_transmit;
@@ -528,14 +528,14 @@ static struct sock *wanpipe_alloc_socket(void)
528 * wanpipe_sendmsg
529 *
530 * 	This function implements a sendto() system call,
531 * 	for AF_WANPIPE socket family.
532 * 	During socket bind() sk->sk_bound_dev_if is initialized
533 * 	to a correct network device. This number is used
534 * 	to find a network device to which the packet should
535 * 	be passed to.
536 *
537 * 	Each packet is queued into sk->sk_write_queue and
538 * 	delayed transmit bottom half handler is marked for
539 * 	execution.
540 *
541 * 	A socket must be in WANSOCK_CONNECTED state before
@@ -554,18 +554,18 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
554 unsigned char *addr; 554 unsigned char *addr;
555 int ifindex, err, reserve = 0; 555 int ifindex, err, reserve = 0;
556 556
557 557
558 if (!sock_flag(sk, SOCK_ZAPPED)) 558 if (!sock_flag(sk, SOCK_ZAPPED))
559 return -ENETDOWN; 559 return -ENETDOWN;
560 560
561 if (sk->sk_state != WANSOCK_CONNECTED) 561 if (sk->sk_state != WANSOCK_CONNECTED)
562 return -ENOTCONN; 562 return -ENOTCONN;
563 563
564 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) 564 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
565 return(-EINVAL); 565 return(-EINVAL);
566 566
567 /* it was <=, now one can send 567 /* it was <=, now one can send
568 * zero length packets */ 568 * zero length packets */
569 if (len < sizeof(x25api_hdr_t)) 569 if (len < sizeof(x25api_hdr_t))
570 return -EINVAL; 570 return -EINVAL;
571 571
@@ -577,7 +577,7 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
577 addr = NULL; 577 addr = NULL;
578 578
579 }else{ 579 }else{
580 if (msg->msg_namelen < sizeof(struct wan_sockaddr_ll)){ 580 if (msg->msg_namelen < sizeof(struct wan_sockaddr_ll)){
581 return -EINVAL; 581 return -EINVAL;
582 } 582 }
583 583
@@ -592,12 +592,12 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
592 return -ENXIO; 592 return -ENXIO;
593 } 593 }
594 dev_put(dev); 594 dev_put(dev);
595 595
596 if (sock->type == SOCK_RAW) 596 if (sock->type == SOCK_RAW)
597 reserve = dev->hard_header_len; 597 reserve = dev->hard_header_len;
598 598
599 if (len > dev->mtu+reserve){ 599 if (len > dev->mtu+reserve){
600 return -EMSGSIZE; 600 return -EMSGSIZE;
601 } 601 }
602 602
603 skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev), 603 skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
@@ -606,7 +606,7 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
606 if (skb==NULL){ 606 if (skb==NULL){
607 goto out_unlock; 607 goto out_unlock;
608 } 608 }
609 609
610 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 610 skb_reserve(skb, LL_RESERVED_SPACE(dev));
611 skb->nh.raw = skb->data; 611 skb->nh.raw = skb->data;
612 612
@@ -645,7 +645,7 @@ static int wanpipe_sendmsg(struct kiocb *iocb, struct socket *sock,
645 645
646 if (!(test_and_set_bit(0, &wp->timer))) 646 if (!(test_and_set_bit(0, &wp->timer)))
647 mod_timer(&wp->tx_timer, jiffies + 1); 647 mod_timer(&wp->tx_timer, jiffies + 1);
648 648
649 return(len); 649 return(len);
650 650
651out_free: 651out_free:
@@ -658,12 +658,12 @@ out_unlock:
658 * wanpipe_delayed_tarnsmit
659 *
660 * 	Transmit bottom half handler. It dequeues packets
661 * 	from sk->sk_write_queue and passes them to the
662 * 	driver. If the driver is busy, the packet is
663 * 	re-enqueued.
664 *
665 * 	Packet Sent counter is decremented on successful
666 * 	transmission.
667 *===========================================================*/
668
669
@@ -680,17 +680,17 @@ static void wanpipe_delayed_transmit (unsigned long data)
680 DBG_PRINTK(KERN_INFO "wansock: Transmit delay, no dev or card\n"); 680 DBG_PRINTK(KERN_INFO "wansock: Transmit delay, no dev or card\n");
681 return; 681 return;
682 } 682 }
683 683
684 if (sk->sk_state != WANSOCK_CONNECTED || !sock_flag(sk, SOCK_ZAPPED)) { 684 if (sk->sk_state != WANSOCK_CONNECTED || !sock_flag(sk, SOCK_ZAPPED)) {
685 clear_bit(0, &wp->timer); 685 clear_bit(0, &wp->timer);
686 DBG_PRINTK(KERN_INFO "wansock: Tx Timer, State not CONNECTED\n"); 686 DBG_PRINTK(KERN_INFO "wansock: Tx Timer, State not CONNECTED\n");
687 return; 687 return;
688 } 688 }
689 689
690 /* If driver is executing command, we must offload 690 /* If driver is executing command, we must offload
691 * the board by not sending data. Otherwise a 691 * the board by not sending data. Otherwise a
692 * pending command will never get a free buffer 692 * pending command will never get a free buffer
693 * to execute */ 693 * to execute */
694 if (atomic_read(&card->u.x.command_busy)){ 694 if (atomic_read(&card->u.x.command_busy)){
695 wp->tx_timer.expires = jiffies + SLOW_BACKOFF; 695 wp->tx_timer.expires = jiffies + SLOW_BACKOFF;
696 add_timer(&wp->tx_timer); 696 add_timer(&wp->tx_timer);
@@ -698,30 +698,30 @@ static void wanpipe_delayed_transmit (unsigned long data)
698 return; 698 return;
699 } 699 }
700 700
701 701
702 if (test_and_set_bit(0,&wanpipe_tx_critical)){ 702 if (test_and_set_bit(0,&wanpipe_tx_critical)){
703 printk(KERN_INFO "WanSock: Tx timer critical %s\n",dev->name); 703 printk(KERN_INFO "WanSock: Tx timer critical %s\n",dev->name);
704 wp->tx_timer.expires = jiffies + SLOW_BACKOFF; 704 wp->tx_timer.expires = jiffies + SLOW_BACKOFF;
705 add_timer(&wp->tx_timer); 705 add_timer(&wp->tx_timer);
706 return; 706 return;
707 } 707 }
708 708
709 /* Check for a packet in the fifo and send */ 709 /* Check for a packet in the fifo and send */
710 if ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL){ 710 if ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL){
711 711
712 if (dev->hard_start_xmit(skb, dev) != 0){ 712 if (dev->hard_start_xmit(skb, dev) != 0){
713 713
714 /* Driver failed to transmit, re-enqueue 714 /* Driver failed to transmit, re-enqueue
715 * the packet and retry again later */ 715 * the packet and retry again later */
716 skb_queue_head(&sk->sk_write_queue,skb); 716 skb_queue_head(&sk->sk_write_queue,skb);
717 clear_bit(0,&wanpipe_tx_critical); 717 clear_bit(0,&wanpipe_tx_critical);
718 return; 718 return;
719 }else{ 719 }else{
720 720
721 /* Packet Sent successful. Check for more packets 721 /* Packet Sent successful. Check for more packets
722 * if more packets, re-trigger the transmit routine 722 * if more packets, re-trigger the transmit routine
723 * other wise exit 723 * other wise exit
724 */ 724 */
725 atomic_dec(&wp->packet_sent); 725 atomic_dec(&wp->packet_sent);
726 726
727 if (skb_peek(&sk->sk_write_queue) == NULL) { 727 if (skb_peek(&sk->sk_write_queue) == NULL) {
@@ -741,18 +741,18 @@ static void wanpipe_delayed_transmit (unsigned long data)
741	}
742
743	/*============================================================
744	 * execute_command
745	 *
746	 * Execute x25api commands. The atomic variable
747	 * chan->command is used to indicate to the driver that
748	 * command is pending for execution. The acutal command
749	 * structure is placed into a sock mbox structure
750	 * (wp_sk(sk)->mbox).
751	 *
752	 * The sock private structure, mbox is
753	 * used as shared memory between sock and the driver.
754	 * Driver uses the sock mbox to execute the command
755	 * and return the result.
756	 *
757	 * For all command except PLACE CALL, the function
758	 * waits for the result. PLACE CALL can be ether
@@ -768,7 +768,7 @@ static int execute_command(struct sock *sk, unsigned char cmd, unsigned int fla
768	wanpipe_common_t *chan=NULL;
769	int err=0;
770	DECLARE_WAITQUEUE(wait, current);
771
772	dev = dev_get_by_index(sk->sk_bound_dev_if);
773	if (dev == NULL){
774	printk(KERN_INFO "wansock: Exec failed no dev %i\n",
@@ -793,7 +793,7 @@ static int execute_command(struct sock *sk, unsigned char cmd, unsigned int fla
793	return -EINVAL;
794	}
795
796	((mbox_cmd_t*)wp->mbox)->cmd.command = cmd;
797	((mbox_cmd_t*)wp->mbox)->cmd.lcn = wp->lcn;
798	((mbox_cmd_t*)wp->mbox)->cmd.result = 0x7F;
799
@@ -820,12 +820,12 @@ static int execute_command(struct sock *sk, unsigned char cmd, unsigned int fla
820	}
821	current->state = TASK_RUNNING;
822	remove_wait_queue(sk->sk_sleep,&wait);
823
824	return err;
825	}
826
827	/*============================================================
828	 * wanpipe_destroy_timer
829	 *
830	 * Used by wanpipe_release, to delay release of
831	 * the socket.
@@ -846,7 +846,7 @@ static void wanpipe_destroy_timer(unsigned long data)
846
847	kfree(wp);
848	wp_sk(sk) = NULL;
849
850	if (atomic_read(&sk->sk_refcnt) != 1) {
851	atomic_set(&sk->sk_refcnt, 1);
852	DBG_PRINTK(KERN_INFO "wansock: Error, wrong reference count: %i ! :delay.\n",
@@ -865,9 +865,9 @@ static void wanpipe_destroy_timer(unsigned long data)
865	/*============================================================
866	 * wanpipe_unlink_driver
867	 *
868	 * When the socket is released, this function is
869	 * used to remove links that bind the sock and the
870	 * driver together.
871	 *===========================================================*/
872	static void wanpipe_unlink_driver (struct sock *sk)
873	{
@@ -882,7 +882,7 @@ static void wanpipe_unlink_driver (struct sock *sk)
882	if (!dev){
883	printk(KERN_INFO "wansock: No dev on release\n");
884	return;
885	}
886	dev_put(dev);
887
888	if ((chan = dev->priv) == NULL){
@@ -897,7 +897,7 @@ static void wanpipe_unlink_driver (struct sock *sk)
897	chan->tx_timer=NULL;
898	clear_bit(0,&chan->common_critical);
899	release_device(dev);
900
901	return;
902	}
903
@@ -931,7 +931,7 @@ static void wanpipe_link_driver(struct net_device *dev, struct sock *sk)
931	/*============================================================
932	 * release_device
933	 *
934	 * During sock release, clear a critical bit, which
935	 * marks the device a being taken.
936	 *===========================================================*/
937
@@ -945,8 +945,8 @@ static void release_device(struct net_device *dev)
945	/*============================================================
946	 * wanpipe_release
947	 *
948	 * Close a PACKET socket. This is fairly simple. We
949	 * immediately go to 'closed' state and remove our
950	 * protocol entry in the device list.
951	 *===========================================================*/
952
@@ -954,7 +954,7 @@ static int wanpipe_release(struct socket *sock)
954	{
955	wanpipe_opt *wp;
956	struct sock *sk = sock->sk;
957
958	if (!sk)
959	return 0;
960
@@ -962,9 +962,9 @@ static int wanpipe_release(struct socket *sock)
962	check_write_queue(sk);
963
964	/* Kill the tx timer, if we don't kill it now, the timer
965	 * will run after we kill the sock. Timer code will
966	 * try to access the sock which has been killed and cause
967	 * kernel panic */
968
969	del_timer(&wp->tx_timer);
970
@@ -982,7 +982,7 @@ static int wanpipe_release(struct socket *sock)
982	DBG_PRINTK(KERN_INFO "wansock: Sending Clear Indication %i\n",
983	sk->sk_state);
984	dev_put(dev);
985	}
986	}
987
988	set_bit(1,&wanpipe_tx_critical);
@@ -992,10 +992,10 @@ static int wanpipe_release(struct socket *sock)
992	clear_bit(1,&wanpipe_tx_critical);
993
994
995
996	release_driver(sk);
997
998
999	/*
1000	 * Now the socket is dead. No more input will appear.
1001	 */
@@ -1040,9 +1040,9 @@ static int wanpipe_release(struct socket *sock)
1040	/*============================================================
1041	 * check_write_queue
1042	 *
1043	 * During sock shutdown, if the sock state is
1044	 * WANSOCK_CONNECTED and there is transmit data
1045	 * pending. Wait until data is released
1046	 * before proceeding.
1047	 *===========================================================*/
1048
@@ -1062,7 +1062,7 @@ static void check_write_queue(struct sock *sk)
1062	/*============================================================
1063	 * release_driver
1064	 *
1065	 * This function is called during sock shutdown, to
1066	 * release any resources and links that bind the sock
1067	 * to the driver. It also changes the state of the
1068	 * sock to WANSOCK_DISCONNECTED
@@ -1105,7 +1105,7 @@ static void release_driver(struct sock *sk)
1105	 * start_cleanup_timer
1106	 *
1107	 * If new incoming call's are pending but the socket
1108	 * is being released, start the timer which will
1109	 * envoke the kill routines for pending socks.
1110	 *===========================================================*/
1111
@@ -1125,7 +1125,7 @@ static void start_cleanup_timer (struct sock *sk)
1125	 *
1126	 * This is a function which performs actual killing
1127	 * of the sock. It releases socket resources,
1128	 * and unlinks the sock from the driver.
1129	 *===========================================================*/
1130
1131	static void wanpipe_kill_sock_timer (unsigned long data)
@@ -1139,13 +1139,13 @@ static void wanpipe_kill_sock_timer (unsigned long data)
1139
1140	/* This function can be called from interrupt. We must use
1141	 * appropriate locks */
1142
1143	if (test_bit(1,&wanpipe_tx_critical)){
1144	sk->sk_timer.expires = jiffies + 10;
1145	add_timer(&sk->sk_timer);
1146	return;
1147	}
1148
1149	write_lock(&wanpipe_sklist_lock);
1150	sk_del_node_init(sk);
1151	write_unlock(&wanpipe_sklist_lock);
@@ -1159,7 +1159,7 @@ static void wanpipe_kill_sock_timer (unsigned long data)
1159	chan=dev->priv;
1160	atomic_set(&chan->disconnect,1);
1161	dev_put(dev);
1162	}
1163	}
1164
1165	release_driver(sk);
@@ -1170,7 +1170,7 @@ static void wanpipe_kill_sock_timer (unsigned long data)
1170	skb_queue_purge(&sk->sk_receive_queue);
1171	skb_queue_purge(&sk->sk_write_queue);
1172	skb_queue_purge(&sk->sk_error_queue);
1173
1174	if (atomic_read(&sk->sk_rmem_alloc) ||
1175	atomic_read(&sk->sk_wmem_alloc)) {
1176	del_timer(&sk->sk_timer);
@@ -1205,7 +1205,7 @@ static void wanpipe_kill_sock_accept (struct sock *sk)
1205
1206	/* This function can be called from interrupt. We must use
1207	 * appropriate locks */
1208
1209	write_lock(&wanpipe_sklist_lock);
1210	sk_del_node_init(sk);
1211	write_unlock(&wanpipe_sklist_lock);
@@ -1282,10 +1282,10 @@ static int wanpipe_do_bind(struct sock *sk, struct net_device *dev,
1282	chan=dev->priv;
1283	sk->sk_state = chan->state;
1284
1285	if (wp->num == htons(X25_PROT) &&
1286	sk->sk_state != WANSOCK_DISCONNECTED &&
1287	sk->sk_state != WANSOCK_CONNECTING) {
1288	DBG_PRINTK(KERN_INFO
1289	"wansock: Binding to Device not DISCONNECTED %i\n",
1290	sk->sk_state);
1291	release_device(dev);
@@ -1338,7 +1338,7 @@ static int wanpipe_bind(struct socket *sock, struct sockaddr *uaddr, int addr_le
1338	/*
1339	 * Check legality
1340	 */
1341
1342	if (addr_len < sizeof(struct wan_sockaddr_ll)){
1343	printk(KERN_INFO "wansock: Address length error\n");
1344	return -EINVAL;
@@ -1358,12 +1358,12 @@ static int wanpipe_bind(struct socket *sock, struct sockaddr *uaddr, int addr_le
1358
1359	if (!strcmp(sll->sll_device,"svc_listen")){
1360
1361	/* Bind a sock to a card structure for listening
1362	 */
1363	int err=0;
1364
1365	/* This is x25 specific area if protocol doesn't
1366	 * match, return error */
1367	if (sll->sll_protocol != htons(X25_PROT))
1368	return -EINVAL;
1369
@@ -1376,14 +1376,14 @@ static int wanpipe_bind(struct socket *sock, struct sockaddr *uaddr, int addr_le
1376	sk->sk_state = WANSOCK_BIND_LISTEN;
1377	return 0;
1378
1379	}else if (!strcmp(sll->sll_device,"svc_connect")){
1380
1381	/* This is x25 specific area if protocol doesn't
1382	 * match, return error */
1383	if (sll->sll_protocol != htons(X25_PROT))
1384	return -EINVAL;
1385
1386	/* Find a free device
1387	 */
1388	dev = wanpipe_find_free_dev(card);
1389	if (dev == NULL){
@@ -1392,9 +1392,9 @@ static int wanpipe_bind(struct socket *sock, struct sockaddr *uaddr, int addr_le
1392	return -EINVAL;
1393	}
1394	}else{
1395	/* Bind a socket to a interface name
1396	 * This is used by PVC mostly
1397	 */
1398	strlcpy(name,sll->sll_device,sizeof(name));
1399	dev = dev_get_by_name(name);
1400	if (dev == NULL){
@@ -1419,8 +1419,8 @@ static int wanpipe_bind(struct socket *sock, struct sockaddr *uaddr, int addr_le
1419
1420	/*============================================================
1421	 * get_atomic_device
1422	 *
1423	 * Sets a bit atomically which indicates that
1424	 * the interface is taken. This avoids race conditions.
1425	 *===========================================================*/
1426
@@ -1436,7 +1436,7 @@ static inline int get_atomic_device(struct net_device *dev)
1436
1437	/*============================================================
1438	 * check_dev
1439	 *
1440	 * Check that device name belongs to a particular card.
1441	 *===========================================================*/
1442
@@ -1446,8 +1446,8 @@ static int check_dev(struct net_device *dev, sdla_t *card)
1446
1447	for (tmp_dev = card->wandev.dev; tmp_dev;
1448	tmp_dev = *((struct net_device **)tmp_dev->priv)) {
1449	if (tmp_dev->ifindex == dev->ifindex){
1450	return 0;
1451	}
1452	}
1453	return 1;
@@ -1455,7 +1455,7 @@ static int check_dev(struct net_device *dev, sdla_t *card)
1455
1456	/*============================================================
1457	 * wanpipe_find_free_dev
1458	 *
1459	 * Find a free network interface. If found set atomic
1460	 * bit indicating that the interface is taken.
1461	 * X25API Specific.
@@ -1468,12 +1468,12 @@ struct net_device *wanpipe_find_free_dev(sdla_t *card)
1468
1469	if (test_and_set_bit(0,&find_free_critical)){
1470	printk(KERN_INFO "CRITICAL in Find Free\n");
1471	}
1472
1473	for (dev = card->wandev.dev; dev;
1474	dev = *((struct net_device **)dev->priv)) {
1475	chan = dev->priv;
1476	if (!chan)
1477	continue;
1478	if (chan->usedby == API && chan->svc){
1479	if (!get_atomic_device (dev)){
@@ -1492,16 +1492,16 @@ struct net_device *wanpipe_find_free_dev(sdla_t *card)
1492
1493	/*============================================================
1494	 * wanpipe_create
1495	 *
1496	 * SOCKET() System call. It allocates a sock structure
1497	 * and adds the socket to the wanpipe_sk_list.
1498	 * Crates AF_WANPIPE socket.
1499	 *===========================================================*/
1500
1501	static int wanpipe_create(struct socket *sock, int protocol)
1502	{
1503	struct sock *sk;
1504
1505	//FIXME: This checks for root user, SECURITY ?
1506	//if (!capable(CAP_NET_RAW))
1507	// return -EPERM;
@@ -1526,7 +1526,7 @@ static int wanpipe_create(struct socket *sock, int protocol)
1526	sk->sk_bound_dev_if = 0;
1527
1528	atomic_inc(&wanpipe_socks_nr);
1529
1530	/* We must disable interrupts because the ISR
1531	 * can also change the list */
1532	set_bit(1,&wanpipe_tx_critical);
@@ -1541,8 +1541,8 @@ static int wanpipe_create(struct socket *sock, int protocol)
1541
1542	/*============================================================
1543	 * wanpipe_recvmsg
1544	 *
1545	 * Pull a packet from our receive queue and hand it
1546	 * to the user. If necessary we block.
1547	 *===========================================================*/
1548
@@ -1570,13 +1570,13 @@ static int wanpipe_recvmsg(struct kiocb *iocb, struct socket *sock,
1570	 * but then it will block.
1571	 */
1572
1573	if (flags & MSG_OOB){
1574	skb = skb_dequeue(&sk->sk_error_queue);
1575	}else{
1576	skb=skb_recv_datagram(sk,flags,1,&err);
1577	}
1578	/*
1579	 * An error occurred so return it. Because skb_recv_datagram()
1580	 * handles the blocking we don't see and worry about blocking
1581	 * retries.
1582	 */
@@ -1602,9 +1602,9 @@ static int wanpipe_recvmsg(struct kiocb *iocb, struct socket *sock,
1602	err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
1603	if (err)
1604	goto out_free;
1605
1606	sock_recv_timestamp(msg, sk, skb);
1607
1608	if (msg->msg_name)
1609	memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1610
@@ -1623,13 +1623,13 @@ out:
1623
1624	/*============================================================
1625	 * wanpipe_wakeup_driver
1626	 *
1627	 * If socket receive buffer is full and driver cannot
1628	 * pass data up the sock, it sets a packet_block flag.
1629	 * This function check that flag and if sock receive
1630	 * queue has room it kicks the driver BH handler.
1631	 *
1632	 * This way, driver doesn't have to poll the sock
1633	 * receive queue.
1634	 *===========================================================*/
1635
@@ -1646,8 +1646,8 @@ static void wanpipe_wakeup_driver(struct sock *sk)
1646
1647	if ((chan = dev->priv) == NULL)
1648	return;
1649
1650	if (atomic_read(&chan->receive_block)){
1651	if (atomic_read(&sk->sk_rmem_alloc) <
1652	((unsigned)sk->sk_rcvbuf * 0.9)) {
1653	printk(KERN_INFO "wansock: Queuing task for wanpipe\n");
@@ -1655,13 +1655,13 @@ static void wanpipe_wakeup_driver(struct sock *sk)
1655	wanpipe_queue_tq(&chan->wanpipe_task);
1656	wanpipe_mark_bh();
1657	}
1658	}
1659	}
1660
1661	/*============================================================
1662	 * wanpipe_getname
1663	 *
1664	 * I don't know what to do with this yet.
1665	 * User can use this function to get sock address
1666	 * information. Not very useful for Sangoma's purposes.
1667	 *===========================================================*/
@@ -1687,17 +1687,17 @@ static int wanpipe_getname(struct socket *sock, struct sockaddr *uaddr,
1687	sll->sll_halen = 0;
1688	}
1689	*uaddr_len = sizeof(*sll);
1690
1691	dev_put(dev);
1692
1693	return 0;
1694	}
1695
1696	/*============================================================
1697	 * wanpipe_notifier
1698	 *
1699	 * If driver turns off network interface, this function
1700	 * will be envoked. Currently I treate it as a
1701	 * call disconnect. More thought should go into this
1702	 * function.
1703	 *
@@ -1718,7 +1718,7 @@ static int wanpipe_notifier(struct notifier_block *this, unsigned long msg, void
1718	continue;
1719	if (dev == NULL)
1720	continue;
1721
1722	switch (msg) {
1723	case NETDEV_DOWN:
1724	case NETDEV_UNREGISTER:
@@ -1732,7 +1732,7 @@ static int wanpipe_notifier(struct notifier_block *this, unsigned long msg, void
1732
1733	if (msg == NETDEV_UNREGISTER) {
1734	printk(KERN_INFO "wansock: Unregistering Device: %s\n",
1735	dev->name);
1736	wanpipe_unlink_driver(sk);
1737	sk->sk_bound_dev_if = 0;
1738	}
@@ -1753,7 +1753,7 @@ static int wanpipe_notifier(struct notifier_block *this, unsigned long msg, void
1753
1754	/*============================================================
1755	 * wanpipe_ioctl
1756	 *
1757	 * Execute a user commands, and set socket options.
1758	 *
1759	 * FIXME: More thought should go into this function.
@@ -1765,7 +1765,7 @@ static int wanpipe_ioctl(struct socket *sock, unsigned int cmd, unsigned long ar
1765	struct sock *sk = sock->sk;
1766	int err;
1767
1768	switch(cmd)
1769	{
1770	case SIOCGSTAMP:
1771	return sock_get_timestamp(sk, (struct timeval __user *)arg);
@@ -1778,7 +1778,7 @@ static int wanpipe_ioctl(struct socket *sock, unsigned int cmd, unsigned long ar
1778
1779	if (sk->sk_state == WANSOCK_CONNECTED)
1780	return 0;
1781
1782	return 1;
1783
1784
@@ -1804,7 +1804,7 @@ static int wanpipe_ioctl(struct socket *sock, unsigned int cmd, unsigned long ar
1804	case SIOC_WANPIPE_DEBUG:
1805
1806	return wanpipe_debug(sk,(void*)arg);
1807
1808	case SIOC_WANPIPE_SET_NONBLOCK:
1809
1810	if (sk->sk_state != WANSOCK_DISCONNECTED)
@@ -1812,7 +1812,7 @@ static int wanpipe_ioctl(struct socket *sock, unsigned int cmd, unsigned long ar
1812
1813	sock->file->f_flags |= O_NONBLOCK;
1814	return 0;
1815
1816	#ifdef CONFIG_INET
1817	case SIOCADDRT:
1818	case SIOCDELRT:
@@ -1842,7 +1842,7 @@ static int wanpipe_ioctl(struct socket *sock, unsigned int cmd, unsigned long ar
1842
1843	/*============================================================
1844	 * wanpipe_debug
1845	 *
1846	 * This function will pass up information about all
1847	 * active sockets.
1848	 *
@@ -1893,49 +1893,49 @@ static int wanpipe_debug (struct sock *origsk, void *arg)
1893
1894	if (sk->sk_bound_dev_if) {
1895	dev = dev_get_by_index(sk->sk_bound_dev_if);
1896	if (!dev)
1897	continue;
1898
1899	chan=dev->priv;
1900	dev_put(dev);
1901
1902	if ((err=put_user(chan->state, &dbg_data->debug[cnt].d_state)))
1903	return err;
1904	if ((err=put_user(chan->svc, &dbg_data->debug[cnt].svc)))
1905	return err;
1906
1907	if ((err=put_user(atomic_read(&chan->command),
1908	&dbg_data->debug[cnt].command)))
1909	return err;
1910
1911
1912	if (wp){
1913	sdla_t *card = (sdla_t*)wp->card;
1914
1915	if (card){
1916	if ((err=put_user(atomic_read(&card->u.x.command_busy),
1917	&dbg_data->debug[cnt].cmd_busy)))
1918	return err;
1919	}
1920
1921	if ((err=put_user(wp->lcn,
1922	&dbg_data->debug[cnt].lcn)))
1923	return err;
1924
1925	if (wp->mbox) {
1926	if ((err=put_user(1, &dbg_data->debug[cnt].mbox)))
1927	return err;
1928	}
1929	}
1930
1931	if ((err=put_user(atomic_read(&chan->receive_block),
1932	&dbg_data->debug[cnt].rblock)))
1933	return err;
1934
1935	if (copy_to_user(dbg_data->debug[cnt].name, dev->name, strlen(dev->name)))
1936	return -EFAULT;
1937	}
1938
1939	if (++cnt == MAX_NUM_DEBUG)
1940	break;
1941	}
@@ -1944,7 +1944,7 @@ static int wanpipe_debug (struct sock *origsk, void *arg)
1944
1945	/*============================================================
1946	 * get_ioctl_cmd
1947	 *
1948	 * Pass up the contents of socket MBOX to the user.
1949	 *===========================================================*/
1950
@@ -1974,7 +1974,7 @@ static int get_ioctl_cmd (struct sock *sk, void *arg)
1974	if ((err=put_user(mbox_ptr->cmd.result, &usr_data->hdr.result)))
1975	return err;
1976	if ((err=put_user(mbox_ptr->cmd.lcn, &usr_data->hdr.lcn)))
1977	return err;
1978
1979	if (mbox_ptr->cmd.length > 0){
1980	if (mbox_ptr->cmd.length > X25_MAX_DATA)
@@ -1986,13 +1986,13 @@ static int get_ioctl_cmd (struct sock *sk, void *arg)
1986	}
1987	}
1988	return 0;
1989	}
1990
1991	/*============================================================
1992	 * set_ioctl_cmd
1993	 *
1994	 * Before command can be execute, socket MBOX must
1995	 * be created, and initialized with user data.
1996	 *===========================================================*/
1997
1998	static int set_ioctl_cmd (struct sock *sk, void *arg)
@@ -2008,7 +2008,7 @@ static int set_ioctl_cmd (struct sock *sk, void *arg)
2008	return -ENODEV;
2009
2010	dev_put(dev);
2011
2012	if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL)
2013	return -ENOMEM;
2014
@@ -2092,12 +2092,12 @@ unsigned int wanpipe_poll(struct file * file, struct socket *sock, poll_table *w
2092	return mask;
2093	}
2094
2095	/* This check blocks the user process if there is
2096	 * a packet already queued in the socket write queue.
2097	 * This option is only for X25API protocol, for other
2098	 * protocol like chdlc enable streaming mode,
2099	 * where multiple packets can be pending in the socket
2100	 * transmit queue */
2101
2102	if (wp_sk(sk)->num == htons(X25_PROT)) {
2103	if (atomic_read(&wp_sk(sk)->packet_sent))
@@ -2110,7 +2110,7 @@ unsigned int wanpipe_poll(struct file * file, struct socket *sock, poll_table *w
2110	}else{
2111	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2112	}
2113
2114	return mask;
2115	}
2116
@@ -2125,8 +2125,8 @@ static int wanpipe_listen(struct socket *sock, int backlog)
2125	{
2126	struct sock *sk = sock->sk;
2127
2128	/* This is x25 specific area if protocol doesn't
2129	 * match, return error */
2130	if (wp_sk(sk)->num != htons(X25_PROT))
2131	return -EINVAL;
2132
@@ -2163,7 +2163,7 @@ static int wanpipe_link_card (struct sock *sk)
2163	card->sk=sk;
2164	card->func=wanpipe_listen_rcv;
2165	sock_set_flag(sk, SOCK_ZAPPED);
2166
2167	return 0;
2168	}
2169
@@ -2176,7 +2176,7 @@ static int wanpipe_link_card (struct sock *sk)
2176
2177	static void wanpipe_unlink_card (struct sock *sk)
2178	{
2179	sdla_t *card = (sdla_t*)wp_sk(sk)->card;
2180
2181	if (card){
2182	card->sk=NULL;
@@ -2202,9 +2202,9 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
2202	printk(KERN_INFO "NO MBOX PTR !!!!!\n");
2203	return -EINVAL;
2204	}
2205
2206	/* This is x25 specific area if protocol doesn't
2207	 * match, return error */
2208	if (wp->num != htons(X25_PROT))
2209	return -EINVAL;
2210
@@ -2217,17 +2217,17 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
2217	err = -EHOSTDOWN;
2218	break;
2219	}
2220
2221	err = execute_command(sk,X25_ACCEPT_CALL,0);
2222	if (err < 0)
2223	break;
2224
2225	/* Update. Mar6 2000.
2226	 * Do not set the sock lcn number here, since
2227	 * it is done in wanpipe_listen_rcv().
2228	 */
2229	if (sk->sk_state == WANSOCK_CONNECTED) {
2230	wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
2231	DBG_PRINTK(KERN_INFO "\nwansock: Accept OK %i\n",
2232	wp->lcn);
2233	err = 0;
@@ -2249,15 +2249,15 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
2249
2250
2251	/* Check if data buffers are pending for transmission,
2252	 * if so, check whether user wants to wait until data
2253	 * is transmitted, or clear a call and drop packets */
2254
2255	if (atomic_read(&sk->sk_wmem_alloc) ||
2256	check_driver_busy(sk)) {
2257	mbox_cmd_t *mbox = wp->mbox;
2258	if (mbox->cmd.qdm & 0x80){
2259	mbox->cmd.result = 0x35;
2260	err = -EAGAIN;
2261	break;
2262	}
2263	}
@@ -2286,15 +2286,15 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
2286
2287
2288	/* Check if data buffers are pending for transmission,
2289	 * if so, check whether user wants to wait until data
2290	 * is transmitted, or reset a call and drop packets */
2291
2292	if (atomic_read(&sk->sk_wmem_alloc) ||
2293	check_driver_busy(sk)) {
2294	mbox_cmd_t *mbox = wp->mbox;
2295	if (mbox->cmd.qdm & 0x80){
2296	mbox->cmd.result = 0x35;
2297	err = -EAGAIN;
2298	break;
2299	}
2300	}
@@ -2316,7 +2316,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
2316
2317	if (sk->sk_state == WANSOCK_CONNECTED) {
2318
2319	wp->lcn = ((mbox_cmd_t*)wp->mbox)->cmd.lcn;
2320
2321	DBG_PRINTK(KERN_INFO "\nwansock: PLACE CALL OK %i\n",
2322	wp->lcn);
@@ -2337,7 +2337,7 @@ static int wanpipe_exec_cmd(struct sock *sk, int cmd, unsigned int flags)
2337
2338	break;
2339
2340	default:
2341	return -EINVAL;
2342	}
2343
@@ -2364,9 +2364,9 @@ static int check_driver_busy (struct sock *sk)
2364	/*======================================================================
2365	 * wanpipe_accept
2366	 *
2367	 * ACCEPT() System call. X25API Specific function.
2368	 * For each incoming call, create a new socket and
2369	 * return it to the user.
2370	 *=====================================================================*/
2371
2372	static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags)
@@ -2378,10 +2378,10 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
2378	int err=0;
2379
2380	if (newsock->sk != NULL){
2381	wanpipe_kill_sock_accept(newsock->sk);
2382	newsock->sk=NULL;
2383	}
2384
2385	if ((sk = sock->sk) == NULL)
2386	return -EINVAL;
2387
@@ -2410,10 +2410,10 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
2410	}
2411	current->state = TASK_RUNNING;
2412	remove_wait_queue(sk->sk_sleep,&wait);
2413
2414	if (err != 0)
2415	return err;
2416
2417	newsk = get_newsk_from_skb(skb);
2418	if (!newsk){
2419	return -EINVAL;
@@ -2431,7 +2431,7 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
2431	/* Now attach up the new socket */
2432	sk->sk_ack_backlog--;
2433	newsock->sk = newsk;
2434
2435	kfree_skb(skb);
2436
2437	DBG_PRINTK(KERN_INFO "\nwansock: ACCEPT Got LCN %i\n",
@@ -2449,16 +2449,16 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
2449	struct sock * get_newsk_from_skb (struct sk_buff *skb)
2450	{
2451	struct net_device *dev = skb->dev;
2452	wanpipe_common_t *chan;
2453
2454	if (!dev){
2455	return NULL;
2456	}
2457
2458	if ((chan = dev->priv) == NULL){
2459	return NULL;
2460	}
2461
2462	if (!chan->sk){
2463	return NULL;
2464	}
@@ -2470,7 +2470,7 @@ struct sock * get_newsk_from_skb (struct sk_buff *skb)
2470	 *
2471	 * CONNECT() System Call. X25API specific function
2472	 * Check the state of the sock, and execute PLACE_CALL command.
2473	 * Connect can ether block or return without waiting for connection,
2474	 * if specified by user.
2475	 *=====================================================================*/
2476
@@ -2492,7 +2492,7 @@ static int wanpipe_connect(struct socket *sock, struct sockaddr *uaddr, int addr
2492	return -ECONNREFUSED;
2493	}
2494
2495	sk->sk_state = WANSOCK_DISCONNECTED;
2496	sock->state = SS_UNCONNECTED;
2497
2498	if (addr_len != sizeof(struct wan_sockaddr_ll))
@@ -2505,7 +2505,7 @@ static int wanpipe_connect(struct socket *sock, struct sockaddr *uaddr, int addr
2505	return -ENETUNREACH;
2506
2507	dev_put(dev);
2508
2509	if (!sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
2510	return -EINVAL;
2511
@@ -2534,7 +2534,7 @@ static int wanpipe_connect(struct socket *sock, struct sockaddr *uaddr, int addr
2534
2535	if (sk->sk_state != WANSOCK_CONNECTED) {
2536	sock->state = SS_UNCONNECTED;
2537	return -ECONNREFUSED;
2538	}
2539
2540	sock->state = SS_CONNECTED;
@@ -2549,10 +2549,10 @@ const struct proto_ops wanpipe_ops = {
2549	.connect = wanpipe_connect,
2550	.socketpair = sock_no_socketpair,
2551	.accept = wanpipe_accept,
2552	.getname = wanpipe_getname,
2553	.poll = wanpipe_poll,
2554	.ioctl = wanpipe_ioctl,
2555	.listen = wanpipe_listen,
2556	.shutdown = sock_no_shutdown,
2557	.setsockopt = sock_no_setsockopt,
2558	.getsockopt = sock_no_getsockopt,
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 4d90a179aeda..5d2d93dc0837 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -314,10 +314,10 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
314	cnt += 1;
315	break;
316
317	case NLPID_SNAP: /* SNAP encapsulation */
318	if (memcmp(&skb->data[cnt + 1], wanrouter_oui_ether,
319	sizeof(wanrouter_oui_ether))){
320	printk(KERN_INFO
321	"%s: unsupported SNAP OUI %02X-%02X-%02X "
322	"on interface %s!\n", wanrouter_modname,
323	skb->data[cnt+1], skb->data[cnt+2],
@@ -450,7 +450,7 @@ static int wanrouter_device_setup(struct wan_device *wandev,
450	kfree(conf);
451	printk(KERN_INFO "%s: ERROR, Invalid MAGIC Number\n",
452	wandev->name);
453	return -EINVAL;
454	}
455
456	if (conf->data_size && conf->data) {
@@ -459,13 +459,13 @@ static int wanrouter_device_setup(struct wan_device *wandev,
459	"%s: ERROR, Invalid firmware data size %i !\n",
460	wandev->name, conf->data_size);
461	kfree(conf);
462	return -EINVAL;
463	}
464
465	data = vmalloc(conf->data_size);
466	if (!data) {
467	printk(KERN_INFO
468	"%s: ERROR, Faild allocate kernel memory !\n",
469	wandev->name);
470	kfree(conf);
471	return -ENOBUFS;
@@ -686,7 +686,7 @@ out:
686	static int wanrouter_device_del_if(struct wan_device *wandev, char __user *u_name)
687	{
688	char name[WAN_IFNAME_SZ + 1];
689	int err = 0;
690
691	if (wandev->state == WAN_UNCONFIGURED)
692	return -ENODEV;
@@ -709,8 +709,8 @@ static int wanrouter_device_del_if(struct wan_device *wandev, char __user *u_nam
709	 * interfaces are deleted and the link irrecoverably disconnected.
710	 */
711
712	if (!wandev->ndev && wandev->shutdown)
713	err = wandev->shutdown(wandev);
714
715	return err;
716	}
@@ -804,7 +804,7 @@ static int wanrouter_delete_interface(struct wan_device *wandev, char *name)
804
805	static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
806	{
807	spin_lock_irqsave(lock, *smp_flags);
808	}
809
810
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 930ea59463ad..abce82873604 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -37,10 +37,10 @@
37
38	#define PROT_DECODE(prot) ((prot == WANCONFIG_FR) ? " FR" :\
39	(prot == WANCONFIG_X25) ? " X25" : \
40	(prot == WANCONFIG_PPP) ? " PPP" : \
41	(prot == WANCONFIG_CHDLC) ? " CHDLC": \
42	(prot == WANCONFIG_MPPP) ? " MPPP" : \
43	" Unknown" )
44
45	/****** Function Prototypes *************************************************/
46
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index b37d894358ec..e62ba41b05c5 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -3,7 +3,7 @@
3	 *
4	 * This is ALPHA test software. This code may break your machine,
5	 * randomly fail to work with new releases, misbehave and/or generally
6	 * screw up. It might even work.
7	 *
8	 * This code REQUIRES 2.1.15 or higher
9	 *
@@ -18,11 +18,11 @@
18	 * X.25 002 Jonathan Naylor Centralised disconnect handling.
19	 * New timer architecture.
20	 * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant.
21	 * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of
22	 * facilities negotiation and increased
23	 * the throughput upper limit.
24	 * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups
25	 * 2000-09-04 Henner Eisen Set sock->state in x25_accept().
26	 * Fixed x25_output() related skb leakage.
27	 * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket.
28	 * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation.
@@ -256,8 +256,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
256	 * call user data vs this sockets call user data
257	 */
258	if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
259	if((memcmp(x25_sk(s)->calluserdata.cuddata,
260	skb->data,
261	x25_sk(s)->cudmatchlength)) == 0) {
262	sock_hold(s);
263	goto found;
@@ -421,7 +421,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
421	{
422	struct sock *sk = sock->sk;
423	int val, len, rc = -ENOPROTOOPT;
424
425	if (level != SOL_X25 || optname != X25_QBITINCL)
426	goto out;
427
@@ -434,7 +434,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
434	rc = -EINVAL;
435	if (len < 0)
436	goto out;
437
438	rc = -EFAULT;
439	if (put_user(len, optlen))
440	goto out;
@@ -523,12 +523,12 @@ static int x25_create(struct socket *sock, int protocol)
523	x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
524	x25->facilities.throughput = X25_DEFAULT_THROUGHPUT;
525	x25->facilities.reverse = X25_DEFAULT_REVERSE;
526	x25->dte_facilities.calling_len = 0;
527	x25->dte_facilities.called_len = 0;
528	memset(x25->dte_facilities.called_ae, '\0',
529	sizeof(x25->dte_facilities.called_ae));
530	memset(x25->dte_facilities.calling_ae, '\0',
531	sizeof(x25->dte_facilities.calling_ae));
532
533	rc = 0;
534	out:
@@ -608,7 +608,7 @@ static int x25_release(struct socket *sock)
608	break;
609	}
610
611	sock->sk = NULL;
612	sk->sk_socket = NULL; /* Not used, but we should do this */
613	out:
614	return 0;
@@ -635,7 +635,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
635	static int x25_wait_for_connection_establishment(struct sock *sk)
636	{
637	DECLARE_WAITQUEUE(wait, current);
638	int rc;
639
640	add_wait_queue_exclusive(sk->sk_sleep, &wait);
641	for (;;) {
@@ -686,7 +686,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
686	if (sk->sk_state == TCP_ESTABLISHED)
687	goto out;
688
689	sk->sk_state = TCP_CLOSE;
690	sock->state = SS_UNCONNECTED;
691
692	rc = -EINVAL;
@@ -778,7 +778,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
778	remove_wait_queue(sk->sk_sleep, &wait);
779	return rc;
780	}
781
782	static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
783	{
784	struct sock *sk = sock->sk;
@@ -837,7 +837,7 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
837
838	return 0;
839	}
840
841	int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
842	unsigned int lci)
843	{
@@ -1120,7 +1120,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
1120 if (msg->msg_flags & MSG_OOB) 1120 if (msg->msg_flags & MSG_OOB)
1121 skb_queue_tail(&x25->interrupt_out_queue, skb); 1121 skb_queue_tail(&x25->interrupt_out_queue, skb);
1122 else { 1122 else {
1123 len = x25_output(sk, skb); 1123 len = x25_output(sk, skb);
1124 if (len < 0) 1124 if (len < 0)
1125 kfree_skb(skb); 1125 kfree_skb(skb);
1126 else if (x25->qbitincl) 1126 else if (x25->qbitincl)
@@ -1219,7 +1219,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
1219 msg->msg_flags |= MSG_TRUNC; 1219 msg->msg_flags |= MSG_TRUNC;
1220 } 1220 }
1221 1221
1222 /* Currently, each datagram always contains a complete record */ 1222 /* Currently, each datagram always contains a complete record */
1223 msg->msg_flags |= MSG_EOR; 1223 msg->msg_flags |= MSG_EOR;
1224 1224
1225 rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 1225 rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
@@ -1277,8 +1277,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1277 case SIOCGSTAMP: 1277 case SIOCGSTAMP:
1278 rc = -EINVAL; 1278 rc = -EINVAL;
1279 if (sk) 1279 if (sk)
1280 rc = sock_get_timestamp(sk, 1280 rc = sock_get_timestamp(sk,
1281 (struct timeval __user *)argp); 1281 (struct timeval __user *)argp);
1282 break; 1282 break;
1283 case SIOCGIFADDR: 1283 case SIOCGIFADDR:
1284 case SIOCSIFADDR: 1284 case SIOCSIFADDR:
@@ -1346,17 +1346,17 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1346 } 1346 }
1347 1347
1348 case SIOCX25GDTEFACILITIES: { 1348 case SIOCX25GDTEFACILITIES: {
1349 rc = copy_to_user(argp, &x25->dte_facilities, 1349 rc = copy_to_user(argp, &x25->dte_facilities,
1350 sizeof(x25->dte_facilities)); 1350 sizeof(x25->dte_facilities));
1351 if (rc) 1351 if (rc)
1352 rc = -EFAULT; 1352 rc = -EFAULT;
1353 break; 1353 break;
1354 } 1354 }
1355 1355
1356 case SIOCX25SDTEFACILITIES: { 1356 case SIOCX25SDTEFACILITIES: {
1357 struct x25_dte_facilities dtefacs; 1357 struct x25_dte_facilities dtefacs;
1358 rc = -EFAULT; 1358 rc = -EFAULT;
1359 if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) 1359 if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
1360 break; 1360 break;
1361 rc = -EINVAL; 1361 rc = -EINVAL;
1362 if (sk->sk_state != TCP_LISTEN && 1362 if (sk->sk_state != TCP_LISTEN &&
@@ -1414,7 +1414,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1414 if (copy_from_user(&sub_addr, argp, 1414 if (copy_from_user(&sub_addr, argp,
1415 sizeof(sub_addr))) 1415 sizeof(sub_addr)))
1416 break; 1416 break;
1417 rc = -EINVAL; 1417 rc = -EINVAL;
1418 if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN) 1418 if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
1419 break; 1419 break;
1420 x25->cudmatchlength = sub_addr.cudmatchlength; 1420 x25->cudmatchlength = sub_addr.cudmatchlength;
@@ -1443,7 +1443,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1443 break; 1443 break;
1444 } 1444 }
1445 1445
1446 default: 1446 default:
1447 rc = -ENOIOCTLCMD; 1447 rc = -ENOIOCTLCMD;
1448 break; 1448 break;
1449 } 1449 }
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index 2b2e7fd689f3..5f631061c229 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -18,7 +18,7 @@ static int max_timer[] = { 300 * HZ };
static struct ctl_table_header *x25_table_header;

static struct ctl_table x25_table[] = {
	{
		.ctl_name = NET_X25_RESTART_REQUEST_TIMEOUT,
		.procname = "restart_request_timeout",
		.data = &sysctl_x25_restart_request_timeout,
@@ -29,7 +29,7 @@ static struct ctl_table x25_table[] = {
		.extra1 = &min_timer,
		.extra2 = &max_timer,
	},
	{
		.ctl_name = NET_X25_CALL_REQUEST_TIMEOUT,
		.procname = "call_request_timeout",
		.data = &sysctl_x25_call_request_timeout,
@@ -40,7 +40,7 @@ static struct ctl_table x25_table[] = {
		.extra1 = &min_timer,
		.extra2 = &max_timer,
	},
	{
		.ctl_name = NET_X25_RESET_REQUEST_TIMEOUT,
		.procname = "reset_request_timeout",
		.data = &sysctl_x25_reset_request_timeout,
@@ -51,7 +51,7 @@ static struct ctl_table x25_table[] = {
		.extra1 = &min_timer,
		.extra2 = &max_timer,
	},
	{
		.ctl_name = NET_X25_CLEAR_REQUEST_TIMEOUT,
		.procname = "clear_request_timeout",
		.data = &sysctl_x25_clear_request_timeout,
@@ -62,7 +62,7 @@ static struct ctl_table x25_table[] = {
		.extra1 = &min_timer,
		.extra2 = &max_timer,
	},
	{
		.ctl_name = NET_X25_ACK_HOLD_BACK_TIMEOUT,
		.procname = "acknowledgement_hold_back_timeout",
		.data = &sysctl_x25_ack_holdback_timeout,
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f099fd6a7c0e..c7221de98a95 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -1,8 +1,8 @@
/*
 * X.25 Packet Layer release 002
 *
 * This is ALPHA test software. This code may break your machine, randomly fail to work with new
 * releases, misbehave and/or generally screw up. It might even work.
 *
 * This code REQUIRES 2.1.15 or higher
 *
@@ -31,7 +31,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
	unsigned int lci;

	frametype = skb->data[2];
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

	/*
	 * LCI of zero is always for us, and its always a link control
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 27f5cc7966f6..dec404afa113 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -3,7 +3,7 @@
 *
 * This is ALPHA test software. This code may break your machine,
 * randomly fail to work with new releases, misbehave and/or generally
 * screw up. It might even work.
 *
 * This code REQUIRES 2.1.15 or higher
 *
@@ -15,7 +15,7 @@
 *
 * History
 *	X.25 001	Split from x25_subr.c
 *	mar/20/00	Daniela Squassoni	Disabling/enabling of facilities
 *					negotiation.
 *	apr/14/05	Shaun Pereira - Allow fast select with no restriction
 *					on response.
@@ -125,8 +125,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
		break;
	case X25_FAC_CLASS_D:
		switch (*p) {
		case X25_FAC_CALLING_AE:
			if (p[1] > X25_MAX_DTE_FACIL_LEN)
				break;
			dte_facs->calling_len = p[2];
			memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
@@ -293,7 +293,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
}

/*
 * Limit values of certain facilities according to the capability of the
 * currently attached x25 link.
 */
void x25_limit_facilities(struct x25_facilities *facilities,
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index eed50e10f09b..c5239fcdefa0 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -3,7 +3,7 @@
 *
 * This is ALPHA test software. This code may break your machine,
 * randomly fail to work with new releases, misbehave and/or generally
 * screw up. It might even work.
 *
 * This code REQUIRES 2.1.15 or higher
 *
@@ -17,7 +17,7 @@
 *	X.25 001	Jonathan Naylor	Started coding.
 *	X.25 002	Jonathan Naylor	Centralised disconnection code.
 *				New timer architecture.
 *	2000-03-20	Daniela Squassoni	Disabling/enabling of facilities
 *					negotiation.
 *	2000-11-10	Henner Eisen	Check and reset for out-of-sequence
 *				i-frames.
@@ -67,7 +67,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
			kfree_skb(skbo);
		}

		x25->fraglen = 0;
	}

	skb_set_owner_r(skbn, sk);
@@ -167,7 +167,7 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
	int queued = 0;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

	modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;

	switch (frametype) {
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 0a760fe66843..741ce95d4ad1 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -3,7 +3,7 @@
 *
 * This is ALPHA test software. This code may break your machine,
 * randomly fail to work with new releases, misbehave and/or generally
 * screw up. It might even work.
 *
 * This code REQUIRES 2.1.15 or higher
 *
@@ -16,7 +16,7 @@
 * History
 *	X.25 001	Jonathan Naylor	Started coding.
 *	X.25 002	Jonathan Naylor	New timer architecture.
 *	mar/20/00	Daniela Squassoni	Disabling/enabling of facilities
 *					negotiation.
 *	2000-09-04	Henner Eisen	dev_hold() / dev_put() for x25_neigh.
 */
@@ -94,7 +94,7 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		       skb->data[3], skb->data[4],
		       skb->data[5], skb->data[6]);
		break;

	default:
		printk(KERN_WARNING "x25: received unknown %02X "
		       "with LCI 000\n", frametype);
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
index a2e62cea819a..6f5737853912 100644
--- a/net/x25/x25_out.c
+++ b/net/x25/x25_out.c
@@ -3,7 +3,7 @@
 *
 * This is ALPHA test software. This code may break your machine,
 * randomly fail to work with new releases, misbehave and/or generally
 * screw up. It might even work.
 *
 * This code REQUIRES 2.1.15 or higher
 *
@@ -78,7 +78,7 @@ int x25_output(struct sock *sk, struct sk_buff *skb)
				       "sent\n", err, sent);
				return err;
			}

			skb_reserve(skbn, frontlen);

			len = max_len > skb->len ? skb->len : max_len;
@@ -101,7 +101,7 @@ int x25_output(struct sock *sk, struct sk_buff *skb)
			skb_queue_tail(&sk->sk_write_queue, skbn);
			sent += len;
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);
@@ -110,7 +110,7 @@ int x25_output(struct sock *sk, struct sk_buff *skb)
	return sent;
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
@@ -131,7 +131,7 @@ static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
		skb->data[2] |= (x25->vr << 5) & 0xE0;
	}

	x25_transmit_link(skb, x25->neighbour);
}

void x25_kick(struct sock *sk)
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index e0470bd8c2f9..3c9f1ba56221 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -3,7 +3,7 @@
 *
 * This is ALPHA test software. This code may break your machine,
 * randomly fail to work with new releases, misbehave and/or generally
 * screw up. It might even work.
 *
 * This code REQUIRES 2.4 with seq_file support
 *
@@ -62,7 +62,7 @@ static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
	rt = v;
	if (rt->node.next != &x25_route_list)
		rt = list_entry(rt->node.next, struct x25_route, node);
	else
		rt = NULL;
out:
	return rt;
@@ -88,7 +88,7 @@ static int x25_seq_route_show(struct seq_file *seq, void *v)
		   rt->dev ? rt->dev->name : "???");
out:
	return 0;
}

static __inline__ struct sock *x25_get_socket_idx(loff_t pos)
{
@@ -163,7 +163,7 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
		   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
out:
	return 0;
}

static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
{
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 883a848bca5b..060fcfaa2f47 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -3,7 +3,7 @@
 *
 * This is ALPHA test software. This code may break your machine,
 * randomly fail to work with new releases, misbehave and/or generally
 * screw up. It might even work.
 *
 * This code REQUIRES 2.1.15 or higher
 *
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index 71ff3088f6fe..2af190dc5b01 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -3,7 +3,7 @@
 *
 * This is ALPHA test software. This code may break your machine,
 * randomly fail to work with new releases, misbehave and/or generally
 * screw up. It might even work.
 *
 * This code REQUIRES 2.1.15 or higher
 *
@@ -99,8 +99,8 @@ static void x25_heartbeat_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
		goto restart_heartbeat;

	switch (x25_sk(sk)->state) {
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 248f94814dfb..f373a8a7d9c8 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -1,11 +1,11 @@
/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

@@ -32,14 +32,14 @@ static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "hmac(digest_null)",
	.compat = "digest_null",

	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
@@ -57,7 +57,7 @@ static struct xfrm_algo_desc aalg_list[] = {
			.icv_fullbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
@@ -142,14 +142,14 @@ static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",

	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
@@ -248,22 +248,22 @@ static struct xfrm_algo_desc ealg_list[] = {
	}
},
{
	.name = "cbc(serpent)",
	.compat = "serpent",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	.name = "cbc(camellia)",
@@ -283,22 +283,22 @@ static struct xfrm_algo_desc ealg_list[] = {
	}
},
{
	.name = "cbc(twofish)",
	.compat = "twofish",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};

@@ -478,7 +478,7 @@ void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
@@ -487,14 +487,14 @@ void xfrm_probe_algs(void)
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_has_blkcipher(ealg_list[i].name, 0,
					      CRYPTO_ALG_ASYNC);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_has_comp(calg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
@@ -541,15 +541,15 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;

		if ((len -= copy) == 0)
			return 0;
		offset += copy;
@@ -566,11 +566,11 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,

		if (copy > len)
			copy = len;

		sg.page = frag->page;
		sg.offset = frag->page_offset + offset-start;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 414f89070380..ee15bdae1419 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -4,7 +4,7 @@
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/slab.h>
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fa7ce060b454..a24f38510719 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1,4 +1,4 @@
/*
 * xfrm_policy.c
 *
 * Changes:
@@ -151,7 +151,7 @@ retry:
	return type;
}

int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -262,7 +262,7 @@ static inline unsigned long make_jiffies(long secs)
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(unsigned long data)
@@ -1024,17 +1024,17 @@ end:
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	};
}

@@ -1044,9 +1044,9 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
@@ -1511,7 +1511,7 @@ restart:
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
@@ -1546,7 +1546,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
@@ -1619,7 +1619,7 @@ static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp
	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
@@ -2243,7 +2243,7 @@ static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 91b02687db52..a35f9e4ede26 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -227,7 +227,7 @@ static inline unsigned long make_jiffies(long secs)
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_timer_handler(unsigned long data)
@@ -523,7 +523,7 @@ static void xfrm_hash_grow_check(int have_hash_collision)
}

struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
@@ -534,7 +534,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
@@ -570,7 +570,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
			acquire_in_progress = 1;
		} else if (x->km.state == XFRM_STATE_ERROR ||
			   x->km.state == XFRM_STATE_EXPIRED) {
			if (xfrm_selector_match(&x->sel, fl, family) &&
			    security_xfrm_state_pol_flow_match(x, pol, fl))
				error = -ESRCH;
		}
@@ -866,7 +866,7 @@ struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
@@ -947,7 +947,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

@@ -1121,8 +1121,8 @@ xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;
@@ -1738,7 +1738,7 @@ error:
}

EXPORT_SYMBOL(xfrm_init_state);

void __init xfrm_state_init(void)
{
	unsigned int sz;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 079a5d315759..d55436d00e86 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -48,7 +48,7 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)

	algp = RTA_DATA(rt);

	len -= (algp->alg_key_len + 7U) / 8;
	if (len < 0)
		return -EINVAL;

@@ -1107,7 +1107,7 @@ static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);
	return 0;

 rtattr_failure:
	return -1;
@@ -2467,7 +2467,7 @@ static int __init xfrm_user_init(void)
	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
				     xfrm_netlink_rcv, THIS_MODULE);
	if (nlsk == NULL)
		return -ENOMEM;
	rcu_assign_pointer(xfrm_nl, nlsk);